diff --git a/.circleci/config.yml b/.circleci/config.yml index 30f2d5c01..4daadced5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -805,6 +805,11 @@ workflows: suite: itest-deals_padding target: "./itests/deals_padding_test.go" + - test: + name: test-itest-deals_partial_retrieval_dm-level + suite: itest-deals_partial_retrieval_dm-level + target: "./itests/deals_partial_retrieval_dm-level_test.go" + - test: name: test-itest-deals_partial_retrieval suite: itest-deals_partial_retrieval @@ -890,6 +895,11 @@ workflows: suite: itest-sector_terminate target: "./itests/sector_terminate_test.go" + - test: + name: test-itest-self_sent_txn + suite: itest-self_sent_txn + target: "./itests/self_sent_txn_test.go" + - test: name: test-itest-tape suite: itest-tape @@ -971,19 +981,10 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - - build-appimage: - filters: - branches: - ignore: - - /.*/ - tags: - only: - - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - publish: requires: - build-all - build-macos - - build-appimage filters: branches: ignore: diff --git a/.circleci/template.yml b/.circleci/template.yml index 4b954391b..666ad39f5 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -816,19 +816,10 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - - build-appimage: - filters: - branches: - ignore: - - /.*/ - tags: - only: - - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - publish: requires: - build-all - build-macos - - build-appimage filters: branches: ignore: diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..76d38d292 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,21 @@ +## Related Issues + + +## Proposed Changes + + + +## Additional Info + + +## Checklist + +Before you mark the PR ready for review, please make sure that: +- [ ] All commits have a clear commit message. +- [ ] The PR title is in the form of of `: <#issue number> : ` + - example: ` fix: #1234 mempool: Introduce a cache for valid signatures` + - `PR type`: _fix_, _feat_, _INTERFACE BREAKING CHANGE_, _CONSENSUS BREAKING_, _build_, _chore_, _ci_, _docs_, _misc_,_perf_, _refactor_, _revert_, _style_, _test_ + - `area`: _api_, _chain_, _state_, _vm_, _data transfer_, _market_, _mempool_, _message_, _block production_, _multisig_, _networking_, _paychan_, _proving_, _sealing_, _wallet_ +- [ ] This PR has tests for new functionality or change in behaviour +- [ ] If new user-facing features are introduced, clear usage guidelines and / or documentation updates should be included in https://lotus.filecoin.io or [Discussion Tutorials.](https://github.com/filecoin-project/lotus/discussions/categories/tutorials) +- [ ] CI is green diff --git a/CHANGELOG.md b/CHANGELOG.md index 3bdb9b377..04471452f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,88 @@ # Lotus changelog +# v1.13.1 / 2021-11-26 + +This is an optional Lotus v1.13.1 release. 
+ +## New Features +- Shed: Add a util to find miner based on peerid ([filecoin-project/lotus#7544](https://github.com/filecoin-project/lotus/pull/7544)) +- Collect and expose graphsync metrics ([filecoin-project/lotus#7542](https://github.com/filecoin-project/lotus/pull/7542)) +- Shed: Add a util to find the most recent null tipset ([filecoin-project/lotus#7456](https://github.com/filecoin-project/lotus/pull/7456)) + +## Improvements +- Show prepared tasks in sealing jobs ([filecoin-project/lotus#7527](https://github.com/filecoin-project/lotus/pull/7527)) +- To make Deep happy ([filecoin-project/lotus#7546](https://github.com/filecoin-project/lotus/pull/7546)) +- Expose per-state sector counts on the prometheus endpoint ([filecoin-project/lotus#7541](https://github.com/filecoin-project/lotus/pull/7541)) +- Add storage-id flag to proving check ([filecoin-project/lotus#7479](https://github.com/filecoin-project/lotus/pull/7479)) +- FilecoinEC: Improve a log message ([filecoin-project/lotus#7499](https://github.com/filecoin-project/lotus/pull/7499)) +- itests: retry deal when control addr is out of funds ([filecoin-project/lotus#7454](https://github.com/filecoin-project/lotus/pull/7454)) +- Normlize selector use within lotus ([filecoin-project/lotus#7467](https://github.com/filecoin-project/lotus/pull/7467)) +- sealing: Improve scheduling of ready work ([filecoin-project/lotus#7335](https://github.com/filecoin-project/lotus/pull/7335)) +- Remove dead example code + dep ([filecoin-project/lotus#7466](https://github.com/filecoin-project/lotus/pull/7466)) + +## Bug Fixes +- fix the withdrawn amount unit ([filecoin-project/lotus#7563](https://github.com/filecoin-project/lotus/pull/7563)) +- rename vm#make{=>Account}Actor(). ([filecoin-project/lotus#7562](https://github.com/filecoin-project/lotus/pull/7562)) +- Fix used sector space accounting after AddPieceFailed ([filecoin-project/lotus#7530](https://github.com/filecoin-project/lotus/pull/7530)) +- Don't remove sector data when moving data into a shared path ([filecoin-project/lotus#7494](https://github.com/filecoin-project/lotus/pull/7494)) +- fix: support node instantiation in external packages ([filecoin-project/lotus#7511](https://github.com/filecoin-project/lotus/pull/7511)) +- Stop adding Jennifer's $HOME to lotus docs ([filecoin-project/lotus#7477](https://github.com/filecoin-project/lotus/pull/7477)) +- Bugfix: Use correct startup network versions ([filecoin-project/lotus#7486](https://github.com/filecoin-project/lotus/pull/7486)) +- Dep upgrade pass ([filecoin-project/lotus#7478](https://github.com/filecoin-project/lotus/pull/7478)) +- Remove obsolete GS testplan - it now lives in go-graphsync ([filecoin-project/lotus#7469](https://github.com/filecoin-project/lotus/pull/7469)) +- sealing: Recover sectors after failed AddPiece ([filecoin-project/lotus#7444](https://github.com/filecoin-project/lotus/pull/7444)) + +## Dependency Updates +- Update go-graphsync v0.10.1 ([filecoin-project/lotus#7457](https://github.com/filecoin-project/lotus/pull/7457)) +- update to proof v10.1.0 ([filecoin-project/lotus#7564](https://github.com/filecoin-project/lotus/pull/7564)) +- github.com/filecoin-project/specs-actors/v6 (v6.0.0 -> v6.0.1): +- github.com/filecoin-project/go-jsonrpc (v0.1.4-0.20210217175800-45ea43ac2bec -> v0.1.5): +- github.com/filecoin-project/go-fil-markets (v1.13.1 -> v1.13.3): +- github.com/filecoin-project/go-data-transfer (v1.11.1 -> v1.11.4): +- github.com/filecoin-project/go-crypto (v0.0.0-20191218222705-effae4ea9f03 -> v0.0.1): 
+- github.com/filecoin-project/go-commp-utils (v0.1.1-0.20210427191551-70bf140d31c7 -> v0.1.2): +- github.com/filecoin-project/go-cbor-util (v0.0.0-20191219014500-08c40a1e63a2 -> v0.0.1): +- github.com/filecoin-project/go-address (v0.0.5 -> v0.0.6): +- unpin the yamux dependency ([filecoin-project/lotus#7532](https://github.com/filecoin-project/lotus/pull/7532) +- peerstore@v0.2.9 was withdrawn, let's not depend on it directly ([filecoin-project/lotus#7481](https://github.com/filecoin-project/lotus/pull/7481)) +- chore(deps): use tagged github.com/ipld/go-ipld-selector-text-lite ([filecoin-project/lotus#7464](https://github.com/filecoin-project/lotus/pull/7464)) +- Stop indirectly depending on deprecated github.com/prometheus/common ([filecoin-project/lotus#7473](https://github.com/filecoin-project/lotus/pull/7473)) + +## Others +- fix the changelog ([filecoin-project/lotus#7594](https://github.com/filecoin-project/lotus/pull/7594)) +- v1.13.1-rc2 prep ([filecoin-project/lotus#7593](https://github.com/filecoin-project/lotus/pull/7593)) +- lotus v1.13.1-rc1 ([filecoin-project/lotus#7569](https://github.com/filecoin-project/lotus/pull/7569)) +- misc: back-port v1.13.0 back to master ([filecoin-project/lotus#7537](https://github.com/filecoin-project/lotus/pull/7537)) +- Inline codegen ([filecoin-project/lotus#7495](https://github.com/filecoin-project/lotus/pull/7495)) +- releases -> master ([filecoin-project/lotus#7507](https://github.com/filecoin-project/lotus/pull/7507)) +- Make chocolate back to master ([filecoin-project/lotus#7493](https://github.com/filecoin-project/lotus/pull/7493)) +- restore filters for the build-macos job ([filecoin-project/lotus#7455](https://github.com/filecoin-project/lotus/pull/7455)) +- bump master to v1.13.1-dev ([filecoin-project/lotus#7451](https://github.com/filecoin-project/lotus/pull/7451)) + +Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| @magik6k | 27 | +1285/-531 | 76 | +| @ribasushi | 7 | +265/-1635 | 21 | +| @raulk | 2 | +2/-737 | 13 | +| @nonsens | 4 | +391/-21 | 19 | +| @arajasek | 6 | +216/-23 | 14 | +| @jennijuju| 8 | +102/-37 | 29 | +| Steven Allen | 2 | +77/-29 | 6 | +| @jennijuju | 4 | +19/-18 | 11 | +| @dirkmc | 2 | +9/-9 | 4 | +| @@coryschwartz | 1 | +16/-2 | 2 | +| @frrist | 1 | +12/-0 | 2 | +| @Kubuxu | 5 | +5/-5 | 5 | +| @hunjixin | 2 | +6/-3 | 2 | +| @vyzo | 1 | +3/-3 | 2 | +| @@rvagg | 1 | +3/-3 | 2 | +| @hannahhoward | 1 | +3/-2 | 2 | +| Marten Seemann | 1 | +3/-0 | 1 | +| @ZenGround0 | 1 | +1/-1 | 1 | + + # v1.13.0 / 2021-10-18 Lotus v1.13.0 is a *highly recommended* feature release for all lotus users(i.e: storage providers, data brokers, application developers and so on) that supports the upcoming diff --git a/Dockerfile.lotus b/Dockerfile.lotus index 72c609305..812ad9f61 100644 --- a/Dockerfile.lotus +++ b/Dockerfile.lotus @@ -36,7 +36,7 @@ WORKDIR /opt/filecoin ARG RUSTFLAGS="" ARG GOFLAGS="" -RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway +RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway lotus-stats FROM ubuntu:20.04 AS base @@ -66,8 +66,6 @@ COPY scripts/docker-lotus-entrypoint.sh / ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters ENV LOTUS_PATH /var/lib/lotus -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car ENV 
DOCKER_LOTUS_IMPORT_WALLET "" @@ -92,8 +90,6 @@ MAINTAINER Lotus Development Team COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ ENV WALLET_PATH /var/lib/lotus-wallet -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 RUN mkdir /var/lib/lotus-wallet RUN chown fc: /var/lib/lotus-wallet @@ -114,10 +110,6 @@ MAINTAINER Lotus Development Team COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 -ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http - USER fc EXPOSE 1234 @@ -135,11 +127,7 @@ COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ COPY scripts/docker-lotus-miner-entrypoint.sh / ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http ENV LOTUS_MINER_PATH /var/lib/lotus-miner -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 -ENV DOCKER_LOTUS_MINER_INIT true RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters @@ -163,10 +151,7 @@ MAINTAINER Lotus Development Team COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http ENV LOTUS_WORKER_PATH /var/lib/lotus-worker -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 RUN mkdir /var/lib/lotus-worker RUN chown fc: /var/lib/lotus-worker @@ -186,16 +171,11 @@ CMD ["-help"] from base as lotus-all-in-one ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 ENV LOTUS_MINER_PATH /var/lib/lotus-miner ENV LOTUS_PATH /var/lib/lotus ENV LOTUS_WORKER_PATH /var/lib/lotus-worker -ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http ENV WALLET_PATH /var/lib/lotus-wallet ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car -ENV DOCKER_LOTUS_MINER_INIT true COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ @@ -203,6 +183,7 @@ COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-stats /usr/local/bin/ RUN mkdir /var/tmp/filecoin-proof-parameters RUN mkdir /var/lib/lotus diff --git a/api/api_full.go b/api/api_full.go index 158590b0d..06aaff99c 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -7,7 +7,6 @@ import ( "time" "github.com/ipfs/go-cid" - textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/libp2p/go-libp2p-core/peer" "github.com/filecoin-project/go-address" @@ -28,7 +27,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/paych" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/types" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" ) @@ -352,10 +350,11 @@ type FullNode interface { // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. 
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read // ClientRetrieve initiates the retrieval of a file, as specified in the order. - ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error //perm:admin - // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel - // of status updates. - ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin + ClientRetrieve(ctx context.Context, params RetrievalOrder) (*RestrievalRes, error) //perm:admin + // ClientRetrieveWait waits for retrieval to be complete + ClientRetrieveWait(ctx context.Context, deal retrievalmarket.DealID) error //perm:admin + // ClientExport exports a file stored in the local filestore to a system file + ClientExport(ctx context.Context, exportRef ExportRef, fileRef FileRef) error //perm:admin // ClientListRetrievals returns information about retrievals made by the local client ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write // ClientGetRetrievalUpdates returns status of updated retrieval deals @@ -630,10 +629,14 @@ type FullNode interface { // , , MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign + // MsigCancel cancels a previously-proposed multisig message + // It takes the following params: , + MsigCancel(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign + // MsigCancel cancels a previously-proposed multisig message // It takes the following params: , , , , // , , - MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign + MsigCancelTxnHash(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign // MsigAddPropose proposes adding a signer in the multisig // It takes the following params: , , @@ -930,15 +933,14 @@ type MarketDeal struct { } type RetrievalOrder struct { - // TODO: make this less unixfs specific - Root cid.Cid - Piece *cid.Cid - DatamodelPathSelector *textselector.Expression - Size uint64 + Root cid.Cid + Piece *cid.Cid + DataSelector *Selector + + // todo: Size/Total are only used for calculating price per byte; we should let users just pass that + Size uint64 + Total types.BigInt - FromLocalCAR string // if specified, get data from a local CARv2 file. 
- // TODO: support offset - Total types.BigInt UnsealPrice types.BigInt PaymentInterval uint64 PaymentIntervalIncrease uint64 diff --git a/api/api_gateway.go b/api/api_gateway.go index 862c6ddb5..fbe2e0cd6 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -31,6 +31,8 @@ import ( type Gateway interface { ChainHasObj(context.Context, cid.Cid) (bool, error) ChainHead(ctx context.Context) (*types.TipSet, error) + ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error) + ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error) ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*HeadChange, error) @@ -39,6 +41,7 @@ type Gateway interface { ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) ChainNotify(context.Context) (<-chan []*HeadChange, error) ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainGetGenesis(context.Context) (*types.TipSet, error) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) diff --git a/api/api_storage.go b/api/api_storage.go index 8cca2aa5b..c0d9c9f9c 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -118,17 +118,21 @@ type StorageMiner interface { WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin //storiface.WorkerReturn - ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true - ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true - ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true - ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true - ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true - ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true - ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true + ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true + ReturnSealPreCommit2(ctx context.Context, callID 
storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true + ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true + ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true + ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error //perm:admin retry:true + ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storage.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true + ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true + ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true + ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true // SealingSchedDiag dumps internal sealing scheduler state SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin @@ -145,6 +149,7 @@ type StorageMiner interface { StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin + StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) //perm:admin StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin diff --git a/api/api_worker.go b/api/api_worker.go index 4553c30e0..68d8e7baf 100644 --- a/api/api_worker.go +++ b/api/api_worker.go @@ -39,6 +39,10 @@ type Worker interface { SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin + ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin + ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin + ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, 
sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin + GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) //perm:admin ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 25b9ac8c9..5d87e9d20 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -91,6 +91,8 @@ func init() { storeIDExample := imports.ID(50) textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash") + apiSelExample := api.Selector("Links/21/Hash/Links/42/Hash") + clientEvent := retrievalmarket.ClientEventDealAccepted addExample(bitfield.NewFromSet([]uint64{5})) addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1) @@ -122,9 +124,12 @@ func init() { addExample(datatransfer.Ongoing) addExample(storeIDExample) addExample(&storeIDExample) + addExample(clientEvent) + addExample(&clientEvent) addExample(retrievalmarket.ClientEventDealAccepted) addExample(retrievalmarket.DealStatusNew) addExample(&textSelExample) + addExample(&apiSelExample) addExample(network.ReachabilityPublic) addExample(build.NewestNetworkVersion) addExample(map[string]int{"name": 42}) @@ -226,16 +231,18 @@ func init() { Hostname: "host", Resources: storiface.WorkerResources{ MemPhysical: 256 << 30, + MemUsed: 2 << 30, MemSwap: 120 << 30, - MemReserved: 2 << 30, + MemSwapUsed: 2 << 30, CPUs: 64, GPUs: []string{"aGPU 1337"}, + Resources: storiface.ResourceTable, }, }, Enabled: true, MemUsedMin: 0, MemUsedMax: 0, - GpuUsed: false, + GpuUsed: 0, CpuUse: 0, }, }) @@ -247,6 +254,13 @@ func init() { api.SectorState(sealing.Proving): 120, }) addExample([]abi.SectorNumber{123, 124}) + addExample([]storiface.SectorLock{ + { + Sector: abi.SectorID{Number: 123, Miner: 1000}, + Write: [storiface.FileTypes]uint{0, 0, 1}, + Read: [storiface.FileTypes]uint{2, 3, 0}, + }, + }) // worker specific addExample(storiface.AcquireMove) @@ -281,6 +295,7 @@ func init() { State: "ShardStateAvailable", Error: "", }) + addExample(storiface.ResourceTable) } func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) { diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index a6781b0b7..3f9d75433 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -25,7 +25,6 @@ import ( miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" types "github.com/filecoin-project/lotus/chain/types" alerting "github.com/filecoin-project/lotus/journal/alerting" - marketevents "github.com/filecoin-project/lotus/markets/loggers" dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" imports "github.com/filecoin-project/lotus/node/repo/imports" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" @@ -537,6 +536,20 @@ func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1) } +// ClientExport mocks base method. 
+func (m *MockFullNode) ClientExport(arg0 context.Context, arg1 api.ExportRef, arg2 api.FileRef) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientExport", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientExport indicates an expected call of ClientExport. +func (mr *MockFullNodeMockRecorder) ClientExport(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientExport", reflect.TypeOf((*MockFullNode)(nil).ClientExport), arg0, arg1, arg2) +} + // ClientFindData mocks base method. func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) { m.ctrl.T.Helper() @@ -775,17 +788,18 @@ func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, } // ClientRetrieve mocks base method. -func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error { +func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder) (*api.RestrievalRes, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1) + ret0, _ := ret[0].(*api.RestrievalRes) + ret1, _ := ret[1].(error) + return ret0, ret1 } // ClientRetrieve indicates an expected call of ClientRetrieve. -func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1) } // ClientRetrieveTryRestartInsufficientFunds mocks base method. @@ -802,19 +816,18 @@ func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1) } -// ClientRetrieveWithEvents mocks base method. -func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +// ClientRetrieveWait mocks base method. +func (m *MockFullNode) ClientRetrieveWait(arg0 context.Context, arg1 retrievalmarket.DealID) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) - ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "ClientRetrieveWait", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 } -// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents. -func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call { +// ClientRetrieveWait indicates an expected call of ClientRetrieveWait. 
+func (mr *MockFullNodeMockRecorder) ClientRetrieveWait(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWait", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWait), arg0, arg1) } // ClientStartDeal mocks base method. @@ -1428,18 +1441,33 @@ func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, a } // MsigCancel mocks base method. -func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) { +func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (*api.MessagePrototype, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } // MsigCancel indicates an expected call of MsigCancel. -func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { +func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3) +} + +// MsigCancelTxnHash mocks base method. +func (m *MockFullNode) MsigCancelTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCancelTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCancelTxnHash indicates an expected call of MsigCancelTxnHash. +func (mr *MockFullNodeMockRecorder) MsigCancelTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancelTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigCancelTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // MsigCreate mocks base method. 
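[Editor's note, not part of the diff] The api/api_full.go and api/mocks/mock_full.go hunks above replace the old blocking ClientRetrieve/ClientRetrieveWithEvents pair with a start/wait/export flow. A minimal sketch of how a caller might drive the new flow follows; the package and function names are illustrative, and the RetrievalOrder is assumed to be fully populated beforehand (e.g. from a query offer).

```go
// Illustrative sketch only: the reworked v1 retrieval flow.
// ClientRetrieve now returns a deal ID immediately, ClientRetrieveWait blocks
// until the retrieval deal completes, and ClientExport writes the data out of
// the local store to a file.
package retrievalsketch

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"

	lapi "github.com/filecoin-project/lotus/api"
)

func retrieveToFile(ctx context.Context, node lapi.FullNode, order lapi.RetrievalOrder, root cid.Cid, outPath string) error {
	// Kick off the retrieval; the returned RestrievalRes carries the deal ID.
	res, err := node.ClientRetrieve(ctx, order)
	if err != nil {
		return fmt.Errorf("starting retrieval: %w", err)
	}

	// Wait for the retrieval deal to complete (or fail).
	if err := node.ClientRetrieveWait(ctx, res.DealID); err != nil {
		return fmt.Errorf("waiting for retrieval: %w", err)
	}

	// Export the retrieved DAG from the local store to a regular file.
	return node.ClientExport(ctx,
		lapi.ExportRef{Root: root, DealID: res.DealID},
		lapi.FileRef{Path: outPath},
	)
}
```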
diff --git a/api/proxy_gen.go b/api/proxy_gen.go index b36f19a7e..10a21b8fa 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -28,7 +28,6 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/journal/alerting" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" "github.com/filecoin-project/specs-storage/storage" @@ -162,6 +161,8 @@ type FullNodeStruct struct { ClientDealSize func(p0 context.Context, p1 cid.Cid) (DataSize, error) `perm:"read"` + ClientExport func(p0 context.Context, p1 ExportRef, p2 FileRef) error `perm:"admin"` + ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) `perm:"read"` ClientGenCar func(p0 context.Context, p1 FileRef, p2 string) error `perm:"write"` @@ -194,11 +195,11 @@ type FullNodeStruct struct { ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error `perm:"admin"` + ClientRetrieve func(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) `perm:"admin"` ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"` - ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` + ClientRetrieveWait func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"admin"` ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"` @@ -270,7 +271,9 @@ type FullNodeStruct struct { MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) `perm:"sign"` - MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"` + MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) `perm:"sign"` + + MsigCancelTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"` MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) `perm:"sign"` @@ -478,8 +481,14 @@ type GatewayStruct struct { Internal struct { ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `` + ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `` + ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `` + ChainGetParentMessages func(p0 context.Context, p1 cid.Cid) ([]Message, error) `` + + ChainGetParentReceipts func(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) `` + ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) `` ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `` @@ -707,12 +716,20 @@ type StorageMinerStruct struct { ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, 
p2 *storiface.CallError) error `perm:"admin"` + ReturnGenerateSectorKeyFromData func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnMoveStorage func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnProveReplicaUpdate1 func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error `perm:"admin"` + + ReturnProveReplicaUpdate2 func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error `perm:"admin"` + ReturnReadPiece func(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error `perm:"admin"` ReturnReleaseUnsealed func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error `perm:"admin"` + ReturnSealCommit1 func(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error `perm:"admin"` ReturnSealCommit2 func(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error `perm:"admin"` @@ -785,6 +802,8 @@ type StorageMinerStruct struct { StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) `perm:"admin"` + StorageGetLocks func(p0 context.Context) (storiface.SectorLocks, error) `perm:"admin"` + StorageInfo func(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) `perm:"admin"` StorageList func(p0 context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"` @@ -844,6 +863,8 @@ type WorkerStruct struct { FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` + GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"` + Info func(p0 context.Context) (storiface.WorkerInfo, error) `perm:"admin"` MoveStorage func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"` @@ -852,10 +873,16 @@ type WorkerStruct struct { ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"` + ProveReplicaUpdate1 func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) `perm:"admin"` + + ProveReplicaUpdate2 func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) `perm:"admin"` + ReleaseUnsealed func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` Remove func(p0 context.Context, p1 abi.SectorID) error `perm:"admin"` + ReplicaUpdate func(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"` + SealCommit1 func(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) `perm:"admin"` SealCommit2 func(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) `perm:"admin"` @@ -1349,6 +1376,17 @@ func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, return *new(DataSize), ErrNotSupported } +func (s *FullNodeStruct) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) 
error { + if s.Internal.ClientExport == nil { + return ErrNotSupported + } + return s.Internal.ClientExport(p0, p1, p2) +} + +func (s *FullNodeStub) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error { + return ErrNotSupported +} + func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) { if s.Internal.ClientFindData == nil { return *new([]QueryOffer), ErrNotSupported @@ -1525,15 +1563,15 @@ func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatran return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error { +func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) { if s.Internal.ClientRetrieve == nil { - return ErrNotSupported + return nil, ErrNotSupported } - return s.Internal.ClientRetrieve(p0, p1, p2) + return s.Internal.ClientRetrieve(p0, p1) } -func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error { - return ErrNotSupported +func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) { + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { @@ -1547,15 +1585,15 @@ func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Cont return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) { - if s.Internal.ClientRetrieveWithEvents == nil { - return nil, ErrNotSupported +func (s *FullNodeStruct) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error { + if s.Internal.ClientRetrieveWait == nil { + return ErrNotSupported } - return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) + return s.Internal.ClientRetrieveWait(p0, p1) } -func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) { - return nil, ErrNotSupported +func (s *FullNodeStub) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error { + return ErrNotSupported } func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { @@ -1943,14 +1981,25 @@ func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address return nil, ErrNotSupported } -func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { +func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { if s.Internal.MsigCancel == nil { return nil, ErrNotSupported } - return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7) + return s.Internal.MsigCancel(p0, p1, p2, p3) } -func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { +func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { + return nil, ErrNotSupported +} + +func (s *FullNodeStruct) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 
[]byte) (*MessagePrototype, error) { + if s.Internal.MsigCancelTxnHash == nil { + return nil, ErrNotSupported + } + return s.Internal.MsigCancelTxnHash(p0, p1, p2, p3, p4, p5, p6, p7) +} + +func (s *FullNodeStub) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { return nil, ErrNotSupported } @@ -3032,6 +3081,17 @@ func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*Bl return nil, ErrNotSupported } +func (s *GatewayStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + if s.Internal.ChainGetGenesis == nil { + return nil, ErrNotSupported + } + return s.Internal.ChainGetGenesis(p0) +} + +func (s *GatewayStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { if s.Internal.ChainGetMessage == nil { return nil, ErrNotSupported @@ -3043,6 +3103,28 @@ func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Me return nil, ErrNotSupported } +func (s *GatewayStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) { + if s.Internal.ChainGetParentMessages == nil { + return *new([]Message), ErrNotSupported + } + return s.Internal.ChainGetParentMessages(p0, p1) +} + +func (s *GatewayStub) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) { + return *new([]Message), ErrNotSupported +} + +func (s *GatewayStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + if s.Internal.ChainGetParentReceipts == nil { + return *new([]*types.MessageReceipt), ErrNotSupported + } + return s.Internal.ChainGetParentReceipts(p0, p1) +} + +func (s *GatewayStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + return *new([]*types.MessageReceipt), ErrNotSupported +} + func (s *GatewayStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) { if s.Internal.ChainGetPath == nil { return *new([]*HeadChange), ErrNotSupported @@ -4154,6 +4236,17 @@ func (s *StorageMinerStub) ReturnFinalizeSector(p0 context.Context, p1 storiface return ErrNotSupported } +func (s *StorageMinerStruct) ReturnGenerateSectorKeyFromData(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + if s.Internal.ReturnGenerateSectorKeyFromData == nil { + return ErrNotSupported + } + return s.Internal.ReturnGenerateSectorKeyFromData(p0, p1, p2) +} + +func (s *StorageMinerStub) ReturnGenerateSectorKeyFromData(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { if s.Internal.ReturnMoveStorage == nil { return ErrNotSupported @@ -4165,6 +4258,28 @@ func (s *StorageMinerStub) ReturnMoveStorage(p0 context.Context, p1 storiface.Ca return ErrNotSupported } +func (s *StorageMinerStruct) ReturnProveReplicaUpdate1(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error { + if s.Internal.ReturnProveReplicaUpdate1 == nil { + return ErrNotSupported + } + return s.Internal.ReturnProveReplicaUpdate1(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnProveReplicaUpdate1(p0 context.Context, p1 storiface.CallID, p2 
storage.ReplicaVanillaProofs, p3 *storiface.CallError) error { + return ErrNotSupported +} + +func (s *StorageMinerStruct) ReturnProveReplicaUpdate2(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error { + if s.Internal.ReturnProveReplicaUpdate2 == nil { + return ErrNotSupported + } + return s.Internal.ReturnProveReplicaUpdate2(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnProveReplicaUpdate2(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error { if s.Internal.ReturnReadPiece == nil { return ErrNotSupported @@ -4187,6 +4302,17 @@ func (s *StorageMinerStub) ReturnReleaseUnsealed(p0 context.Context, p1 storifac return ErrNotSupported } +func (s *StorageMinerStruct) ReturnReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error { + if s.Internal.ReturnReplicaUpdate == nil { + return ErrNotSupported + } + return s.Internal.ReturnReplicaUpdate(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error { if s.Internal.ReturnSealCommit1 == nil { return ErrNotSupported @@ -4583,6 +4709,17 @@ func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID return *new([]stores.SectorStorageInfo), ErrNotSupported } +func (s *StorageMinerStruct) StorageGetLocks(p0 context.Context) (storiface.SectorLocks, error) { + if s.Internal.StorageGetLocks == nil { + return *new(storiface.SectorLocks), ErrNotSupported + } + return s.Internal.StorageGetLocks(p0) +} + +func (s *StorageMinerStub) StorageGetLocks(p0 context.Context) (storiface.SectorLocks, error) { + return *new(storiface.SectorLocks), ErrNotSupported +} + func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) { if s.Internal.StorageInfo == nil { return *new(stores.StorageInfo), ErrNotSupported @@ -4814,6 +4951,17 @@ func (s *WorkerStub) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 return *new(storiface.CallID), ErrNotSupported } +func (s *WorkerStruct) GenerateSectorKeyFromData(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) { + if s.Internal.GenerateSectorKeyFromData == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.GenerateSectorKeyFromData(p0, p1, p2) +} + +func (s *WorkerStub) GenerateSectorKeyFromData(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + func (s *WorkerStruct) Info(p0 context.Context) (storiface.WorkerInfo, error) { if s.Internal.Info == nil { return *new(storiface.WorkerInfo), ErrNotSupported @@ -4858,6 +5006,28 @@ func (s *WorkerStub) ProcessSession(p0 context.Context) (uuid.UUID, error) { return *new(uuid.UUID), ErrNotSupported } +func (s *WorkerStruct) ProveReplicaUpdate1(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) { + if s.Internal.ProveReplicaUpdate1 == nil { + return *new(storiface.CallID), ErrNotSupported + } + return 
s.Internal.ProveReplicaUpdate1(p0, p1, p2, p3, p4) +} + +func (s *WorkerStub) ProveReplicaUpdate1(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + +func (s *WorkerStruct) ProveReplicaUpdate2(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) { + if s.Internal.ProveReplicaUpdate2 == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.ProveReplicaUpdate2(p0, p1, p2, p3, p4, p5) +} + +func (s *WorkerStub) ProveReplicaUpdate2(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + func (s *WorkerStruct) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { if s.Internal.ReleaseUnsealed == nil { return *new(storiface.CallID), ErrNotSupported @@ -4880,6 +5050,17 @@ func (s *WorkerStub) Remove(p0 context.Context, p1 abi.SectorID) error { return ErrNotSupported } +func (s *WorkerStruct) ReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) { + if s.Internal.ReplicaUpdate == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.ReplicaUpdate(p0, p1, p2) +} + +func (s *WorkerStub) ReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + func (s *WorkerStruct) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) { if s.Internal.SealCommit1 == nil { return *new(storiface.CallID), ErrNotSupported diff --git a/api/types.go b/api/types.go index 9d887b0a1..0ecda0405 100644 --- a/api/types.go +++ b/api/types.go @@ -5,11 +5,10 @@ import ( "fmt" "time" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/lotus/chain/types" - datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" @@ -194,4 +193,47 @@ type RetrievalInfo struct { TransferChannelID *datatransfer.ChannelID DataTransfer *DataTransferChannel + + // optional event if part of ClientGetRetrievalUpdates + Event *retrievalmarket.ClientEvent +} + +type RestrievalRes struct { + DealID retrievalmarket.DealID +} + +// Selector specifies ipld selector string +// - if the string starts with '{', it's interpreted as json selector string +// see https://ipld.io/specs/selectors/ and https://ipld.io/specs/selectors/fixtures/selector-fixtures-1/ +// - otherwise the string is interpreted as ipld-selector-text-lite (simple ipld path) +// see https://github.com/ipld/go-ipld-selector-text-lite +type Selector string + +type DagSpec struct { + // DataSelector matches data to be retrieved + // - when using textselector, the path specifies subtree + // - the matched graph must have a single root + DataSelector *Selector + + // ExportMerkleProof is applicable only when exporting to a CAR file via a path textselector + // When true, in addition to the selection target, the resulting CAR will contain every 
block along the + // path back to, and including the original root + // When false the resulting CAR contains only the blocks of the target subdag + ExportMerkleProof bool +} + +type ExportRef struct { + Root cid.Cid + + // DAGs array specifies a list of DAGs to export + // - If exporting into unixfs files, only one DAG is supported, DataSelector is only used to find the targeted root node + // - If exporting into a car file + // - When exactly one text-path DataSelector is specified exports the subgraph and its full merkle-path from the original root + // - Otherwise ( multiple paths and/or JSON selector specs) determines each individual subroot and exports the subtrees as a multi-root car + // - When not specified defaults to a single DAG: + // - Data - the entire DAG: `{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}` + DAGs []DagSpec + + FromLocalCAR string // if specified, get data from a local CARv2 file. + DealID retrievalmarket.DealID } diff --git a/api/v0api/full.go b/api/v0api/full.go index d7e38ce97..b37e89155 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" + textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/libp2p/go-libp2p-core/peer" "github.com/filecoin-project/lotus/api" @@ -325,10 +326,10 @@ type FullNode interface { // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read // ClientRetrieve initiates the retrieval of a file, as specified in the order. - ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error //perm:admin + ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error //perm:admin // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel // of status updates. - ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin + ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin // ClientQueryAsk returns a signed StorageAsk from the specified miner. // ClientListRetrievals returns information about retrievals made by the local client ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write @@ -714,3 +715,37 @@ type FullNode interface { // the path specified when calling CreateBackup is within the base path CreateBackup(ctx context.Context, fpath string) error //perm:admin } + +func OfferOrder(o api.QueryOffer, client address.Address) RetrievalOrder { + return RetrievalOrder{ + Root: o.Root, + Piece: o.Piece, + Size: o.Size, + Total: o.MinPrice, + UnsealPrice: o.UnsealPrice, + PaymentInterval: o.PaymentInterval, + PaymentIntervalIncrease: o.PaymentIntervalIncrease, + Client: client, + + Miner: o.Miner, + MinerPeer: &o.MinerPeer, + } +} + +type RetrievalOrder struct { + // TODO: make this less unixfs specific + Root cid.Cid + Piece *cid.Cid + DatamodelPathSelector *textselector.Expression + Size uint64 + + FromLocalCAR string // if specified, get data from a local CARv2 file. 
+ // TODO: support offset + Total types.BigInt + UnsealPrice types.BigInt + PaymentInterval uint64 + PaymentIntervalIncrease uint64 + Client address.Address + Miner address.Address + MinerPeer *retrievalmarket.RetrievalPeer +} diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index dd6330a02..af0687fe5 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -125,11 +125,11 @@ type FullNodeStruct struct { ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - ClientRetrieve func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error `perm:"admin"` + ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error `perm:"admin"` ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"` - ClientRetrieveWithEvents func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` + ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` @@ -965,14 +965,14 @@ func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatran return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error { +func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error { if s.Internal.ClientRetrieve == nil { return ErrNotSupported } return s.Internal.ClientRetrieve(p0, p1, p2) } -func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error { +func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error { return ErrNotSupported } @@ -987,14 +987,14 @@ func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Cont return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { if s.Internal.ClientRetrieveWithEvents == nil { return nil, ErrNotSupported } return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) } -func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { return nil, ErrNotSupported } diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go index 0344eebf3..3e9caaee8 100644 --- a/api/v0api/v0mocks/mock_full.go +++ b/api/v0api/v0mocks/mock_full.go @@ -21,6 +21,7 @@ import ( network "github.com/filecoin-project/go-state-types/network" api "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" + v0api "github.com/filecoin-project/lotus/api/v0api" miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" types "github.com/filecoin-project/lotus/chain/types" alerting "github.com/filecoin-project/lotus/journal/alerting" @@ -760,7 +761,7 @@ func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, 
arg1, arg2, } // ClientRetrieve mocks base method. -func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error { +func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) ret0, _ := ret[0].(error) @@ -788,7 +789,7 @@ func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(ar } // ClientRetrieveWithEvents mocks base method. -func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent) diff --git a/api/v0api/v1_wrapper.go b/api/v0api/v1_wrapper.go index 7f7291600..7e0d7a94a 100644 --- a/api/v0api/v1_wrapper.go +++ b/api/v0api/v1_wrapper.go @@ -3,7 +3,10 @@ package v0api import ( "context" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" + marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/types" @@ -108,7 +111,7 @@ func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Add } func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { - p, err := w.FullNode.MsigCancel(ctx, msig, txID, to, amt, src, method, params) + p, err := w.FullNode.MsigCancelTxnHash(ctx, msig, txID, to, amt, src, method, params) if err != nil { return cid.Undef, xerrors.Errorf("creating prototype: %w", err) } @@ -194,4 +197,144 @@ func (w *WrapperV1Full) ChainGetRandomnessFromBeacon(ctx context.Context, tsk ty return w.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk) } +func (w *WrapperV1Full) ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error { + events := make(chan marketevents.RetrievalEvent) + go w.clientRetrieve(ctx, order, ref, events) + + for { + select { + case evt, ok := <-events: + if !ok { // done successfully + return nil + } + + if evt.Err != "" { + return xerrors.Errorf("retrieval failed: %s", evt.Err) + } + case <-ctx.Done(): + return xerrors.Errorf("retrieval timed out") + } + } +} + +func (w *WrapperV1Full) ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { + events := make(chan marketevents.RetrievalEvent) + go w.clientRetrieve(ctx, order, ref, events) + return events, nil +} + +func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents <-chan api.RetrievalInfo, events chan marketevents.RetrievalEvent) error { + for { + var subscribeEvent api.RetrievalInfo + var evt retrievalmarket.ClientEvent + select { + case <-ctx.Done(): + return xerrors.New("Retrieval Timed Out") + case subscribeEvent = <-subscribeEvents: + if subscribeEvent.ID != dealID { + // we can't check the deal ID ahead of time because: + // 1. We need to subscribe before retrieving. + // 2. 
We won't know the deal ID until after retrieving. + continue + } + if subscribeEvent.Event != nil { + evt = *subscribeEvent.Event + } + } + + select { + case <-ctx.Done(): + return xerrors.New("Retrieval Timed Out") + case events <- marketevents.RetrievalEvent{ + Event: evt, + Status: subscribeEvent.Status, + BytesReceived: subscribeEvent.BytesReceived, + FundsSpent: subscribeEvent.TotalPaid, + }: + } + + switch subscribeEvent.Status { + case retrievalmarket.DealStatusCompleted: + return nil + case retrievalmarket.DealStatusRejected: + return xerrors.Errorf("Retrieval Proposal Rejected: %s", subscribeEvent.Message) + case + retrievalmarket.DealStatusDealNotFound, + retrievalmarket.DealStatusErrored: + return xerrors.Errorf("Retrieval Error: %s", subscribeEvent.Message) + } + } +} + +func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) { + defer close(events) + + finish := func(e error) { + if e != nil { + events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()} + } + } + + var dealID retrievalmarket.DealID + if order.FromLocalCAR == "" { + // Subscribe to events before retrieving to avoid losing events. + subscribeCtx, cancel := context.WithCancel(ctx) + defer cancel() + retrievalEvents, err := w.ClientGetRetrievalUpdates(subscribeCtx) + + if err != nil { + finish(xerrors.Errorf("GetRetrievalUpdates failed: %w", err)) + return + } + + retrievalRes, err := w.FullNode.ClientRetrieve(ctx, api.RetrievalOrder{ + Root: order.Root, + Piece: order.Piece, + Size: order.Size, + Total: order.Total, + UnsealPrice: order.UnsealPrice, + PaymentInterval: order.PaymentInterval, + PaymentIntervalIncrease: order.PaymentIntervalIncrease, + Client: order.Client, + Miner: order.Miner, + MinerPeer: order.MinerPeer, + }) + + if err != nil { + finish(xerrors.Errorf("Retrieve failed: %w", err)) + return + } + + dealID = retrievalRes.DealID + + err = readSubscribeEvents(ctx, retrievalRes.DealID, retrievalEvents, events) + if err != nil { + finish(xerrors.Errorf("Retrieve: %w", err)) + return + } + } + + // If ref is nil, it only fetches the data into the configured blockstore. + if ref == nil { + finish(nil) + return + } + + eref := api.ExportRef{ + Root: order.Root, + FromLocalCAR: order.FromLocalCAR, + DealID: dealID, + } + + if order.DatamodelPathSelector != nil { + s := api.Selector(*order.DatamodelPathSelector) + eref.DAGs = append(eref.DAGs, api.DagSpec{ + DataSelector: &s, + ExportMerkleProof: true, + }) + } + + finish(w.ClientExport(ctx, eref, *ref)) +} + var _ FullNode = &WrapperV1Full{} diff --git a/api/version.go b/api/version.go index 2c87fe0a4..93148f28d 100644 --- a/api/version.go +++ b/api/version.go @@ -58,7 +58,7 @@ var ( FullAPIVersion1 = newVer(2, 1, 0) MinerAPIVersion0 = newVer(1, 2, 0) - WorkerAPIVersion0 = newVer(1, 1, 0) + WorkerAPIVersion0 = newVer(1, 5, 0) ) //nolint:varcheck,deadcode diff --git a/blockstore/api.go b/blockstore/api.go index 6715b4766..dc4c03452 100644 --- a/blockstore/api.go +++ b/blockstore/api.go @@ -25,35 +25,35 @@ func NewAPIBlockstore(cio ChainIO) Blockstore { return Adapt(bs) // return an adapted blockstore. 
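Hypothetical caller-side sketch, not part of this patch: consuming the event channel returned by the v0 wrapper's ClientRetrieveWithEvents shown above. The node, order and ref values are assumed to be supplied by the caller.

package example

import (
	"context"
	"fmt"

	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v0api"
)

// retrieveAndWait blocks until the retrieval (and, when ref is non-nil, the export) finishes.
func retrieveAndWait(ctx context.Context, node v0api.FullNode, order v0api.RetrievalOrder, ref *api.FileRef) error {
	events, err := node.ClientRetrieveWithEvents(ctx, order, ref)
	if err != nil {
		return xerrors.Errorf("starting retrieval: %w", err)
	}
	for evt := range events {
		if evt.Err != "" {
			return xerrors.Errorf("retrieval failed: %s", evt.Err)
		}
		fmt.Printf("retrieval status %d, %d bytes received\n", evt.Status, evt.BytesReceived)
	}
	// the wrapper closes the channel only after a successful export
	return nil
}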
} -func (a *apiBlockstore) DeleteBlock(cid.Cid) error { +func (a *apiBlockstore) DeleteBlock(context.Context, cid.Cid) error { return xerrors.New("not supported") } -func (a *apiBlockstore) Has(c cid.Cid) (bool, error) { - return a.api.ChainHasObj(context.TODO(), c) +func (a *apiBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + return a.api.ChainHasObj(ctx, c) } -func (a *apiBlockstore) Get(c cid.Cid) (blocks.Block, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) +func (a *apiBlockstore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + bb, err := a.api.ChainReadObj(ctx, c) if err != nil { return nil, err } return blocks.NewBlockWithCid(bb, c) } -func (a *apiBlockstore) GetSize(c cid.Cid) (int, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) +func (a *apiBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + bb, err := a.api.ChainReadObj(ctx, c) if err != nil { return 0, err } return len(bb), nil } -func (a *apiBlockstore) Put(blocks.Block) error { +func (a *apiBlockstore) Put(context.Context, blocks.Block) error { return xerrors.New("not supported") } -func (a *apiBlockstore) PutMany([]blocks.Block) error { +func (a *apiBlockstore) PutMany(context.Context, []blocks.Block) error { return xerrors.New("not supported") } diff --git a/blockstore/badger/blockstore.go b/blockstore/badger/blockstore.go index a0b51d8df..270e5b820 100644 --- a/blockstore/badger/blockstore.go +++ b/blockstore/badger/blockstore.go @@ -525,7 +525,7 @@ func (b *Blockstore) Size() (int64, error) { // View implements blockstore.Viewer, which leverages zero-copy read-only // access to values. -func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { +func (b *Blockstore) View(ctx context.Context, cid cid.Cid, fn func([]byte) error) error { if err := b.access(); err != nil { return err } @@ -552,7 +552,7 @@ func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { } // Has implements Blockstore.Has. -func (b *Blockstore) Has(cid cid.Cid) (bool, error) { +func (b *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { if err := b.access(); err != nil { return false, err } @@ -582,7 +582,7 @@ func (b *Blockstore) Has(cid cid.Cid) (bool, error) { } // Get implements Blockstore.Get. -func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { +func (b *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { if !cid.Defined() { return nil, blockstore.ErrNotFound } @@ -619,7 +619,7 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { } // GetSize implements Blockstore.GetSize. -func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { +func (b *Blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { if err := b.access(); err != nil { return 0, err } @@ -652,7 +652,7 @@ func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { } // Put implements Blockstore.Put. -func (b *Blockstore) Put(block blocks.Block) error { +func (b *Blockstore) Put(ctx context.Context, block blocks.Block) error { if err := b.access(); err != nil { return err } @@ -691,7 +691,7 @@ func (b *Blockstore) Put(block blocks.Block) error { } // PutMany implements Blockstore.PutMany. -func (b *Blockstore) PutMany(blocks []blocks.Block) error { +func (b *Blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { if err := b.access(); err != nil { return err } @@ -755,7 +755,7 @@ func (b *Blockstore) PutMany(blocks []blocks.Block) error { } // DeleteBlock implements Blockstore.DeleteBlock. 
-func (b *Blockstore) DeleteBlock(cid cid.Cid) error { +func (b *Blockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { if err := b.access(); err != nil { return err } @@ -774,7 +774,7 @@ func (b *Blockstore) DeleteBlock(cid cid.Cid) error { }) } -func (b *Blockstore) DeleteMany(cids []cid.Cid) error { +func (b *Blockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { if err := b.access(); err != nil { return err } diff --git a/blockstore/badger/blockstore_test.go b/blockstore/badger/blockstore_test.go index d8ef5241b..4619d4ec3 100644 --- a/blockstore/badger/blockstore_test.go +++ b/blockstore/badger/blockstore_test.go @@ -2,6 +2,7 @@ package badgerbs import ( "bytes" + "context" "fmt" "io/ioutil" "os" @@ -98,6 +99,7 @@ func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, } func testMove(t *testing.T, optsF func(string) Options) { + ctx := context.Background() basePath, err := ioutil.TempDir("", "") if err != nil { t.Fatal(err) @@ -122,7 +124,7 @@ func testMove(t *testing.T, optsF func(string) Options) { // add some blocks for i := 0; i < 10; i++ { blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) - err := db.Put(blk) + err := db.Put(ctx, blk) if err != nil { t.Fatal(err) } @@ -132,7 +134,7 @@ func testMove(t *testing.T, optsF func(string) Options) { // delete some of them for i := 5; i < 10; i++ { c := have[i].Cid() - err := db.DeleteBlock(c) + err := db.DeleteBlock(ctx, c) if err != nil { t.Fatal(err) } @@ -145,7 +147,7 @@ func testMove(t *testing.T, optsF func(string) Options) { g.Go(func() error { for i := 10; i < 1000; i++ { blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) - err := db.Put(blk) + err := db.Put(ctx, blk) if err != nil { return err } @@ -165,7 +167,7 @@ func testMove(t *testing.T, optsF func(string) Options) { // now check that we have all the blocks in have and none in the deleted lists checkBlocks := func() { for _, blk := range have { - has, err := db.Has(blk.Cid()) + has, err := db.Has(ctx, blk.Cid()) if err != nil { t.Fatal(err) } @@ -174,7 +176,7 @@ func testMove(t *testing.T, optsF func(string) Options) { t.Fatal("missing block") } - blk2, err := db.Get(blk.Cid()) + blk2, err := db.Get(ctx, blk.Cid()) if err != nil { t.Fatal(err) } @@ -185,7 +187,7 @@ func testMove(t *testing.T, optsF func(string) Options) { } for _, c := range deleted { - has, err := db.Has(c) + has, err := db.Has(ctx, c) if err != nil { t.Fatal(err) } diff --git a/blockstore/badger/blockstore_test_suite.go b/blockstore/badger/blockstore_test_suite.go index 93be82ac8..167d1b2ab 100644 --- a/blockstore/badger/blockstore_test_suite.go +++ b/blockstore/badger/blockstore_test_suite.go @@ -44,28 +44,31 @@ func (s *Suite) RunTests(t *testing.T, prefix string) { } func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() } c := cid.NewCidV0(u.Hash([]byte("stuff"))) - bl, err := bs.Get(c) + bl, err := bs.Get(ctx, c) require.Nil(t, bl) require.Equal(t, blockstore.ErrNotFound, err) } func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() } - _, err := bs.Get(cid.Undef) + _, err := bs.Get(ctx, cid.Undef) require.Equal(t, blockstore.ErrNotFound, err) } func (s *Suite) TestPutThenGetBlock(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) 
if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -73,15 +76,16 @@ func (s *Suite) TestPutThenGetBlock(t *testing.T) { orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) - fetched, err := bs.Get(orig.Cid()) + fetched, err := bs.Get(ctx, orig.Cid()) require.NoError(t, err) require.Equal(t, orig.RawData(), fetched.RawData()) } func (s *Suite) TestHas(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -89,19 +93,20 @@ func (s *Suite) TestHas(t *testing.T) { orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) - ok, err := bs.Has(orig.Cid()) + ok, err := bs.Has(ctx, orig.Cid()) require.NoError(t, err) require.True(t, ok) - ok, err = bs.Has(blocks.NewBlock([]byte("another thing")).Cid()) + ok, err = bs.Has(ctx, blocks.NewBlock([]byte("another thing")).Cid()) require.NoError(t, err) require.False(t, ok) } func (s *Suite) TestCidv0v1(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -109,15 +114,17 @@ func (s *Suite) TestCidv0v1(t *testing.T) { orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) - fetched, err := bs.Get(cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash())) + fetched, err := bs.Get(ctx, cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash())) require.NoError(t, err) require.Equal(t, orig.RawData(), fetched.RawData()) } func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) { + ctx := context.Background() + bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -127,21 +134,21 @@ func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) { missingBlock := blocks.NewBlock([]byte("missingBlock")) emptyBlock := blocks.NewBlock([]byte{}) - err := bs.Put(block) + err := bs.Put(ctx, block) require.NoError(t, err) - blockSize, err := bs.GetSize(block.Cid()) + blockSize, err := bs.GetSize(ctx, block.Cid()) require.NoError(t, err) require.Len(t, block.RawData(), blockSize) - err = bs.Put(emptyBlock) + err = bs.Put(ctx, emptyBlock) require.NoError(t, err) - emptySize, err := bs.GetSize(emptyBlock.Cid()) + emptySize, err := bs.GetSize(ctx, emptyBlock.Cid()) require.NoError(t, err) require.Zero(t, emptySize) - missingSize, err := bs.GetSize(missingBlock.Cid()) + missingSize, err := bs.GetSize(ctx, missingBlock.Cid()) require.Equal(t, blockstore.ErrNotFound, err) require.Equal(t, -1, missingSize) } @@ -203,6 +210,7 @@ func (s *Suite) TestDoubleClose(t *testing.T) { } func (s *Suite) TestReopenPutGet(t *testing.T) { + ctx := context.Background() bs, path := s.NewBlockstore(t) c, ok := bs.(io.Closer) if !ok { @@ -210,7 +218,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) { } orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) err = c.Close() @@ -219,7 +227,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) { bs, err = s.OpenBlockstore(t, path) require.NoError(t, err) - fetched, err := bs.Get(orig.Cid()) + fetched, err := bs.Get(ctx, orig.Cid()) require.NoError(t, err) require.Equal(t, orig.RawData(), fetched.RawData()) @@ -228,6 +236,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) { } func (s *Suite) TestPutMany(t *testing.T) { + ctx := 
context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -238,15 +247,15 @@ func (s *Suite) TestPutMany(t *testing.T) { blocks.NewBlock([]byte("foo2")), blocks.NewBlock([]byte("foo3")), } - err := bs.PutMany(blks) + err := bs.PutMany(ctx, blks) require.NoError(t, err) for _, blk := range blks { - fetched, err := bs.Get(blk.Cid()) + fetched, err := bs.Get(ctx, blk.Cid()) require.NoError(t, err) require.Equal(t, blk.RawData(), fetched.RawData()) - ok, err := bs.Has(blk.Cid()) + ok, err := bs.Has(ctx, blk.Cid()) require.NoError(t, err) require.True(t, ok) } @@ -259,6 +268,7 @@ func (s *Suite) TestPutMany(t *testing.T) { } func (s *Suite) TestDelete(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -269,10 +279,10 @@ func (s *Suite) TestDelete(t *testing.T) { blocks.NewBlock([]byte("foo2")), blocks.NewBlock([]byte("foo3")), } - err := bs.PutMany(blks) + err := bs.PutMany(ctx, blks) require.NoError(t, err) - err = bs.DeleteBlock(blks[1].Cid()) + err = bs.DeleteBlock(ctx, blks[1].Cid()) require.NoError(t, err) ch, err := bs.AllKeysChan(context.Background()) @@ -285,17 +295,17 @@ func (s *Suite) TestDelete(t *testing.T) { cid.NewCidV1(cid.Raw, blks[2].Cid().Hash()), }) - has, err := bs.Has(blks[1].Cid()) + has, err := bs.Has(ctx, blks[1].Cid()) require.NoError(t, err) require.False(t, has) - } func insertBlocks(t *testing.T, bs blockstore.BasicBlockstore, count int) []cid.Cid { + ctx := context.Background() keys := make([]cid.Cid, count) for i := 0; i < count; i++ { block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) - err := bs.Put(block) + err := bs.Put(ctx, block) require.NoError(t, err) // NewBlock assigns a CIDv0; we convert it to CIDv1 because that's what // the store returns. 
diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go index 8ede31eb9..409c100cf 100644 --- a/blockstore/blockstore.go +++ b/blockstore/blockstore.go @@ -1,6 +1,8 @@ package blockstore import ( + "context" + cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" @@ -27,7 +29,7 @@ type BasicBlockstore = blockstore.Blockstore type Viewer = blockstore.Viewer type BatchDeleter interface { - DeleteMany(cids []cid.Cid) error + DeleteMany(ctx context.Context, cids []cid.Cid) error } // BlockstoreIterator is a trait for efficient iteration @@ -93,17 +95,17 @@ type adaptedBlockstore struct { var _ Blockstore = (*adaptedBlockstore)(nil) -func (a *adaptedBlockstore) View(cid cid.Cid, callback func([]byte) error) error { - blk, err := a.Get(cid) +func (a *adaptedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error { + blk, err := a.Get(ctx, cid) if err != nil { return err } return callback(blk.RawData()) } -func (a *adaptedBlockstore) DeleteMany(cids []cid.Cid) error { +func (a *adaptedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { for _, cid := range cids { - err := a.DeleteBlock(cid) + err := a.DeleteBlock(ctx, cid) if err != nil { return err } diff --git a/blockstore/buffered.go b/blockstore/buffered.go index 5d3d38f78..8e23b5362 100644 --- a/blockstore/buffered.go +++ b/blockstore/buffered.go @@ -88,34 +88,34 @@ func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, return out, nil } -func (bs *BufferedBlockstore) DeleteBlock(c cid.Cid) error { - if err := bs.read.DeleteBlock(c); err != nil { +func (bs *BufferedBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error { + if err := bs.read.DeleteBlock(ctx, c); err != nil { return err } - return bs.write.DeleteBlock(c) + return bs.write.DeleteBlock(ctx, c) } -func (bs *BufferedBlockstore) DeleteMany(cids []cid.Cid) error { - if err := bs.read.DeleteMany(cids); err != nil { +func (bs *BufferedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { + if err := bs.read.DeleteMany(ctx, cids); err != nil { return err } - return bs.write.DeleteMany(cids) + return bs.write.DeleteMany(ctx, cids) } -func (bs *BufferedBlockstore) View(c cid.Cid, callback func([]byte) error) error { +func (bs *BufferedBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error { // both stores are viewable. - if err := bs.write.View(c, callback); err == ErrNotFound { + if err := bs.write.View(ctx, c, callback); err == ErrNotFound { // not found in write blockstore; fall through. } else { return err // propagate errors, or nil, i.e. found. 
} - return bs.read.View(c, callback) + return bs.read.View(ctx, c, callback) } -func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) { - if out, err := bs.write.Get(c); err != nil { +func (bs *BufferedBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block, error) { + if out, err := bs.write.Get(ctx, c); err != nil { if err != ErrNotFound { return nil, err } @@ -123,20 +123,20 @@ func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) { return out, nil } - return bs.read.Get(c) + return bs.read.Get(ctx, c) } -func (bs *BufferedBlockstore) GetSize(c cid.Cid) (int, error) { - s, err := bs.read.GetSize(c) +func (bs *BufferedBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + s, err := bs.read.GetSize(ctx, c) if err == ErrNotFound || s == 0 { - return bs.write.GetSize(c) + return bs.write.GetSize(ctx, c) } return s, err } -func (bs *BufferedBlockstore) Put(blk block.Block) error { - has, err := bs.read.Has(blk.Cid()) // TODO: consider dropping this check +func (bs *BufferedBlockstore) Put(ctx context.Context, blk block.Block) error { + has, err := bs.read.Has(ctx, blk.Cid()) // TODO: consider dropping this check if err != nil { return err } @@ -145,11 +145,11 @@ func (bs *BufferedBlockstore) Put(blk block.Block) error { return nil } - return bs.write.Put(blk) + return bs.write.Put(ctx, blk) } -func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) { - has, err := bs.write.Has(c) +func (bs *BufferedBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + has, err := bs.write.Has(ctx, c) if err != nil { return false, err } @@ -157,7 +157,7 @@ func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) { return true, nil } - return bs.read.Has(c) + return bs.read.Has(ctx, c) } func (bs *BufferedBlockstore) HashOnRead(hor bool) { @@ -165,8 +165,8 @@ func (bs *BufferedBlockstore) HashOnRead(hor bool) { bs.write.HashOnRead(hor) } -func (bs *BufferedBlockstore) PutMany(blks []block.Block) error { - return bs.write.PutMany(blks) +func (bs *BufferedBlockstore) PutMany(ctx context.Context, blks []block.Block) error { + return bs.write.PutMany(ctx, blks) } func (bs *BufferedBlockstore) Read() Blockstore { diff --git a/blockstore/discard.go b/blockstore/discard.go index afd0651bc..575c752d4 100644 --- a/blockstore/discard.go +++ b/blockstore/discard.go @@ -18,39 +18,39 @@ func NewDiscardStore(bs Blockstore) Blockstore { return &discardstore{bs: bs} } -func (b *discardstore) Has(cid cid.Cid) (bool, error) { - return b.bs.Has(cid) +func (b *discardstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + return b.bs.Has(ctx, cid) } func (b *discardstore) HashOnRead(hor bool) { b.bs.HashOnRead(hor) } -func (b *discardstore) Get(cid cid.Cid) (blocks.Block, error) { - return b.bs.Get(cid) +func (b *discardstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + return b.bs.Get(ctx, cid) } -func (b *discardstore) GetSize(cid cid.Cid) (int, error) { - return b.bs.GetSize(cid) +func (b *discardstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + return b.bs.GetSize(ctx, cid) } -func (b *discardstore) View(cid cid.Cid, f func([]byte) error) error { - return b.bs.View(cid, f) +func (b *discardstore) View(ctx context.Context, cid cid.Cid, f func([]byte) error) error { + return b.bs.View(ctx, cid, f) } -func (b *discardstore) Put(blk blocks.Block) error { +func (b *discardstore) Put(ctx context.Context, blk blocks.Block) error { return nil } -func (b *discardstore) PutMany(blks []blocks.Block) error { +func (b *discardstore) 
PutMany(ctx context.Context, blks []blocks.Block) error { return nil } -func (b *discardstore) DeleteBlock(cid cid.Cid) error { +func (b *discardstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { return nil } -func (b *discardstore) DeleteMany(cids []cid.Cid) error { +func (b *discardstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { return nil } diff --git a/blockstore/fallback.go b/blockstore/fallback.go index 5f220f941..3d0acd36d 100644 --- a/blockstore/fallback.go +++ b/blockstore/fallback.go @@ -71,14 +71,14 @@ func (fbs *FallbackStore) getFallback(c cid.Cid) (blocks.Block, error) { // chain bitswap puts blocks in temp blockstore which is cleaned up // every few min (to drop any messages we fetched but don't want) // in this case we want to keep this block around - if err := fbs.Put(b); err != nil { + if err := fbs.Put(ctx, b); err != nil { return nil, xerrors.Errorf("persisting fallback-fetched block: %w", err) } return b, nil } -func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) { - b, err := fbs.Blockstore.Get(c) +func (fbs *FallbackStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + b, err := fbs.Blockstore.Get(ctx, c) switch err { case nil: return b, nil @@ -89,8 +89,8 @@ func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) { } } -func (fbs *FallbackStore) GetSize(c cid.Cid) (int, error) { - sz, err := fbs.Blockstore.GetSize(c) +func (fbs *FallbackStore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + sz, err := fbs.Blockstore.GetSize(ctx, c) switch err { case nil: return sz, nil diff --git a/blockstore/idstore.go b/blockstore/idstore.go index e6148ff04..c6281998a 100644 --- a/blockstore/idstore.go +++ b/blockstore/idstore.go @@ -38,7 +38,7 @@ func decodeCid(cid cid.Cid) (inline bool, data []byte, err error) { return false, nil, err } -func (b *idstore) Has(cid cid.Cid) (bool, error) { +func (b *idstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { inline, _, err := decodeCid(cid) if err != nil { return false, xerrors.Errorf("error decoding Cid: %w", err) @@ -48,10 +48,10 @@ func (b *idstore) Has(cid cid.Cid) (bool, error) { return true, nil } - return b.bs.Has(cid) + return b.bs.Has(ctx, cid) } -func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) { +func (b *idstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { inline, data, err := decodeCid(cid) if err != nil { return nil, xerrors.Errorf("error decoding Cid: %w", err) @@ -61,10 +61,10 @@ func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } - return b.bs.Get(cid) + return b.bs.Get(ctx, cid) } -func (b *idstore) GetSize(cid cid.Cid) (int, error) { +func (b *idstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { inline, data, err := decodeCid(cid) if err != nil { return 0, xerrors.Errorf("error decoding Cid: %w", err) @@ -74,10 +74,10 @@ func (b *idstore) GetSize(cid cid.Cid) (int, error) { return len(data), err } - return b.bs.GetSize(cid) + return b.bs.GetSize(ctx, cid) } -func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error { +func (b *idstore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error { inline, data, err := decodeCid(cid) if err != nil { return xerrors.Errorf("error decoding Cid: %w", err) @@ -87,10 +87,10 @@ func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error { return cb(data) } - return b.bs.View(cid, cb) + return b.bs.View(ctx, cid, cb) } -func (b *idstore) Put(blk blocks.Block) error { +func (b *idstore) Put(ctx 
context.Context, blk blocks.Block) error { inline, _, err := decodeCid(blk.Cid()) if err != nil { return xerrors.Errorf("error decoding Cid: %w", err) @@ -100,10 +100,10 @@ func (b *idstore) Put(blk blocks.Block) error { return nil } - return b.bs.Put(blk) + return b.bs.Put(ctx, blk) } -func (b *idstore) PutMany(blks []blocks.Block) error { +func (b *idstore) PutMany(ctx context.Context, blks []blocks.Block) error { toPut := make([]blocks.Block, 0, len(blks)) for _, blk := range blks { inline, _, err := decodeCid(blk.Cid()) @@ -118,13 +118,13 @@ func (b *idstore) PutMany(blks []blocks.Block) error { } if len(toPut) > 0 { - return b.bs.PutMany(toPut) + return b.bs.PutMany(ctx, toPut) } return nil } -func (b *idstore) DeleteBlock(cid cid.Cid) error { +func (b *idstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { inline, _, err := decodeCid(cid) if err != nil { return xerrors.Errorf("error decoding Cid: %w", err) @@ -134,10 +134,10 @@ func (b *idstore) DeleteBlock(cid cid.Cid) error { return nil } - return b.bs.DeleteBlock(cid) + return b.bs.DeleteBlock(ctx, cid) } -func (b *idstore) DeleteMany(cids []cid.Cid) error { +func (b *idstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { toDelete := make([]cid.Cid, 0, len(cids)) for _, cid := range cids { inline, _, err := decodeCid(cid) @@ -152,7 +152,7 @@ func (b *idstore) DeleteMany(cids []cid.Cid) error { } if len(toDelete) > 0 { - return b.bs.DeleteMany(toDelete) + return b.bs.DeleteMany(ctx, toDelete) } return nil diff --git a/blockstore/ipfs.go b/blockstore/ipfs.go index 51b4bd951..787c71d7d 100644 --- a/blockstore/ipfs.go +++ b/blockstore/ipfs.go @@ -79,12 +79,12 @@ func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onl return Adapt(bs), nil } -func (i *IPFSBlockstore) DeleteBlock(cid cid.Cid) error { +func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { return xerrors.Errorf("not supported") } -func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) { - _, err := i.offlineAPI.Block().Stat(i.ctx, path.IpldPath(cid)) +func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + _, err := i.offlineAPI.Block().Stat(ctx, path.IpldPath(cid)) if err != nil { // The underlying client is running in Offline mode. 
// Stat() will fail with an err if the block isn't in the @@ -99,8 +99,8 @@ func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) { return true, nil } -func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) { - rd, err := i.api.Block().Get(i.ctx, path.IpldPath(cid)) +func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + rd, err := i.api.Block().Get(ctx, path.IpldPath(cid)) if err != nil { return nil, xerrors.Errorf("getting ipfs block: %w", err) } @@ -113,8 +113,8 @@ func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } -func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) { - st, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid)) +func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + st, err := i.api.Block().Stat(ctx, path.IpldPath(cid)) if err != nil { return 0, xerrors.Errorf("getting ipfs block: %w", err) } @@ -122,23 +122,23 @@ func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) { return st.Size(), nil } -func (i *IPFSBlockstore) Put(block blocks.Block) error { +func (i *IPFSBlockstore) Put(ctx context.Context, block blocks.Block) error { mhd, err := multihash.Decode(block.Cid().Hash()) if err != nil { return err } - _, err = i.api.Block().Put(i.ctx, bytes.NewReader(block.RawData()), + _, err = i.api.Block().Put(ctx, bytes.NewReader(block.RawData()), options.Block.Hash(mhd.Code, mhd.Length), options.Block.Format(cid.CodecToStr[block.Cid().Type()])) return err } -func (i *IPFSBlockstore) PutMany(blocks []blocks.Block) error { +func (i *IPFSBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { // TODO: could be done in parallel for _, block := range blocks { - if err := i.Put(block); err != nil { + if err := i.Put(ctx, block); err != nil { return err } } diff --git a/blockstore/mem.go b/blockstore/mem.go index 8ea69d46a..d6b14f002 100644 --- a/blockstore/mem.go +++ b/blockstore/mem.go @@ -15,24 +15,24 @@ func NewMemory() MemBlockstore { // MemBlockstore is a terminal blockstore that keeps blocks in memory. 
type MemBlockstore map[cid.Cid]blocks.Block -func (m MemBlockstore) DeleteBlock(k cid.Cid) error { +func (m MemBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { delete(m, k) return nil } -func (m MemBlockstore) DeleteMany(ks []cid.Cid) error { +func (m MemBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error { for _, k := range ks { delete(m, k) } return nil } -func (m MemBlockstore) Has(k cid.Cid) (bool, error) { +func (m MemBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { _, ok := m[k] return ok, nil } -func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error { +func (m MemBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { b, ok := m[k] if !ok { return ErrNotFound @@ -40,7 +40,7 @@ func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error { return callback(b.RawData()) } -func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) { +func (m MemBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { b, ok := m[k] if !ok { return nil, ErrNotFound @@ -49,7 +49,7 @@ func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) { } // GetSize returns the CIDs mapped BlockSize -func (m MemBlockstore) GetSize(k cid.Cid) (int, error) { +func (m MemBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { b, ok := m[k] if !ok { return 0, ErrNotFound @@ -58,7 +58,7 @@ func (m MemBlockstore) GetSize(k cid.Cid) (int, error) { } // Put puts a given block to the underlying datastore -func (m MemBlockstore) Put(b blocks.Block) error { +func (m MemBlockstore) Put(ctx context.Context, b blocks.Block) error { // Convert to a basic block for safety, but try to reuse the existing // block if it's already a basic block. k := b.Cid() @@ -76,9 +76,9 @@ func (m MemBlockstore) Put(b blocks.Block) error { // PutMany puts a slice of blocks at the same time using batching // capabilities of the underlying datastore whenever possible. -func (m MemBlockstore) PutMany(bs []blocks.Block) error { +func (m MemBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { for _, b := range bs { - _ = m.Put(b) // can't fail + _ = m.Put(ctx, b) // can't fail } return nil } diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 0e34fe952..f6715ea33 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -18,6 +18,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/metrics" @@ -47,6 +49,9 @@ var ( enableDebugLog = false // set this to true if you want to track origin stack traces in the write log enableDebugLogWriteTraces = false + + // upgradeBoundary is the boundary before and after an upgrade where we suppress compaction + upgradeBoundary = build.Finality ) func init() { @@ -98,6 +103,12 @@ type ChainAccessor interface { SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) } +// upgradeRange is a precomputed epoch range during which we shouldn't compact so as to not +// interfere with an upgrade +type upgradeRange struct { + start, end abi.ChainEpoch +} + // hotstore is the interface that must be satisfied by the hot blockstore; it is an extension // of the Blockstore interface with the traits we need for compaction. 
type hotstore interface { @@ -125,6 +136,8 @@ type SplitStore struct { cold bstore.Blockstore hot hotstore + upgrades []upgradeRange + markSetEnv MarkSetEnv markSetSize int64 @@ -203,17 +216,17 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co } // Blockstore interface -func (s *SplitStore) DeleteBlock(_ cid.Cid) error { +func (s *SplitStore) DeleteBlock(_ context.Context, _ cid.Cid) error { // afaict we don't seem to be using this method, so it's not implemented return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint } -func (s *SplitStore) DeleteMany(_ []cid.Cid) error { +func (s *SplitStore) DeleteMany(_ context.Context, _ []cid.Cid) error { // afaict we don't seem to be using this method, so it's not implemented return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint } -func (s *SplitStore) Has(cid cid.Cid) (bool, error) { +func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) { if isIdentiyCid(cid) { return true, nil } @@ -221,7 +234,7 @@ func (s *SplitStore) Has(cid cid.Cid) (bool, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() - has, err := s.hot.Has(cid) + has, err := s.hot.Has(ctx, cid) if err != nil { return has, err @@ -232,10 +245,10 @@ func (s *SplitStore) Has(cid cid.Cid) (bool, error) { return true, nil } - return s.cold.Has(cid) + return s.cold.Has(ctx, cid) } -func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { +func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { if isIdentiyCid(cid) { data, err := decodeIdentityCid(cid) if err != nil { @@ -248,7 +261,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() - blk, err := s.hot.Get(cid) + blk, err := s.hot.Get(ctx, cid) switch err { case nil: @@ -260,7 +273,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { s.debug.LogReadMiss(cid) } - blk, err = s.cold.Get(cid) + blk, err = s.cold.Get(ctx, cid) if err == nil { stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) @@ -272,7 +285,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { } } -func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { +func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { if isIdentiyCid(cid) { data, err := decodeIdentityCid(cid) if err != nil { @@ -285,7 +298,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() - size, err := s.hot.GetSize(cid) + size, err := s.hot.GetSize(ctx, cid) switch err { case nil: @@ -297,7 +310,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { s.debug.LogReadMiss(cid) } - size, err = s.cold.GetSize(cid) + size, err = s.cold.GetSize(ctx, cid) if err == nil { stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } @@ -308,7 +321,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { } } -func (s *SplitStore) Put(blk blocks.Block) error { +func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error { if isIdentiyCid(blk.Cid()) { return nil } @@ -316,7 +329,7 @@ func (s *SplitStore) Put(blk blocks.Block) error { s.txnLk.RLock() defer s.txnLk.RUnlock() - err := s.hot.Put(blk) + err := s.hot.Put(ctx, blk) if err != nil { return err } @@ -327,7 +340,7 @@ func (s *SplitStore) Put(blk blocks.Block) error { return nil } -func (s *SplitStore) PutMany(blks []blocks.Block) error { +func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error { // filter identites idcids := 0 for _, blk 
:= range blks { @@ -361,7 +374,7 @@ func (s *SplitStore) PutMany(blks []blocks.Block) error { s.txnLk.RLock() defer s.txnLk.RUnlock() - err := s.hot.PutMany(blks) + err := s.hot.PutMany(ctx, blks) if err != nil { return err } @@ -417,7 +430,7 @@ func (s *SplitStore) HashOnRead(enabled bool) { s.cold.HashOnRead(enabled) } -func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error { +func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error { if isIdentiyCid(cid) { data, err := decodeIdentityCid(cid) if err != nil { @@ -438,14 +451,14 @@ func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error { s.protectView(cid) defer s.viewDone() - err := s.hot.View(cid, cb) + err := s.hot.View(ctx, cid, cb) switch err { case bstore.ErrNotFound: if s.isWarm() { s.debug.LogReadMiss(cid) } - err = s.cold.View(cid, cb) + err = s.cold.View(ctx, cid, cb) if err == nil { stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } @@ -463,16 +476,33 @@ func (s *SplitStore) isWarm() bool { } // State tracking -func (s *SplitStore) Start(chain ChainAccessor) error { +func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error { s.chain = chain curTs := chain.GetHeaviestTipSet() + // precompute the upgrade boundaries + s.upgrades = make([]upgradeRange, 0, len(us)) + for _, upgrade := range us { + boundary := upgrade.Height + for _, pre := range upgrade.PreMigrations { + preMigrationBoundary := upgrade.Height - pre.StartWithin + if preMigrationBoundary < boundary { + boundary = preMigrationBoundary + } + } + + upgradeStart := boundary - upgradeBoundary + upgradeEnd := upgrade.Height + upgradeBoundary + + s.upgrades = append(s.upgrades, upgradeRange{start: upgradeStart, end: upgradeEnd}) + } + // should we warmup warmup := false // load base epoch from metadata ds // if none, then use current epoch because it's a fresh start - bs, err := s.ds.Get(baseEpochKey) + bs, err := s.ds.Get(s.ctx, baseEpochKey) switch err { case nil: s.baseEpoch = bytesToEpoch(bs) @@ -493,7 +523,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load warmup epoch from metadata ds - bs, err = s.ds.Get(warmupEpochKey) + bs, err = s.ds.Get(s.ctx, warmupEpochKey) switch err { case nil: s.warmupEpoch = bytesToEpoch(bs) @@ -506,7 +536,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load markSetSize from metadata ds to provide a size hint for marksets - bs, err = s.ds.Get(markSetSizeKey) + bs, err = s.ds.Get(s.ctx, markSetSizeKey) switch err { case nil: s.markSetSize = bytesToInt64(bs) @@ -517,7 +547,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load compactionIndex from metadata ds to provide a hint as to when to perform moving gc - bs, err = s.ds.Get(compactionIndexKey) + bs, err = s.ds.Get(s.ctx, compactionIndexKey) switch err { case nil: s.compactionIndex = bytesToInt64(bs) @@ -579,5 +609,5 @@ func (s *SplitStore) checkClosing() error { func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error { s.baseEpoch = epoch - return s.ds.Put(baseEpochKey, epochToBytes(epoch)) + return s.ds.Put(s.ctx, baseEpochKey, epochToBytes(epoch)) } diff --git a/blockstore/splitstore/splitstore_check.go b/blockstore/splitstore/splitstore_check.go index 8907abf9e..c83ed7b28 100644 --- a/blockstore/splitstore/splitstore_check.go +++ b/blockstore/splitstore/splitstore_check.go @@ -96,7 +96,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { return errStopWalk } - has, err := s.hot.Has(c) + has, err := s.hot.Has(s.ctx, c) if err != nil { 
return xerrors.Errorf("error checking hotstore: %w", err) } @@ -105,7 +105,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { return nil } - has, err = s.cold.Has(c) + has, err = s.cold.Has(s.ctx, c) if err != nil { return xerrors.Errorf("error checking coldstore: %w", err) } diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 4ff38a5fb..13ab90ac0 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -99,6 +99,12 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { return nil } + if s.isNearUpgrade(epoch) { + // we are near an upgrade epoch, suppress compaction + atomic.StoreInt32(&s.compacting, 0) + return nil + } + if epoch-s.baseEpoch > CompactionThreshold { // it's time to compact -- prepare the transaction and go! s.beginTxnProtect() @@ -121,6 +127,16 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { return nil } +func (s *SplitStore) isNearUpgrade(epoch abi.ChainEpoch) bool { + for _, upgrade := range s.upgrades { + if epoch >= upgrade.start && epoch <= upgrade.end { + return true + } + } + + return false +} + // transactionally protect incoming tipsets func (s *SplitStore) protectTipSets(apply []*types.TipSet) { s.txnLk.RLock() @@ -561,13 +577,13 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return xerrors.Errorf("error saving base epoch: %w", err) } - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize)) if err != nil { return xerrors.Errorf("error saving mark set size: %w", err) } s.compactionIndex++ - err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex)) if err != nil { return xerrors.Errorf("error saving compaction index: %w", err) } @@ -819,10 +835,10 @@ func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error { return cb(data) } - err := s.hot.View(c, cb) + err := s.hot.View(s.ctx, c, cb) switch err { case bstore.ErrNotFound: - return s.cold.View(c, cb) + return s.cold.View(s.ctx, c, cb) default: return err @@ -834,13 +850,13 @@ func (s *SplitStore) has(c cid.Cid) (bool, error) { return true, nil } - has, err := s.hot.Has(c) + has, err := s.hot.Has(s.ctx, c) if has || err != nil { return has, err } - return s.cold.Has(c) + return s.cold.Has(s.ctx, c) } func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { @@ -851,7 +867,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { return err } - blk, err := s.hot.Get(c) + blk, err := s.hot.Get(s.ctx, c) if err != nil { if err == bstore.ErrNotFound { log.Warnf("hotstore missing block %s", c) @@ -863,7 +879,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { batch = append(batch, blk) if len(batch) == batchSize { - err = s.cold.PutMany(batch) + err = s.cold.PutMany(s.ctx, batch) if err != nil { return xerrors.Errorf("error putting batch to coldstore: %w", err) } @@ -872,7 +888,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { } if len(batch) > 0 { - err := s.cold.PutMany(batch) + err := s.cold.PutMany(s.ctx, batch) if err != nil { return xerrors.Errorf("error putting batch to coldstore: %w", err) } @@ -1042,7 +1058,7 @@ func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error { deadCids = append(deadCids, c) } - err := s.hot.DeleteMany(deadCids) + err := s.hot.DeleteMany(s.ctx, deadCids) if err != nil { return xerrors.Errorf("error 
purging cold objects: %w", err) } diff --git a/blockstore/splitstore/splitstore_expose.go b/blockstore/splitstore/splitstore_expose.go index 1065e460c..6f838229d 100644 --- a/blockstore/splitstore/splitstore_expose.go +++ b/blockstore/splitstore/splitstore_expose.go @@ -20,28 +20,28 @@ func (s *SplitStore) Expose() bstore.Blockstore { return &exposedSplitStore{s: s} } -func (es *exposedSplitStore) DeleteBlock(_ cid.Cid) error { +func (es *exposedSplitStore) DeleteBlock(_ context.Context, _ cid.Cid) error { return errors.New("DeleteBlock: operation not supported") } -func (es *exposedSplitStore) DeleteMany(_ []cid.Cid) error { +func (es *exposedSplitStore) DeleteMany(_ context.Context, _ []cid.Cid) error { return errors.New("DeleteMany: operation not supported") } -func (es *exposedSplitStore) Has(c cid.Cid) (bool, error) { +func (es *exposedSplitStore) Has(ctx context.Context, c cid.Cid) (bool, error) { if isIdentiyCid(c) { return true, nil } - has, err := es.s.hot.Has(c) + has, err := es.s.hot.Has(ctx, c) if has || err != nil { return has, err } - return es.s.cold.Has(c) + return es.s.cold.Has(ctx, c) } -func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) { +func (es *exposedSplitStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { if isIdentiyCid(c) { data, err := decodeIdentityCid(c) if err != nil { @@ -51,16 +51,16 @@ func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, c) } - blk, err := es.s.hot.Get(c) + blk, err := es.s.hot.Get(ctx, c) switch err { case bstore.ErrNotFound: - return es.s.cold.Get(c) + return es.s.cold.Get(ctx, c) default: return blk, err } } -func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) { +func (es *exposedSplitStore) GetSize(ctx context.Context, c cid.Cid) (int, error) { if isIdentiyCid(c) { data, err := decodeIdentityCid(c) if err != nil { @@ -70,21 +70,21 @@ func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) { return len(data), nil } - size, err := es.s.hot.GetSize(c) + size, err := es.s.hot.GetSize(ctx, c) switch err { case bstore.ErrNotFound: - return es.s.cold.GetSize(c) + return es.s.cold.GetSize(ctx, c) default: return size, err } } -func (es *exposedSplitStore) Put(blk blocks.Block) error { - return es.s.Put(blk) +func (es *exposedSplitStore) Put(ctx context.Context, blk blocks.Block) error { + return es.s.Put(ctx, blk) } -func (es *exposedSplitStore) PutMany(blks []blocks.Block) error { - return es.s.PutMany(blks) +func (es *exposedSplitStore) PutMany(ctx context.Context, blks []blocks.Block) error { + return es.s.PutMany(ctx, blks) } func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { @@ -93,7 +93,7 @@ func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, e func (es *exposedSplitStore) HashOnRead(enabled bool) {} -func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error { +func (es *exposedSplitStore) View(ctx context.Context, c cid.Cid, f func([]byte) error) error { if isIdentiyCid(c) { data, err := decodeIdentityCid(c) if err != nil { @@ -103,10 +103,10 @@ func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error { return f(data) } - err := es.s.hot.View(c, f) + err := es.s.hot.View(ctx, c, f) switch err { case bstore.ErrNotFound: - return es.s.cold.View(c, f) + return es.s.cold.View(ctx, c, f) default: return err diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index df9984d41..7d84e0a4c 100644 --- 
a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/mock" @@ -29,6 +30,7 @@ func init() { } func testSplitStore(t *testing.T, cfg *Config) { + ctx := context.Background() chain := &mockChain{t: t} // the myriads of stores @@ -38,7 +40,7 @@ func testSplitStore(t *testing.T, cfg *Config) { // this is necessary to avoid the garbage mock puts in the blocks garbage := blocks.NewBlock([]byte{1, 2, 3}) - err := cold.Put(garbage) + err := cold.Put(ctx, garbage) if err != nil { t.Fatal(err) } @@ -59,21 +61,21 @@ func testSplitStore(t *testing.T, cfg *Config) { t.Fatal(err) } - err = cold.Put(blk) + err = cold.Put(ctx, blk) if err != nil { t.Fatal(err) } // create a garbage block that is protected with a rgistered protector protected := blocks.NewBlock([]byte("protected!")) - err = hot.Put(protected) + err = hot.Put(ctx, protected) if err != nil { t.Fatal(err) } // and another one that is not protected unprotected := blocks.NewBlock([]byte("unprotected!")) - err = hot.Put(unprotected) + err = hot.Put(ctx, unprotected) if err != nil { t.Fatal(err) } @@ -90,7 +92,7 @@ func testSplitStore(t *testing.T, cfg *Config) { return protect(protected.Cid()) }) - err = ss.Start(chain) + err = ss.Start(chain, nil) if err != nil { t.Fatal(err) } @@ -108,11 +110,11 @@ func testSplitStore(t *testing.T, cfg *Config) { if err != nil { t.Fatal(err) } - err = ss.Put(stateRoot) + err = ss.Put(ctx, stateRoot) if err != nil { t.Fatal(err) } - err = ss.Put(sblk) + err = ss.Put(ctx, sblk) if err != nil { t.Fatal(err) } @@ -175,7 +177,7 @@ func testSplitStore(t *testing.T, cfg *Config) { } // ensure our protected block is still there - has, err := hot.Has(protected.Cid()) + has, err := hot.Has(ctx, protected.Cid()) if err != nil { t.Fatal(err) } @@ -185,7 +187,7 @@ func testSplitStore(t *testing.T, cfg *Config) { } // ensure our unprotected block is in the coldstore now - has, err = hot.Has(unprotected.Cid()) + has, err = hot.Has(ctx, unprotected.Cid()) if err != nil { t.Fatal(err) } @@ -194,7 +196,7 @@ func testSplitStore(t *testing.T, cfg *Config) { t.Fatal("unprotected block is still in hotstore") } - has, err = cold.Has(unprotected.Cid()) + has, err = cold.Has(ctx, unprotected.Cid()) if err != nil { t.Fatal(err) } @@ -220,6 +222,141 @@ func TestSplitStoreCompactionWithBadger(t *testing.T) { testSplitStore(t, &Config{MarkSetType: "badger"}) } +func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) { + ctx := context.Background() + chain := &mockChain{t: t} + + // the myriads of stores + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + hot := newMockStore() + cold := newMockStore() + + // this is necessary to avoid the garbage mock puts in the blocks + garbage := blocks.NewBlock([]byte{1, 2, 3}) + err := cold.Put(ctx, garbage) + if err != nil { + t.Fatal(err) + } + + // genesis + genBlock := mock.MkBlock(nil, 0, 0) + genBlock.Messages = garbage.Cid() + genBlock.ParentMessageReceipts = garbage.Cid() + genBlock.ParentStateRoot = garbage.Cid() + genBlock.Timestamp = uint64(time.Now().Unix()) + + genTs := mock.TipSet(genBlock) + chain.push(genTs) + + // put the genesis block to cold store + blk, err := genBlock.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + + err = cold.Put(ctx, blk) + if err != nil { + 
t.Fatal(err) + } + + // open the splitstore + ss, err := Open("", ds, hot, cold, &Config{MarkSetType: "map"}) + if err != nil { + t.Fatal(err) + } + defer ss.Close() //nolint + + // create an upgrade schedule that will suppress compaction during the test + upgradeBoundary = 0 + upgrade := stmgr.Upgrade{ + Height: 10, + PreMigrations: []stmgr.PreMigration{{StartWithin: 10}}, + } + + err = ss.Start(chain, []stmgr.Upgrade{upgrade}) + if err != nil { + t.Fatal(err) + } + + mkBlock := func(curTs *types.TipSet, i int, stateRoot blocks.Block) *types.TipSet { + blk := mock.MkBlock(curTs, uint64(i), uint64(i)) + + blk.Messages = garbage.Cid() + blk.ParentMessageReceipts = garbage.Cid() + blk.ParentStateRoot = stateRoot.Cid() + blk.Timestamp = uint64(time.Now().Unix()) + + sblk, err := blk.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + err = ss.Put(ctx, stateRoot) + if err != nil { + t.Fatal(err) + } + err = ss.Put(ctx, sblk) + if err != nil { + t.Fatal(err) + } + ts := mock.TipSet(blk) + chain.push(ts) + + return ts + } + + waitForCompaction := func() { + for atomic.LoadInt32(&ss.compacting) == 1 { + time.Sleep(100 * time.Millisecond) + } + } + + curTs := genTs + for i := 1; i < 10; i++ { + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) + waitForCompaction() + } + + countBlocks := func(bs blockstore.Blockstore) int { + count := 0 + _ = bs.(blockstore.BlockstoreIterator).ForEachKey(func(_ cid.Cid) error { + count++ + return nil + }) + return count + } + + // we should not have compacted due to suppression and everything should still be hot + hotCnt := countBlocks(hot) + coldCnt := countBlocks(cold) + + if hotCnt != 20 { + t.Errorf("expected %d blocks, but got %d", 20, hotCnt) + } + + if coldCnt != 2 { + t.Errorf("expected %d blocks, but got %d", 2, coldCnt) + } + + // put some more blocks, now we should compact + for i := 10; i < 20; i++ { + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) + waitForCompaction() + } + + hotCnt = countBlocks(hot) + coldCnt = countBlocks(cold) + + if hotCnt != 24 { + t.Errorf("expected %d blocks, but got %d", 24, hotCnt) + } + + if coldCnt != 18 { + t.Errorf("expected %d blocks, but got %d", 18, coldCnt) + } +} + type mockChain struct { t testing.TB @@ -296,7 +433,7 @@ func newMockStore() *mockStore { return &mockStore{set: make(map[cid.Cid]blocks.Block)} } -func (b *mockStore) Has(cid cid.Cid) (bool, error) { +func (b *mockStore) Has(_ context.Context, cid cid.Cid) (bool, error) { b.mx.Lock() defer b.mx.Unlock() _, ok := b.set[cid] @@ -305,7 +442,7 @@ func (b *mockStore) Has(cid cid.Cid) (bool, error) { func (b *mockStore) HashOnRead(hor bool) {} -func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) { +func (b *mockStore) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) { b.mx.Lock() defer b.mx.Unlock() @@ -316,8 +453,8 @@ func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) { return blk, nil } -func (b *mockStore) GetSize(cid cid.Cid) (int, error) { - blk, err := b.Get(cid) +func (b *mockStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + blk, err := b.Get(ctx, cid) if err != nil { return 0, err } @@ -325,15 +462,15 @@ func (b *mockStore) GetSize(cid cid.Cid) (int, error) { return len(blk.RawData()), nil } -func (b *mockStore) View(cid cid.Cid, f func([]byte) error) error { - blk, err := b.Get(cid) +func (b *mockStore) View(ctx context.Context, cid cid.Cid, f func([]byte) error) error { + blk, err := b.Get(ctx, cid) if err != nil { 
return err } return f(blk.RawData()) } -func (b *mockStore) Put(blk blocks.Block) error { +func (b *mockStore) Put(_ context.Context, blk blocks.Block) error { b.mx.Lock() defer b.mx.Unlock() @@ -341,7 +478,7 @@ func (b *mockStore) Put(blk blocks.Block) error { return nil } -func (b *mockStore) PutMany(blks []blocks.Block) error { +func (b *mockStore) PutMany(_ context.Context, blks []blocks.Block) error { b.mx.Lock() defer b.mx.Unlock() @@ -351,7 +488,7 @@ func (b *mockStore) PutMany(blks []blocks.Block) error { return nil } -func (b *mockStore) DeleteBlock(cid cid.Cid) error { +func (b *mockStore) DeleteBlock(_ context.Context, cid cid.Cid) error { b.mx.Lock() defer b.mx.Unlock() @@ -359,7 +496,7 @@ func (b *mockStore) DeleteBlock(cid cid.Cid) error { return nil } -func (b *mockStore) DeleteMany(cids []cid.Cid) error { +func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error { b.mx.Lock() defer b.mx.Unlock() diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go index 216de571a..2a39f6c9d 100644 --- a/blockstore/splitstore/splitstore_warmup.go +++ b/blockstore/splitstore/splitstore_warmup.go @@ -75,7 +75,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { count++ - has, err := s.hot.Has(c) + has, err := s.hot.Has(s.ctx, c) if err != nil { return err } @@ -84,7 +84,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { return nil } - blk, err := s.cold.Get(c) + blk, err := s.cold.Get(s.ctx, c) if err != nil { if err == bstore.ErrNotFound { missing++ @@ -97,7 +97,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { batchHot = append(batchHot, blk) if len(batchHot) == batchSize { - err = s.hot.PutMany(batchHot) + err = s.hot.PutMany(s.ctx, batchHot) if err != nil { return err } @@ -112,7 +112,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { } if len(batchHot) > 0 { - err = s.hot.PutMany(batchHot) + err = s.hot.PutMany(s.ctx, batchHot) if err != nil { return err } @@ -121,13 +121,13 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing) s.markSetSize = count + count>>2 // overestimate a bit - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize)) if err != nil { log.Warnf("error saving mark set size: %s", err) } // save the warmup epoch - err = s.ds.Put(warmupEpochKey, epochToBytes(epoch)) + err = s.ds.Put(s.ctx, warmupEpochKey, epochToBytes(epoch)) if err != nil { return xerrors.Errorf("error saving warm up epoch: %w", err) } @@ -136,7 +136,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { s.mx.Unlock() // also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes - err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex)) if err != nil { return xerrors.Errorf("error saving compaction index: %w", err) } diff --git a/blockstore/sync.go b/blockstore/sync.go index 848ccd19d..1b4ad8297 100644 --- a/blockstore/sync.go +++ b/blockstore/sync.go @@ -20,53 +20,53 @@ type SyncBlockstore struct { bs MemBlockstore // specifically use a memStore to save indirection overhead. 
} -func (m *SyncBlockstore) DeleteBlock(k cid.Cid) error { +func (m *SyncBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.DeleteBlock(k) + return m.bs.DeleteBlock(ctx, k) } -func (m *SyncBlockstore) DeleteMany(ks []cid.Cid) error { +func (m *SyncBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.DeleteMany(ks) + return m.bs.DeleteMany(ctx, ks) } -func (m *SyncBlockstore) Has(k cid.Cid) (bool, error) { +func (m *SyncBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.Has(k) + return m.bs.Has(ctx, k) } -func (m *SyncBlockstore) View(k cid.Cid, callback func([]byte) error) error { +func (m *SyncBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.View(k, callback) + return m.bs.View(ctx, k, callback) } -func (m *SyncBlockstore) Get(k cid.Cid) (blocks.Block, error) { +func (m *SyncBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.Get(k) + return m.bs.Get(ctx, k) } -func (m *SyncBlockstore) GetSize(k cid.Cid) (int, error) { +func (m *SyncBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.GetSize(k) + return m.bs.GetSize(ctx, k) } -func (m *SyncBlockstore) Put(b blocks.Block) error { +func (m *SyncBlockstore) Put(ctx context.Context, b blocks.Block) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.Put(b) + return m.bs.Put(ctx, b) } -func (m *SyncBlockstore) PutMany(bs []blocks.Block) error { +func (m *SyncBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.PutMany(bs) + return m.bs.PutMany(ctx, bs) } func (m *SyncBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { diff --git a/blockstore/timed.go b/blockstore/timed.go index b279943b6..8f226b0b1 100644 --- a/blockstore/timed.go +++ b/blockstore/timed.go @@ -92,28 +92,28 @@ func (t *TimedCacheBlockstore) rotate() { t.mu.Unlock() } -func (t *TimedCacheBlockstore) Put(b blocks.Block) error { +func (t *TimedCacheBlockstore) Put(ctx context.Context, b blocks.Block) error { // Don't check the inactive set here. We want to keep this block for at // least one interval. t.mu.Lock() defer t.mu.Unlock() - return t.active.Put(b) + return t.active.Put(ctx, b) } -func (t *TimedCacheBlockstore) PutMany(bs []blocks.Block) error { +func (t *TimedCacheBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { t.mu.Lock() defer t.mu.Unlock() - return t.active.PutMany(bs) + return t.active.PutMany(ctx, bs) } -func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) error { +func (t *TimedCacheBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { // The underlying blockstore is always a "mem" blockstore so there's no difference, // from a performance perspective, between view & get. So we call Get to avoid // calling an arbitrary callback while holding a lock. 
t.mu.RLock() - block, err := t.active.Get(k) + block, err := t.active.Get(ctx, k) if err == ErrNotFound { - block, err = t.inactive.Get(k) + block, err = t.inactive.Get(ctx, k) } t.mu.RUnlock() @@ -123,51 +123,51 @@ func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) erro return callback(block.RawData()) } -func (t *TimedCacheBlockstore) Get(k cid.Cid) (blocks.Block, error) { +func (t *TimedCacheBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { t.mu.RLock() defer t.mu.RUnlock() - b, err := t.active.Get(k) + b, err := t.active.Get(ctx, k) if err == ErrNotFound { - b, err = t.inactive.Get(k) + b, err = t.inactive.Get(ctx, k) } return b, err } -func (t *TimedCacheBlockstore) GetSize(k cid.Cid) (int, error) { +func (t *TimedCacheBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { t.mu.RLock() defer t.mu.RUnlock() - size, err := t.active.GetSize(k) + size, err := t.active.GetSize(ctx, k) if err == ErrNotFound { - size, err = t.inactive.GetSize(k) + size, err = t.inactive.GetSize(ctx, k) } return size, err } -func (t *TimedCacheBlockstore) Has(k cid.Cid) (bool, error) { +func (t *TimedCacheBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { t.mu.RLock() defer t.mu.RUnlock() - if has, err := t.active.Has(k); err != nil { + if has, err := t.active.Has(ctx, k); err != nil { return false, err } else if has { return true, nil } - return t.inactive.Has(k) + return t.inactive.Has(ctx, k) } func (t *TimedCacheBlockstore) HashOnRead(_ bool) { // no-op } -func (t *TimedCacheBlockstore) DeleteBlock(k cid.Cid) error { +func (t *TimedCacheBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { t.mu.Lock() defer t.mu.Unlock() - return multierr.Combine(t.active.DeleteBlock(k), t.inactive.DeleteBlock(k)) + return multierr.Combine(t.active.DeleteBlock(ctx, k), t.inactive.DeleteBlock(ctx, k)) } -func (t *TimedCacheBlockstore) DeleteMany(ks []cid.Cid) error { +func (t *TimedCacheBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error { t.mu.Lock() defer t.mu.Unlock() - return multierr.Combine(t.active.DeleteMany(ks), t.inactive.DeleteMany(ks)) + return multierr.Combine(t.active.DeleteMany(ctx, ks), t.inactive.DeleteMany(ctx, ks)) } func (t *TimedCacheBlockstore) AllKeysChan(_ context.Context) (<-chan cid.Cid, error) { diff --git a/blockstore/timed_test.go b/blockstore/timed_test.go index d5fefff94..16795f047 100644 --- a/blockstore/timed_test.go +++ b/blockstore/timed_test.go @@ -19,6 +19,8 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { tc.clock = mClock tc.doneRotatingCh = make(chan struct{}) + ctx := context.Background() + _ = tc.Start(context.Background()) mClock.Add(1) // IDK why it is needed but it makes it work @@ -27,18 +29,18 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { }() b1 := blocks.NewBlock([]byte("foo")) - require.NoError(t, tc.Put(b1)) + require.NoError(t, tc.Put(ctx, b1)) b2 := blocks.NewBlock([]byte("bar")) - require.NoError(t, tc.Put(b2)) + require.NoError(t, tc.Put(ctx, b2)) b3 := blocks.NewBlock([]byte("baz")) - b1out, err := tc.Get(b1.Cid()) + b1out, err := tc.Get(ctx, b1.Cid()) require.NoError(t, err) require.Equal(t, b1.RawData(), b1out.RawData()) - has, err := tc.Has(b1.Cid()) + has, err := tc.Has(ctx, b1.Cid()) require.NoError(t, err) require.True(t, has) @@ -46,17 +48,17 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { <-tc.doneRotatingCh // We should still have everything. 
- has, err = tc.Has(b1.Cid()) + has, err = tc.Has(ctx, b1.Cid()) require.NoError(t, err) require.True(t, has) - has, err = tc.Has(b2.Cid()) + has, err = tc.Has(ctx, b2.Cid()) require.NoError(t, err) require.True(t, has) // extend b2, add b3. - require.NoError(t, tc.Put(b2)) - require.NoError(t, tc.Put(b3)) + require.NoError(t, tc.Put(ctx, b2)) + require.NoError(t, tc.Put(ctx, b3)) // all keys once. allKeys, err := tc.AllKeysChan(context.Background()) @@ -71,15 +73,15 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { <-tc.doneRotatingCh // should still have b2, and b3, but not b1 - has, err = tc.Has(b1.Cid()) + has, err = tc.Has(ctx, b1.Cid()) require.NoError(t, err) require.False(t, has) - has, err = tc.Has(b2.Cid()) + has, err = tc.Has(ctx, b2.Cid()) require.NoError(t, err) require.True(t, has) - has, err = tc.Has(b3.Cid()) + has, err = tc.Has(ctx, b3.Cid()) require.NoError(t, err) require.True(t, has) } diff --git a/blockstore/union.go b/blockstore/union.go index a99ba2591..f54a86590 100644 --- a/blockstore/union.go +++ b/blockstore/union.go @@ -19,72 +19,72 @@ func Union(stores ...Blockstore) Blockstore { return unionBlockstore(stores) } -func (m unionBlockstore) Has(cid cid.Cid) (has bool, err error) { +func (m unionBlockstore) Has(ctx context.Context, cid cid.Cid) (has bool, err error) { for _, bs := range m { - if has, err = bs.Has(cid); has || err != nil { + if has, err = bs.Has(ctx, cid); has || err != nil { break } } return has, err } -func (m unionBlockstore) Get(cid cid.Cid) (blk blocks.Block, err error) { +func (m unionBlockstore) Get(ctx context.Context, cid cid.Cid) (blk blocks.Block, err error) { for _, bs := range m { - if blk, err = bs.Get(cid); err == nil || err != ErrNotFound { + if blk, err = bs.Get(ctx, cid); err == nil || err != ErrNotFound { break } } return blk, err } -func (m unionBlockstore) View(cid cid.Cid, callback func([]byte) error) (err error) { +func (m unionBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) (err error) { for _, bs := range m { - if err = bs.View(cid, callback); err == nil || err != ErrNotFound { + if err = bs.View(ctx, cid, callback); err == nil || err != ErrNotFound { break } } return err } -func (m unionBlockstore) GetSize(cid cid.Cid) (size int, err error) { +func (m unionBlockstore) GetSize(ctx context.Context, cid cid.Cid) (size int, err error) { for _, bs := range m { - if size, err = bs.GetSize(cid); err == nil || err != ErrNotFound { + if size, err = bs.GetSize(ctx, cid); err == nil || err != ErrNotFound { break } } return size, err } -func (m unionBlockstore) Put(block blocks.Block) (err error) { +func (m unionBlockstore) Put(ctx context.Context, block blocks.Block) (err error) { for _, bs := range m { - if err = bs.Put(block); err != nil { + if err = bs.Put(ctx, block); err != nil { break } } return err } -func (m unionBlockstore) PutMany(blks []blocks.Block) (err error) { +func (m unionBlockstore) PutMany(ctx context.Context, blks []blocks.Block) (err error) { for _, bs := range m { - if err = bs.PutMany(blks); err != nil { + if err = bs.PutMany(ctx, blks); err != nil { break } } return err } -func (m unionBlockstore) DeleteBlock(cid cid.Cid) (err error) { +func (m unionBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) (err error) { for _, bs := range m { - if err = bs.DeleteBlock(cid); err != nil { + if err = bs.DeleteBlock(ctx, cid); err != nil { break } } return err } -func (m unionBlockstore) DeleteMany(cids []cid.Cid) (err error) { +func (m unionBlockstore) DeleteMany(ctx 
context.Context, cids []cid.Cid) (err error) { for _, bs := range m { - if err = bs.DeleteMany(cids); err != nil { + if err = bs.DeleteMany(ctx, cids); err != nil { break } } diff --git a/blockstore/union_test.go b/blockstore/union_test.go index b62026892..3ae8c1d49 100644 --- a/blockstore/union_test.go +++ b/blockstore/union_test.go @@ -15,79 +15,81 @@ var ( ) func TestUnionBlockstore_Get(t *testing.T) { + ctx := context.Background() m1 := NewMemory() m2 := NewMemory() - _ = m1.Put(b1) - _ = m2.Put(b2) + _ = m1.Put(ctx, b1) + _ = m2.Put(ctx, b2) u := Union(m1, m2) - v1, err := u.Get(b1.Cid()) + v1, err := u.Get(ctx, b1.Cid()) require.NoError(t, err) require.Equal(t, b1.RawData(), v1.RawData()) - v2, err := u.Get(b2.Cid()) + v2, err := u.Get(ctx, b2.Cid()) require.NoError(t, err) require.Equal(t, b2.RawData(), v2.RawData()) } func TestUnionBlockstore_Put_PutMany_Delete_AllKeysChan(t *testing.T) { + ctx := context.Background() m1 := NewMemory() m2 := NewMemory() u := Union(m1, m2) - err := u.Put(b0) + err := u.Put(ctx, b0) require.NoError(t, err) var has bool // write was broadcasted to all stores. - has, _ = m1.Has(b0.Cid()) + has, _ = m1.Has(ctx, b0.Cid()) require.True(t, has) - has, _ = m2.Has(b0.Cid()) + has, _ = m2.Has(ctx, b0.Cid()) require.True(t, has) - has, _ = u.Has(b0.Cid()) + has, _ = u.Has(ctx, b0.Cid()) require.True(t, has) // put many. - err = u.PutMany([]blocks.Block{b1, b2}) + err = u.PutMany(ctx, []blocks.Block{b1, b2}) require.NoError(t, err) // write was broadcasted to all stores. - has, _ = m1.Has(b1.Cid()) + has, _ = m1.Has(ctx, b1.Cid()) require.True(t, has) - has, _ = m1.Has(b2.Cid()) + has, _ = m1.Has(ctx, b2.Cid()) require.True(t, has) - has, _ = m2.Has(b1.Cid()) + has, _ = m2.Has(ctx, b1.Cid()) require.True(t, has) - has, _ = m2.Has(b2.Cid()) + has, _ = m2.Has(ctx, b2.Cid()) require.True(t, has) // also in the union store. - has, _ = u.Has(b1.Cid()) + has, _ = u.Has(ctx, b1.Cid()) require.True(t, has) - has, _ = u.Has(b2.Cid()) + has, _ = u.Has(ctx, b2.Cid()) require.True(t, has) // deleted from all stores. 
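// Illustrative sketch (not a hunk of this patch): after this change every lotus
// Blockstore method takes a context.Context as its first argument, so callers
// thread a context through reads and writes. A minimal sketch, assuming the
// in-memory blockstore exercised by the tests above; the block contents are
// hypothetical placeholders.
package main

import (
	"context"
	"fmt"

	blocks "github.com/ipfs/go-block-format"

	"github.com/filecoin-project/lotus/blockstore"
)

func main() {
	ctx := context.Background()
	bs := blockstore.NewMemory()

	blk := blocks.NewBlock([]byte("hello"))
	if err := bs.Put(ctx, blk); err != nil { // previously bs.Put(blk)
		panic(err)
	}

	got, err := bs.Get(ctx, blk.Cid()) // previously bs.Get(blk.Cid())
	if err != nil {
		panic(err)
	}
	fmt.Println(string(got.RawData()))
}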
- err = u.DeleteBlock(b1.Cid()) + err = u.DeleteBlock(ctx, b1.Cid()) require.NoError(t, err) - has, _ = u.Has(b1.Cid()) + has, _ = u.Has(ctx, b1.Cid()) require.False(t, has) - has, _ = m1.Has(b1.Cid()) + has, _ = m1.Has(ctx, b1.Cid()) require.False(t, has) - has, _ = m2.Has(b1.Cid()) + has, _ = m2.Has(ctx, b1.Cid()) require.False(t, has) // check that AllKeysChan returns b0 and b2, twice (once per backing store) diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 1578d7465..4bb7835d2 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 335e47cc7..792ad55b9 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 98a3986a2..6bd89b2b4 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/params_2k.go b/build/params_2k.go index b9db0a467..84023c38c 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -47,6 +47,8 @@ var UpgradeHyperdriveHeight = abi.ChainEpoch(-16) var UpgradeChocolateHeight = abi.ChainEpoch(-17) +var UpgradeSnapDealsHeight = abi.ChainEpoch(-18) + var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } diff --git a/build/params_butterfly.go b/build/params_butterfly.go index 9a0018e73..e26fd78fa 100644 --- a/build/params_butterfly.go +++ b/build/params_butterfly.go @@ -41,6 +41,7 @@ const UpgradeNorwegianHeight = -14 const UpgradeTurboHeight = -15 const UpgradeHyperdriveHeight = -16 const UpgradeChocolateHeight = 6360 +const UpgradeSnapDealsHeight = 99999999 func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30)) diff --git a/build/params_calibnet.go b/build/params_calibnet.go index 8cd99d642..16d77c7e6 100644 --- a/build/params_calibnet.go +++ b/build/params_calibnet.go @@ -54,6 +54,8 @@ const UpgradeHyperdriveHeight = 420 const UpgradeChocolateHeight = 312746 +const UpgradeSnapDealsHeight = 99999999 + func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30)) policy.SetSupportedProofTypes( diff --git a/build/params_interop.go b/build/params_interop.go index de5ee9a12..66033937c 100644 --- a/build/params_interop.go +++ b/build/params_interop.go @@ -47,6 +47,7 @@ var UpgradeTurboHeight = abi.ChainEpoch(-15) var UpgradeHyperdriveHeight = abi.ChainEpoch(-16) var UpgradeChocolateHeight = abi.ChainEpoch(-17) +var UpgradeSnapDealsHeight = abi.ChainEpoch(-18) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/params_mainnet.go b/build/params_mainnet.go index 0c8c53ba8..a4781f1ff 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -62,18 +62,20 @@ const UpgradeNorwegianHeight = 665280 const UpgradeTurboHeight = 712320 // 2021-06-30T22:00:00Z -var UpgradeHyperdriveHeight = abi.ChainEpoch(892800) +const UpgradeHyperdriveHeight = 892800 // 2021-10-26T13:30:00Z -var UpgradeChocolateHeight = abi.ChainEpoch(1231620) +const UpgradeChocolateHeight = 1231620 + +var UpgradeSnapDealsHeight = abi.ChainEpoch(999999999999) func init() { if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { SetAddressNetwork(address.Mainnet) } - if os.Getenv("LOTUS_DISABLE_CHOCOLATE") == "1" { - UpgradeChocolateHeight = math.MaxInt64 + if os.Getenv("LOTUS_DISABLE_SNAPDEALS") == "1" { + UpgradeSnapDealsHeight = math.MaxInt64 } Devnet = false diff --git a/build/params_shared_vals.go 
b/build/params_shared_vals.go index 0a242f6f2..704c84639 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -34,7 +34,7 @@ const NewestNetworkVersion = network.Version{{.latestNetworkVersion}} /* inline-gen start */ -const NewestNetworkVersion = network.Version14 +const NewestNetworkVersion = network.Version15 /* inline-gen end */ diff --git a/build/params_testground.go b/build/params_testground.go index 48b76f82c..539e06b45 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -99,6 +99,7 @@ var ( UpgradeTurboHeight abi.ChainEpoch = -14 UpgradeHyperdriveHeight abi.ChainEpoch = -15 UpgradeChocolateHeight abi.ChainEpoch = -16 + UpgradeSnapDealsHeight abi.ChainEpoch = -17 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/version.go b/build/version.go index cf1cd9a52..3656dc9ad 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.13.2-dev" +const BuildVersion = "1.13.3-dev" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index 249ce133f..57ea510bb 100644 --- a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -23,6 +23,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -50,6 +52,10 @@ func init() { builtin.RegisterActorState(builtin6.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var Methods = builtin4.MethodsAccount @@ -75,6 +81,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.AccountActorCodeID: return load6(store, act.Head) + case builtin7.AccountActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -100,6 +109,9 @@ func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, case actors.Version6: return make6(store, addr) + case actors.Version7: + return make7(store, addr) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -125,6 +137,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.AccountActorCodeID, nil + case actors.Version7: + return builtin7.AccountActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/account/v7.go b/chain/actors/builtin/account/v7.go new file mode 100644 index 000000000..883776cf8 --- /dev/null +++ b/chain/actors/builtin/account/v7.go @@ -0,0 +1,40 @@ +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + account7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/account" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, 
addr address.Address) (State, error) { + out := state7{store: store} + out.State = account7.State{Address: addr} + return &out, nil +} + +type state7 struct { + account7.State + store adt.Store +} + +func (s *state7) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go index ebfe2df2e..d93732999 100644 --- a/chain/actors/builtin/builtin.go +++ b/chain/actors/builtin/builtin.go @@ -23,46 +23,49 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" smoothing6 "github.com/filecoin-project/specs-actors/v6/actors/util/smoothing" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" - miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" - proof6 "github.com/filecoin-project/specs-actors/v6/actors/runtime/proof" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" ) -var SystemActorAddr = builtin6.SystemActorAddr -var BurntFundsActorAddr = builtin6.BurntFundsActorAddr -var CronActorAddr = builtin6.CronActorAddr +var SystemActorAddr = builtin7.SystemActorAddr +var BurntFundsActorAddr = builtin7.BurntFundsActorAddr +var CronActorAddr = builtin7.CronActorAddr var SaftAddress = makeAddress("t0122") var ReserveAddress = makeAddress("t090") var RootVerifierAddress = makeAddress("t080") var ( - ExpectedLeadersPerEpoch = builtin6.ExpectedLeadersPerEpoch + ExpectedLeadersPerEpoch = builtin7.ExpectedLeadersPerEpoch ) const ( - EpochDurationSeconds = builtin6.EpochDurationSeconds - EpochsInDay = builtin6.EpochsInDay - SecondsInDay = builtin6.SecondsInDay + EpochDurationSeconds = builtin7.EpochDurationSeconds + EpochsInDay = builtin7.EpochsInDay + SecondsInDay = builtin7.SecondsInDay ) const ( - MethodSend = builtin6.MethodSend - MethodConstructor = builtin6.MethodConstructor + MethodSend = builtin7.MethodSend + MethodConstructor = builtin7.MethodConstructor ) // These are all just type aliases across actor versions. In the future, that might change // and we might need to do something fancier. 
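// Illustrative sketch (not a hunk of this patch): because SectorInfo and
// PoStProof stay plain type aliases (now bound to the v7 proof types), callers
// keep constructing them through the lotus builtin package and pick up the new
// actors version transparently. The field values below are hypothetical
// placeholders.
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors/builtin"
)

func main() {
	si := builtin.SectorInfo{
		SealProof:    abi.RegisteredSealProof_StackedDrg32GiBV1_1,
		SectorNumber: abi.SectorNumber(1), // hypothetical sector number
		SealedCID:    cid.Undef,           // placeholder; a real sealed CID in practice
	}
	fmt.Printf("%+v\n", si)
}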
-type SectorInfo = proof6.SectorInfo -type PoStProof = proof6.PoStProof +type SectorInfo = proof7.SectorInfo +type PoStProof = proof7.PoStProof type FilterEstimate = smoothing0.FilterEstimate func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { - return miner6.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) + return miner7.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) } func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate { @@ -101,6 +104,12 @@ func FromV6FilterEstimate(v6 smoothing6.FilterEstimate) FilterEstimate { } +func FromV7FilterEstimate(v7 smoothing7.FilterEstimate) FilterEstimate { + + return (FilterEstimate)(v7) + +} + type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader) @@ -138,6 +147,9 @@ func ActorNameByCode(c cid.Cid) string { case builtin6.IsBuiltinActor(c): return builtin6.ActorNameByCode(c) + case builtin7.IsBuiltinActor(c): + return builtin7.ActorNameByCode(c) + default: return "" } @@ -169,6 +181,10 @@ func IsBuiltinActor(c cid.Cid) bool { return true } + if builtin7.IsBuiltinActor(c) { + return true + } + return false } @@ -198,6 +214,10 @@ func IsAccountActor(c cid.Cid) bool { return true } + if c == builtin7.AccountActorCodeID { + return true + } + return false } @@ -227,6 +247,10 @@ func IsStorageMinerActor(c cid.Cid) bool { return true } + if c == builtin7.StorageMinerActorCodeID { + return true + } + return false } @@ -256,6 +280,10 @@ func IsMultisigActor(c cid.Cid) bool { return true } + if c == builtin7.MultisigActorCodeID { + return true + } + return false } @@ -285,6 +313,10 @@ func IsPaymentChannelActor(c cid.Cid) bool { return true } + if c == builtin7.PaymentChannelActorCodeID { + return true + } + return false } diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go index 9178a44ab..f27a14ac7 100644 --- a/chain/actors/builtin/cron/cron.go +++ b/chain/actors/builtin/cron/cron.go @@ -17,6 +17,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func MakeState(store adt.Store, av actors.Version) (State, error) { @@ -40,6 +42,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -65,14 +70,17 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.CronActorCodeID, nil + case actors.Version7: + return builtin7.CronActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) } var ( - Address = builtin6.CronActorAddr - Methods = builtin6.MethodsCron + Address = builtin7.CronActorAddr + Methods = builtin7.MethodsCron ) type State interface { diff --git a/chain/actors/builtin/cron/v7.go b/chain/actors/builtin/cron/v7.go new file mode 100644 index 000000000..e5538c89f --- /dev/null +++ b/chain/actors/builtin/cron/v7.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/cron" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out 
:= state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = *cron7.ConstructState(cron7.BuiltInEntries()) + return &out, nil +} + +type state7 struct { + cron7.State + store adt.Store +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index ee06eeab7..737241ffe 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -25,6 +25,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -52,11 +54,15 @@ func init() { builtin.RegisterActorState(builtin6.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.InitActorAddr - Methods = builtin6.MethodsInit + Address = builtin7.InitActorAddr + Methods = builtin7.MethodsInit ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -80,6 +86,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.InitActorCodeID: return load6(store, act.Head) + case builtin7.InitActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -105,6 +114,9 @@ func MakeState(store adt.Store, av actors.Version, networkName string) (State, e case actors.Version6: return make6(store, networkName) + case actors.Version7: + return make7(store, networkName) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -130,6 +142,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.InitActorCodeID, nil + case actors.Version7: + return builtin7.InitActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/init/v7.go b/chain/actors/builtin/init/v7.go new file mode 100644 index 000000000..341aa52cd --- /dev/null +++ b/chain/actors/builtin/init/v7.go @@ -0,0 +1,114 @@ +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, networkName string) (State, error) { + out := state7{store: store} + + s, err := init7.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + init7.State + store adt.Store +} + +func (s *state7) ResolveAddress(address 
address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state7) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state7) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state7) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state7) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state7) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state7) Remove(addrs ...address.Address) (err error) { + m, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state7) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state7) AddressMap() (adt.Map, error) { + return adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 7e35f3919..6781b55e3 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -25,6 +25,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -56,11 +58,15 @@ func init() { builtin.RegisterActorState(builtin6.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.StorageMarketActorAddr - Methods = builtin6.MethodsMarket + Address = builtin7.StorageMarketActorAddr + Methods = builtin7.MethodsMarket ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -84,6 +90,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.StorageMarketActorCodeID: return load6(store, act.Head) + case builtin7.StorageMarketActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -109,6 +118,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -134,6 +146,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case 
actors.Version6: return builtin6.StorageMarketActorCodeID, nil + case actors.Version7: + return builtin7.StorageMarketActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -211,6 +226,9 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora case actors.Version6: return decodePublishStorageDealsReturn6(b) + case actors.Version7: + return decodePublishStorageDealsReturn7(b) + } return nil, xerrors.Errorf("unknown actor version %d", av) } diff --git a/chain/actors/builtin/market/v7.go b/chain/actors/builtin/market/v7.go new file mode 100644 index 000000000..553913146 --- /dev/null +++ b/chain/actors/builtin/market/v7.go @@ -0,0 +1,252 @@ +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + + market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + + s, err := market7.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + market7.State + store adt.Store +} + +func (s *state7) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state7) BalancesChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState7.State.EscrowTable) || !s.State.LockedTable.Equals(otherState7.State.LockedTable), nil +} + +func (s *state7) StatesChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState7.State.States), nil +} + +func (s *state7) States() (DealStates, error) { + stateArray, err := adt7.AsArray(s.store, s.State.States, market7.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates7{stateArray}, nil +} + +func (s *state7) ProposalsChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState7.State.Proposals), nil +} + +func (s *state7) Proposals() (DealProposals, error) { + proposalArray, err := adt7.AsArray(s.store, s.State.Proposals, market7.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals7{proposalArray}, nil +} + +func (s *state7) EscrowTable() (BalanceTable, error) { + bt, err := 
adt7.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable7{bt}, nil +} + +func (s *state7) LockedTable() (BalanceTable, error) { + bt, err := adt7.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable7{bt}, nil +} + +func (s *state7) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market7.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state7) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable7 struct { + *adt7.BalanceTable +} + +func (bt *balanceTable7) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt7.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates7 struct { + adt.Array +} + +func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal7 market7.DealState + found, err := s.Array.Get(uint64(dealID), &deal7) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV7DealState(deal7) + return &deal, true, nil +} + +func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds7 market7.DealState + return s.Array.ForEach(&ds7, func(idx int64) error { + return cb(abi.DealID(idx), fromV7DealState(ds7)) + }) +} + +func (s *dealStates7) decode(val *cbg.Deferred) (*DealState, error) { + var ds7 market7.DealState + if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV7DealState(ds7) + return &ds, nil +} + +func (s *dealStates7) array() adt.Array { + return s.Array +} + +func fromV7DealState(v7 market7.DealState) DealState { + return (DealState)(v7) +} + +type dealProposals7 struct { + adt.Array +} + +func (s *dealProposals7) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal7 market7.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal7) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + proposal := fromV7DealProposal(proposal7) + return &proposal, true, nil +} + +func (s *dealProposals7) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp7 market7.DealProposal + return s.Array.ForEach(&dp7, func(idx int64) error { + return cb(abi.DealID(idx), fromV7DealProposal(dp7)) + }) +} + +func (s *dealProposals7) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp7 market7.DealProposal + if err := dp7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + dp := fromV7DealProposal(dp7) + return &dp, nil +} + +func (s *dealProposals7) array() adt.Array { + return s.Array +} + +func fromV7DealProposal(v7 market7.DealProposal) DealProposal { + return (DealProposal)(v7) +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn7)(nil) + +func decodePublishStorageDealsReturn7(b []byte) (PublishStorageDealsReturn, error) { + var retval market7.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, xerrors.Errorf("failed to unmarshal 
PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn7{retval}, nil +} + +type publishStorageDealsReturn7 struct { + market7.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn7) IsDealValid(index uint64) (bool, error) { + + return r.ValidDeals.IsSet(index) + +} + +func (r *publishStorageDealsReturn7) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index 2669a05a6..2b6b78ebc 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -177,6 +177,7 @@ type SectorOnChainInfo struct { InitialPledge abi.TokenAmount ExpectedDayReward abi.TokenAmount ExpectedStoragePledge abi.TokenAmount + SectorKeyCID *cid.Cid } type SectorPreCommitInfo = miner0.SectorPreCommitInfo diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 1c7f47e11..e60ff8da8 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -35,6 +35,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -63,9 +65,13 @@ func init() { return load6(store, root) }) + builtin.RegisterActorState(builtin7.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) + } -var Methods = builtin6.MethodsMiner +var Methods = builtin7.MethodsMiner // Unchanged between v0, v2, v3, v4, and v5 actors var WPoStProvingPeriod = miner0.WPoStProvingPeriod @@ -102,6 +108,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.StorageMinerActorCodeID: return load6(store, act.Head) + case builtin7.StorageMinerActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -127,6 +136,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -152,6 +164,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.StorageMinerActorCodeID, nil + case actors.Version7: + return builtin7.StorageMinerActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -251,6 +266,7 @@ type SectorOnChainInfo struct { InitialPledge abi.TokenAmount ExpectedDayReward abi.TokenAmount ExpectedStoragePledge abi.TokenAmount + SectorKeyCID *cid.Cid } type SectorPreCommitInfo = miner0.SectorPreCommitInfo diff --git a/chain/actors/builtin/miner/state.go.template b/chain/actors/builtin/miner/state.go.template index 2ea6a905e..775631961 100644 --- a/chain/actors/builtin/miner/state.go.template +++ b/chain/actors/builtin/miner/state.go.template @@ -138,11 +138,22 @@ func (s *state{{.v}}) GetSectorExpiration(num abi.SectorNumber) (*SectorExpirati return nil, err } // NOTE: this can be optimized significantly. - // 1. If the sector is non-faulty, it will either expire on-time (can be +{{if (ge .v 7) -}} + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). +{{- else -}} + // 1. 
If the sector is non-faulty, it will either expire on-time (can be // learned from the sector info), or in the next quantized expiration // epoch (i.e., the first element in the partition's expiration queue. +{{- end}} +{{if (ge .v 6) -}} + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. +{{- else -}} // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. +{{- end}} + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error { @@ -554,8 +565,7 @@ func (p *partition{{.v}}) UnprovenSectors() (bitfield.BitField, error) { } func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo { -{{if (ge .v 2)}} - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v{{.v}}.SectorNumber, SealProof: v{{.v}}.SealProof, SealedCID: v{{.v}}.SealedCID, @@ -567,10 +577,11 @@ func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorO InitialPledge: v{{.v}}.InitialPledge, ExpectedDayReward: v{{.v}}.ExpectedDayReward, ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge, + {{if (ge .v 7)}} + SectorKeyCID: v{{.v}}.SectorKeyCID, + {{end}} } -{{else}} - return (SectorOnChainInfo)(v0) -{{end}} + return info } func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/utils.go b/chain/actors/builtin/miner/utils.go index 2f24e8454..5fafc31ef 100644 --- a/chain/actors/builtin/miner/utils.go +++ b/chain/actors/builtin/miner/utils.go @@ -67,3 +67,22 @@ func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi. return 0, xerrors.Errorf("unsupported network version") } + +// WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating +// new miner actors and new sectors +func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredPoStProof, error) { + switch ssize { + case 2 << 10: + return abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, nil + case 8 << 20: + return abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, nil + case 512 << 20: + return abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, nil + case 32 << 30: + return abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, nil + case 64 << 30: + return abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, nil + default: + return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) + } +} diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index 564bcbbc2..8bde8bf73 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -140,6 +140,7 @@ func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. 
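// Illustrative sketch (not a hunk of this patch): minimal usage of the
// WindowPoStProofTypeFromSectorSize helper added in
// chain/actors/builtin/miner/utils.go above, assuming a hypothetical 32 GiB
// sector size.
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

func main() {
	ssize := abi.SectorSize(32 << 30) // hypothetical sector size
	wpt, err := miner.WindowPoStProofTypeFromSectorSize(ssize)
	if err != nil {
		panic(err)
	}
	// prints the numeric value of abi.RegisteredPoStProof_StackedDrgWindow32GiBV1
	fmt.Println(wpt)
}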
+ stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error { @@ -505,9 +506,20 @@ func (p *partition0) UnprovenSectors() (bitfield.BitField, error) { } func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo { - - return (SectorOnChainInfo)(v0) - + info := SectorOnChainInfo{ + SectorNumber: v0.SectorNumber, + SealProof: v0.SealProof, + SealedCID: v0.SealedCID, + DealIDs: v0.DealIDs, + Activation: v0.Activation, + Expiration: v0.Expiration, + DealWeight: v0.DealWeight, + VerifiedDealWeight: v0.VerifiedDealWeight, + InitialPledge: v0.InitialPledge, + ExpectedDayReward: v0.ExpectedDayReward, + ExpectedStoragePledge: v0.ExpectedStoragePledge, + } + return info } func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go index fe0863111..bbfdd403e 100644 --- a/chain/actors/builtin/miner/v2.go +++ b/chain/actors/builtin/miner/v2.go @@ -138,6 +138,7 @@ func (s *state2) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner2.Deadline) error { @@ -535,8 +536,7 @@ func (p *partition2) UnprovenSectors() (bitfield.BitField, error) { } func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v2.SectorNumber, SealProof: v2.SealProof, SealedCID: v2.SealedCID, @@ -549,7 +549,7 @@ func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v2.ExpectedDayReward, ExpectedStoragePledge: v2.ExpectedStoragePledge, } - + return info } func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v3.go b/chain/actors/builtin/miner/v3.go index b0d5429ea..68505918a 100644 --- a/chain/actors/builtin/miner/v3.go +++ b/chain/actors/builtin/miner/v3.go @@ -140,6 +140,7 @@ func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. 
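// Illustrative sketch (not a hunk of this patch): how the lookup described in
// the NOTE above is typically consumed. GetSectorExpiration leaves Early at
// zero when the sector is expected to expire on time; "st" and "num" are
// assumed inputs supplied by the caller.
package minerexample

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// effectiveExpiration returns the epoch at which the sector actually expires,
// preferring a scheduled early (faulty) expiration when one is present.
func effectiveExpiration(st miner.State, num abi.SectorNumber) (abi.ChainEpoch, error) {
	exp, err := st.GetSectorExpiration(num)
	if err != nil {
		return 0, err
	}
	if exp.Early != 0 {
		return exp.Early, nil
	}
	return exp.OnTime, nil
}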
+ stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition3) UnprovenSectors() (bitfield.BitField, error) { } func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v3.SectorNumber, SealProof: v3.SealProof, SealedCID: v3.SealedCID, @@ -550,7 +550,7 @@ func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v3.ExpectedDayReward, ExpectedStoragePledge: v3.ExpectedStoragePledge, } - + return info } func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v4.go b/chain/actors/builtin/miner/v4.go index 7e5a9761a..5c40d4189 100644 --- a/chain/actors/builtin/miner/v4.go +++ b/chain/actors/builtin/miner/v4.go @@ -140,6 +140,7 @@ func (s *state4) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition4) UnprovenSectors() (bitfield.BitField, error) { } func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v4.SectorNumber, SealProof: v4.SealProof, SealedCID: v4.SealedCID, @@ -550,7 +550,7 @@ func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v4.ExpectedDayReward, ExpectedStoragePledge: v4.ExpectedStoragePledge, } - + return info } func fromV4SectorPreCommitOnChainInfo(v4 miner4.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v5.go b/chain/actors/builtin/miner/v5.go index 7f4aaf168..f717934f4 100644 --- a/chain/actors/builtin/miner/v5.go +++ b/chain/actors/builtin/miner/v5.go @@ -140,6 +140,7 @@ func (s *state5) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition5) UnprovenSectors() (bitfield.BitField, error) { } func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v5.SectorNumber, SealProof: v5.SealProof, SealedCID: v5.SealedCID, @@ -550,7 +550,7 @@ func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v5.ExpectedDayReward, ExpectedStoragePledge: v5.ExpectedStoragePledge, } - + return info } func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v6.go b/chain/actors/builtin/miner/v6.go index de5a22a10..7a9dfb0df 100644 --- a/chain/actors/builtin/miner/v6.go +++ b/chain/actors/builtin/miner/v6.go @@ -138,8 +138,9 @@ func (s *state6) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // 1. 
If the sector is non-faulty, it will either expire on-time (can be // learned from the sector info), or in the next quantized expiration // epoch (i.e., the first element in the partition's expiration queue. - // 2. If it's faulty, it will expire early within the first 14 entries + // 2. If it's faulty, it will expire early within the first 42 entries // of the expiration queue. + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner6.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition6) UnprovenSectors() (bitfield.BitField, error) { } func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v6.SectorNumber, SealProof: v6.SealProof, SealedCID: v6.SealedCID, @@ -550,7 +550,7 @@ func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v6.ExpectedDayReward, ExpectedStoragePledge: v6.ExpectedStoragePledge, } - + return info } func fromV6SectorPreCommitOnChainInfo(v6 miner6.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v7.go b/chain/actors/builtin/miner/v7.go new file mode 100644 index 000000000..e1b2520e4 --- /dev/null +++ b/chain/actors/builtin/miner/v7.go @@ -0,0 +1,571 @@ +package miner + +import ( + "bytes" + "errors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = miner7.State{} + return &out, nil +} + +type state7 struct { + miner7.State + store adt.Store +} + +type deadline7 struct { + miner7.Deadline + store adt.Store +} + +type partition7 struct { + miner7.Partition + store adt.Store +} + +func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state7) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state7) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state7) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state7) InitialPledge() (abi.TokenAmount, error) { + return 
s.State.InitialPledge, nil +} + +func (s *state7) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state7) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV7SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state7) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state7) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state7) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner7.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner7.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner7.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner7.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state7) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV7SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state7) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt7.AsMap(s.store, s.State.PreCommittedSectors, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner7.SectorPreCommitOnChainInfo + if 
err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV7SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state7) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner7.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info7 miner7.SectorOnChainInfo + if err := sectors.ForEach(&info7, func(_ int64) error { + info := fromV7SectorOnChainInfo(info7) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. + infos7, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos7)) + for i, info7 := range infos7 { + info := fromV7SectorOnChainInfo(*info7) + infos[i] = &info + } + return infos, nil +} + +func (s *state7) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state7) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state7) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state7) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state7) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state7) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline7{*dl, s.store}, nil +} + +func (s *state7) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner7.Deadline) error { + return cb(i, &deadline7{*dl, s.store}) + }) +} + +func (s *state7) NumDeadlines() (uint64, error) { + return miner7.WPoStPeriodDeadlines, nil +} + +func (s *state7) DeadlinesChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other7.Deadlines), nil 
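// Illustrative sketch (not part of this patch): DeadlinesChanged and the other
// *Changed methods in the versioned adapters compare root CIDs only when both
// sides are the same actors version; a cross-version comparison is always
// reported as a change. The types below are hypothetical stand-ins for the
// versioned state structs, kept minimal to show just that guard.
package main

import "fmt"

type State interface{ Root() string }

type state6 struct{ root string }
type state7 struct{ root string }

func (s *state6) Root() string { return s.root }
func (s *state7) Root() string { return s.root }

// deadlinesChanged mirrors the guard in the adapter: if the other state is not
// also v7, treat the upgrade itself as a change.
func deadlinesChanged(s *state7, other State) bool {
	other7, ok := other.(*state7)
	if !ok {
		// treat an upgrade as a change, always
		return true
	}
	return s.root != other7.root
}

func main() {
	cur := &state7{root: "bafy-deadlines-A"}
	fmt.Println(deadlinesChanged(cur, &state6{root: "bafy-deadlines-A"})) // true: version changed
	fmt.Println(deadlinesChanged(cur, &state7{root: "bafy-deadlines-A"})) // false: same root, same version
}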
+} + +func (s *state7) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state7) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + var pid *peer.ID + if peerID, err := peer.IDFromBytes(info.PeerId); err == nil { + pid = &peerID + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + + PeerId: pid, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + if info.PendingWorkerKey != nil { + mi.NewWorker = info.PendingWorkerKey.NewWorker + mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt + } + + return mi, nil +} + +func (s *state7) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state7) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state7) sectors() (adt.Array, error) { + return adt7.AsArray(s.store, s.Sectors, miner7.SectorsAmtBitwidth) +} + +func (s *state7) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner7.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV7SectorOnChainInfo(si), nil +} + +func (s *state7) precommits() (adt.Map, error) { + return adt7.AsMap(s.store, s.PreCommittedSectors, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner7.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV7SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state7) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner7.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner7.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline7) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition7{*p, d.store}, nil +} + +func (d *deadline7) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner7.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition7{part, d.store}) + }) +} + +func (d *deadline7) PartitionsChanged(other Deadline) (bool, error) { + other7, ok := other.(*deadline7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + 
return !d.Deadline.Partitions.Equals(other7.Deadline.Partitions), nil +} + +func (d *deadline7) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline7) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition7) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition7) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition7) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition7) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV7SectorOnChainInfo(v7 miner7.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v7.SectorNumber, + SealProof: v7.SealProof, + SealedCID: v7.SealedCID, + DealIDs: v7.DealIDs, + Activation: v7.Activation, + Expiration: v7.Expiration, + DealWeight: v7.DealWeight, + VerifiedDealWeight: v7.VerifiedDealWeight, + InitialPledge: v7.InitialPledge, + ExpectedDayReward: v7.ExpectedDayReward, + ExpectedStoragePledge: v7.ExpectedStoragePledge, + + SectorKeyCID: v7.SectorKeyCID, + } + return info +} + +func fromV7SectorPreCommitOnChainInfo(v7 miner7.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + + return SectorPreCommitOnChainInfo{ + Info: (SectorPreCommitInfo)(v7.Info), + PreCommitDeposit: v7.PreCommitDeposit, + PreCommitEpoch: v7.PreCommitEpoch, + DealWeight: v7.DealWeight, + VerifiedDealWeight: v7.VerifiedDealWeight, + } + +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/message7.go b/chain/actors/builtin/multisig/message7.go new file mode 100644 index 000000000..e7fb83e9b --- /dev/null +++ b/chain/actors/builtin/multisig/message7.go @@ -0,0 +1,71 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + multisig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message7 struct{ message0 } + +func (m message7) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig7.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init7.ExecParams{ + CodeCID: 
builtin7.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtin7.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go index ee725f7e5..f1b50475a 100644 --- a/chain/actors/builtin/multisig/multisig.go +++ b/chain/actors/builtin/multisig/multisig.go @@ -13,7 +13,7 @@ import ( "github.com/ipfs/go-cid" msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - msig6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/multisig" + msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -27,6 +27,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -58,6 +60,10 @@ func init() { builtin.RegisterActorState(builtin6.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } func Load(store adt.Store, act *types.Actor) (State, error) { @@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.MultisigActorCodeID: return load6(store, act.Head) + case builtin7.MultisigActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version, signers []address.Address, th case actors.Version6: return make6(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + case actors.Version7: + return make7(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.MultisigActorCodeID, nil + case actors.Version7: + return builtin7.MultisigActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -156,7 +171,7 @@ type State interface { type Transaction = msig0.Transaction -var Methods = builtin6.MethodsMultisig +var Methods = builtin7.MethodsMultisig func Message(version actors.Version, from address.Address) MessageBuilder { switch version { @@ -178,6 +193,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder { case actors.Version6: return message6{message0{from}} + + case actors.Version7: + return message7{message0{from}} default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -201,13 +219,13 @@ type MessageBuilder interface { } // this type is the same between v0 and v2 -type ProposalHashData = msig6.ProposalHashData -type ProposeReturn = msig6.ProposeReturn -type ProposeParams = msig6.ProposeParams -type ApproveReturn = msig6.ApproveReturn +type ProposalHashData = msig7.ProposalHashData +type ProposeReturn = msig7.ProposeReturn +type ProposeParams = msig7.ProposeParams +type ApproveReturn = msig7.ApproveReturn func 
txnParams(id uint64, data *ProposalHashData) ([]byte, error) { - params := msig6.TxnIDParams{ID: msig6.TxnID(id)} + params := msig7.TxnIDParams{ID: msig7.TxnID(id)} if data != nil { if data.Requester.Protocol() != address.ID { return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) diff --git a/chain/actors/builtin/multisig/v7.go b/chain/actors/builtin/multisig/v7.go new file mode 100644 index 000000000..bbe41f3db --- /dev/null +++ b/chain/actors/builtin/multisig/v7.go @@ -0,0 +1,119 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state7{store: store} + out.State = msig7.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt7.StoreEmptyMap(store, builtin7.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state7 struct { + msig7.State + store adt.Store +} + +func (s *state7) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state7) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state7) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state7) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state7) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state7) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state7) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt7.AsMap(s.store, s.State.PendingTxns, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig7.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state7) PendingTxnChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other7.PendingTxns), nil +} + +func (s *state7) transactions() (adt.Map, error) { + return adt7.AsMap(s.store, s.PendingTxns, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx 
msig7.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/paych/message7.go b/chain/actors/builtin/paych/message7.go new file mode 100644 index 000000000..41dfa1bdd --- /dev/null +++ b/chain/actors/builtin/paych/message7.go @@ -0,0 +1,74 @@ +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message7 struct{ from address.Address } + +func (m message7) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych7.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init7.ExecParams{ + CodeCID: builtin7.PaymentChannelActorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin7.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message7) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych7.UpdateChannelStateParams{ + Sv: *sv, + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message7) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.Settle, + }, nil +} + +func (m message7) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go index eea3659f8..f807b33ed 100644 --- a/chain/actors/builtin/paych/paych.go +++ b/chain/actors/builtin/paych/paych.go @@ -27,6 +27,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -58,6 +60,10 @@ func init() { builtin.RegisterActorState(builtin6.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } // Load returns an abstract copy of payment channel state, irregardless of actor version @@ -82,6 +88,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.PaymentChannelActorCodeID: return load6(store, act.Head) + 
case builtin7.PaymentChannelActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -107,6 +116,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -132,6 +144,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.PaymentChannelActorCodeID, nil + case actors.Version7: + return builtin7.PaymentChannelActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -185,7 +200,7 @@ func DecodeSignedVoucher(s string) (*SignedVoucher, error) { return &sv, nil } -var Methods = builtin6.MethodsPaych +var Methods = builtin7.MethodsPaych func Message(version actors.Version, from address.Address) MessageBuilder { switch version { @@ -208,6 +223,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder { case actors.Version6: return message6{from} + case actors.Version7: + return message7{from} + default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } diff --git a/chain/actors/builtin/paych/v7.go b/chain/actors/builtin/paych/v7.go new file mode 100644 index 000000000..ce09ea2e4 --- /dev/null +++ b/chain/actors/builtin/paych/v7.go @@ -0,0 +1,114 @@ +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = paych7.State{} + return &out, nil +} + +type state7 struct { + paych7.State + store adt.Store + lsAmt *adt7.Array +} + +// Channel owner, who has funded the actor +func (s *state7) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state7) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state7) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state7) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state7) getOrLoadLsAmt() (*adt7.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt7.AsArray(s.store, s.State.LaneStates, paych7.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state7) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state7) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := 
s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. + var ls paych7.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState7{ls}) + }) +} + +type laneState7 struct { + paych7.LaneState +} + +func (ls *laneState7) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState7) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index 84bd6948a..9b73cdd60 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -26,6 +26,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -53,11 +55,15 @@ func init() { builtin.RegisterActorState(builtin6.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.StoragePowerActorAddr - Methods = builtin6.MethodsPower + Address = builtin7.StoragePowerActorAddr + Methods = builtin7.MethodsPower ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.StoragePowerActorCodeID: return load6(store, act.Head) + case builtin7.StoragePowerActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.StoragePowerActorCodeID, nil + case actors.Version7: + return builtin7.StoragePowerActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/power/v7.go b/chain/actors/builtin/power/v7.go new file mode 100644 index 000000000..af1761cb2 --- /dev/null +++ b/chain/actors/builtin/power/v7.go @@ -0,0 +1,187 @@ +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + power7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/power" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + + s, err := power7.ConstructState(store) + if err != nil { + 
return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + power7.State + store adt.Store +} + +func (s *state7) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state7) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state7) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state7) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power7.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state7) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state7) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV7FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state7) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state7) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state7) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power7.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state7) ClaimsChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other7.State.Claims), nil +} + +func (s *state7) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state7) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state7) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state7) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +func (s *state7) claims() (adt.Map, error) { + return adt7.AsMap(s.store, s.Claims, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power7.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV7Claim(ci), nil +} + +func fromV7Claim(v7 power7.Claim) Claim { + return Claim{ + RawBytePower: v7.RawBytePower, + 
QualityAdjPower: v7.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index 38d5b5b87..b6ee2f146 100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -21,6 +21,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" @@ -51,11 +53,15 @@ func init() { builtin.RegisterActorState(builtin6.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.RewardActorAddr - Methods = builtin6.MethodsReward + Address = builtin7.RewardActorAddr + Methods = builtin7.MethodsReward ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -79,6 +85,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.RewardActorCodeID: return load6(store, act.Head) + case builtin7.RewardActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -104,6 +113,9 @@ func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.Storage case actors.Version6: return make6(store, currRealizedPower) + case actors.Version7: + return make7(store, currRealizedPower) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -129,6 +141,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.RewardActorCodeID, nil + case actors.Version7: + return builtin7.RewardActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/reward/v7.go b/chain/actors/builtin/reward/v7.go new file mode 100644 index 000000000..368bb3abd --- /dev/null +++ b/chain/actors/builtin/reward/v7.go @@ -0,0 +1,98 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + reward7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/reward" + smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state7{store: store} + out.State = *reward7.ConstructState(currRealizedPower) + return &out, nil +} + +type state7 struct { + reward7.State + store adt.Store +} + +func (s *state7) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state7) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state7) ThisEpochBaselinePower() 
(abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state7) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state7) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state7) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state7) CumsumBaseline() (reward7.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state7) CumsumRealized() (reward7.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state7) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner7.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing7.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state7) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner7.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing7.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go index 3d6105c38..fb7515f35 100644 --- a/chain/actors/builtin/system/system.go +++ b/chain/actors/builtin/system/system.go @@ -17,10 +17,12 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) var ( - Address = builtin6.SystemActorAddr + Address = builtin7.SystemActorAddr ) func MakeState(store adt.Store, av actors.Version) (State, error) { @@ -44,6 +46,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -69,6 +74,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.SystemActorCodeID, nil + case actors.Version7: + return builtin7.SystemActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/system/v7.go b/chain/actors/builtin/system/v7.go new file mode 100644 index 000000000..813add5fb --- /dev/null +++ b/chain/actors/builtin/system/v7.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/system" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = system7.State{} + return &out, nil +} + +type state7 struct { + system7.State + store adt.Store +} + +func (s *state7) GetState() 
interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/v7.go b/chain/actors/builtin/verifreg/v7.go new file mode 100644 index 000000000..9b2ca928a --- /dev/null +++ b/chain/actors/builtin/verifreg/v7.go @@ -0,0 +1,75 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state7{store: store} + + s, err := verifreg7.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + verifreg7.State + store adt.Store +} + +func (s *state7) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state7) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version7, s.verifiedClients, addr) +} + +func (s *state7) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version7, s.verifiers, addr) +} + +func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version7, s.verifiers, cb) +} + +func (s *state7) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version7, s.verifiedClients, cb) +} + +func (s *state7) verifiedClients() (adt.Map, error) { + return adt7.AsMap(s.store, s.VerifiedClients, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) verifiers() (adt.Map, error) { + return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index 31e8e5a08..f6281334d 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -21,6 +21,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -53,11 +55,15 @@ func init() { return load6(store, root) }) + builtin.RegisterActorState(builtin7.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) + } var ( - Address = builtin6.VerifiedRegistryActorAddr - Methods = builtin6.MethodsVerifiedRegistry + Address = builtin7.VerifiedRegistryActorAddr + Methods = builtin7.MethodsVerifiedRegistry ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.VerifiedRegistryActorCodeID: return 
load6(store, act.Head) + case builtin7.VerifiedRegistryActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Addres case actors.Version6: return make6(store, rootKeyAddress) + case actors.Version7: + return make7(store, rootKeyAddress) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.VerifiedRegistryActorCodeID, nil + case actors.Version7: + return builtin7.VerifiedRegistryActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index e00a6ae10..f51da7aa7 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -40,14 +40,19 @@ import ( miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" verifreg6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/verifreg" - paych6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/paych" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" + + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" ) const ( - ChainFinality = miner6.ChainFinality + ChainFinality = miner7.ChainFinality SealRandomnessLookback = ChainFinality - PaychSettleDelay = paych6.SettleDelay - MaxPreCommitRandomnessLookback = builtin6.EpochsInDay + SealRandomnessLookback + PaychSettleDelay = paych7.SettleDelay + MaxPreCommitRandomnessLookback = builtin7.EpochsInDay + SealRandomnessLookback ) // SetSupportedProofTypes sets supported proof types, across all actor versions. @@ -72,6 +77,8 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { miner6.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner7.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + AddSupportedProofTypes(types...) } @@ -119,6 +126,15 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { miner6.WindowPoStProofTypes[wpp] = struct{}{} + miner7.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err = t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner7.WindowPoStProofTypes[wpp] = struct{}{} + } } @@ -139,11 +155,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { miner6.PreCommitChallengeDelay = delay + miner7.PreCommitChallengeDelay = delay + } // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. func GetPreCommitChallengeDelay() abi.ChainEpoch { - return miner6.PreCommitChallengeDelay + return miner7.PreCommitChallengeDelay } // SetConsensusMinerMinPower sets the minimum power of an individual miner must @@ -173,6 +191,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) { policy.ConsensusMinerMinPower = p } + for _, policy := range builtin7.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + } // SetMinVerifiedDealSize sets the minimum size of a verified deal. 
This should @@ -191,6 +213,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) { verifreg6.MinVerifiedDealSize = size + verifreg7.MinVerifiedDealSize = size + } func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) { @@ -220,6 +244,10 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (a return miner6.MaxProveCommitDuration[t], nil + case actors.Version7: + + return miner7.MaxProveCommitDuration[t], nil + default: return 0, xerrors.Errorf("unsupported actors version") } @@ -255,6 +283,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) { Denominator: denom, } + market7.ProviderCollateralSupplyTarget = builtin7.BigFrac{ + Numerator: num, + Denominator: denom, + } + } func DealProviderCollateralBounds( @@ -298,13 +331,18 @@ func DealProviderCollateralBounds( min, max := market6.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) return min, max, nil + case actors.Version7: + + min, max := market7.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + default: return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version") } } func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { - return market6.DealDurationBounds(pieceSize) + return market7.DealDurationBounds(pieceSize) } // Sets the challenge window and scales the proving period to match (such that @@ -345,6 +383,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) { // scale it if we're scaling the challenge period. miner6.WPoStDisputeWindow = period * 30 + miner7.WPoStChallengeWindow = period + miner7.WPoStProvingPeriod = period * abi.ChainEpoch(miner7.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner7.WPoStDisputeWindow = period * 30 + } func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { @@ -357,15 +402,15 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { } func GetMaxSectorExpirationExtension() abi.ChainEpoch { - return miner6.MaxSectorExpirationExtension + return miner7.MaxSectorExpirationExtension } func GetMinSectorExpiration() abi.ChainEpoch { - return miner6.MinSectorExpiration + return miner7.MinSectorExpiration } func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) { - sectorsPerPart, err := builtin6.PoStProofWindowPoStPartitionSectors(p) + sectorsPerPart, err := builtin7.PoStProofWindowPoStPartitionSectors(p) if err != nil { return 0, err } @@ -378,8 +423,8 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e func GetDefaultSectorSize() abi.SectorSize { // supported sector sizes are the same across versions. 
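// Illustrative sketch (not part of this patch): SetWPoStChallengeWindow, extended
// above to cover miner7, keeps the proving period and dispute window in lockstep
// with the challenge window. With the usual mainnet defaults (30 s epochs,
// 48 deadlines, a 60-epoch challenge window) the derived values work out as
// below; the constants here are restated for the sketch only, the authoritative
// values live in the specs-actors miner packages.
package main

import "fmt"

func main() {
	const (
		epochsPerWindow      = 60 // 30-minute challenge window at 30 s epochs
		wPoStPeriodDeadlines = 48 // deadlines per proving period
	)

	provingPeriod := epochsPerWindow * wPoStPeriodDeadlines // 2880 epochs = 24 h
	disputeWindow := epochsPerWindow * 30                   // 1800 epochs, i.e. 2x the 900-epoch finality

	fmt.Println(provingPeriod, disputeWindow) // 2880 1800
}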
- szs := make([]abi.SectorSize, 0, len(miner6.PreCommitSealProofTypesV8)) - for spt := range miner6.PreCommitSealProofTypesV8 { + szs := make([]abi.SectorSize, 0, len(miner7.PreCommitSealProofTypesV8)) + for spt := range miner7.PreCommitSealProofTypesV8 { ss, err := spt.SectorSize() if err != nil { panic(err) @@ -404,7 +449,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime } - return builtin6.SealProofPoliciesV11[proof].SectorMaxLifetime + return builtin7.SealProofPoliciesV11[proof].SectorMaxLifetime } func GetAddressedSectorsMax(nwVer network.Version) (int, error) { @@ -432,6 +477,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) { case actors.Version6: return miner6.AddressedSectorsMax, nil + case actors.Version7: + return miner7.AddressedSectorsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -469,6 +517,10 @@ func GetDeclarationsMax(nwVer network.Version) (int, error) { return miner6.DeclarationsMax, nil + case actors.Version7: + + return miner7.DeclarationsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -505,6 +557,10 @@ func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, ba return miner6.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + case actors.Version7: + + return miner7.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } @@ -541,6 +597,10 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base return miner6.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + case actors.Version7: + + return miner7.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } diff --git a/chain/actors/version.go b/chain/actors/version.go index 7b7a6393a..af51161c9 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -20,9 +20,9 @@ const ({{range .actorVersions}} /* inline-gen start */ -var LatestVersion = 6 +var LatestVersion = 7 -var Versions = []int{0, 2, 3, 4, 5, 6} +var Versions = []int{0, 2, 3, 4, 5, 6, 7} const ( Version0 Version = 0 @@ -31,6 +31,7 @@ const ( Version4 Version = 4 Version5 Version = 5 Version6 Version = 6 + Version7 Version = 7 ) /* inline-gen end */ @@ -50,6 +51,8 @@ func VersionForNetwork(version network.Version) (Version, error) { return Version5, nil case network.Version14: return Version6, nil + case network.Version15: + return Version7, nil default: return -1, fmt.Errorf("unsupported network version %d", version) } diff --git a/chain/checkpoint.go b/chain/checkpoint.go index a3660a45c..4f8310593 100644 --- a/chain/checkpoint.go +++ b/chain/checkpoint.go @@ -13,7 +13,7 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e return xerrors.Errorf("called with empty tsk") } - ts, err := syncer.ChainStore().LoadTipSet(tsk) + ts, err := syncer.ChainStore().LoadTipSet(ctx, tsk) if err != nil { tss, err := syncer.Exchange.GetBlocks(ctx, tsk, 1) if err != nil { @@ -28,7 +28,7 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e return xerrors.Errorf("failed to switch chain when syncing checkpoint: %w", err) } - if err := syncer.ChainStore().SetCheckpoint(ts); err != nil { + if err := syncer.ChainStore().SetCheckpoint(ctx, ts); err != nil { return xerrors.Errorf("failed to set the chain 
checkpoint: %w", err) } @@ -41,7 +41,7 @@ func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error { return nil } - if anc, err := syncer.store.IsAncestorOf(ts, hts); err == nil && anc { + if anc, err := syncer.store.IsAncestorOf(ctx, ts, hts); err == nil && anc { return nil } @@ -50,7 +50,7 @@ func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error { return xerrors.Errorf("failed to collect chain for checkpoint: %w", err) } - if err := syncer.ChainStore().SetHead(ts); err != nil { + if err := syncer.ChainStore().SetHead(ctx, ts); err != nil { return xerrors.Errorf("failed to set the chain head: %w", err) } return nil diff --git a/chain/consensus/filcns/compute_state.go b/chain/consensus/filcns/compute_state.go index 3c333298e..7000af303 100644 --- a/chain/consensus/filcns/compute_state.go +++ b/chain/consensus/filcns/compute_state.go @@ -28,6 +28,7 @@ import ( exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported" exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" exported6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/exported" + exported7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/exported" /* inline-gen end */ @@ -59,6 +60,7 @@ func NewActorRegistry() *vm.ActorRegistry { inv.Register(vm.ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...) inv.Register(vm.ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...) inv.Register(vm.ActorsVersionPredicate(actors.Version6), exported6.BuiltinActors()...) + inv.Register(vm.ActorsVersionPredicate(actors.Version7), exported7.BuiltinActors()...) /* inline-gen end */ @@ -289,7 +291,7 @@ func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag var parentEpoch abi.ChainEpoch pstate := blks[0].ParentStateRoot if blks[0].Height > 0 { - parent, err := sm.ChainStore().GetBlock(blks[0].Parents[0]) + parent, err := sm.ChainStore().GetBlock(ctx, blks[0].Parents[0]) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err) } @@ -299,7 +301,7 @@ func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag r := rand.NewStateRand(sm.ChainStore(), ts.Cids(), sm.Beacon()) - blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ts) + blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ctx, ts) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err) } diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go index 883edd9a1..328fc8ec1 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -90,7 +90,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) h := b.Header - baseTs, err := filec.store.LoadTipSet(types.NewTipSetKey(h.Parents...)) + baseTs, err := filec.store.LoadTipSet(ctx, types.NewTipSetKey(h.Parents...)) if err != nil { return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) } @@ -102,7 +102,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) return xerrors.Errorf("failed to get lookback tipset for block: %w", err) } - prevBeacon, err := filec.store.GetLatestBeaconEntry(baseTs) + prevBeacon, err := filec.store.GetLatestBeaconEntry(ctx, baseTs) if err != nil { return xerrors.Errorf("failed to get latest beacon entry: %w", err) } @@ -171,7 +171,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) } 
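// chain/checkpoint.go, compute_state.go and filecoin.go above are part of one mechanical
// refactor: ChainStore accessors (LoadTipSet, GetBlock, BlockMsgsForTipset,
// GetLatestBeaconEntry, PutMessage, ...) now take a context.Context as their first
// argument so cancellation and tracing reach the underlying blockstore. A minimal sketch
// of the pattern, with a hypothetical in-memory store standing in for the real ChainStore:
package main

import (
	"context"
	"fmt"
)

type tipSetKey string

type chainStore struct {
	tipsets map[tipSetKey]string
}

// after the refactor: accept ctx and honour cancellation before doing any work
func (s *chainStore) LoadTipSet(ctx context.Context, tsk tipSetKey) (string, error) {
	if err := ctx.Err(); err != nil {
		return "", err
	}
	ts, ok := s.tipsets[tsk]
	if !ok {
		return "", fmt.Errorf("tipset %s not found", tsk)
	}
	return ts, nil
}

func main() {
	ctx := context.Background()
	cs := &chainStore{tipsets: map[tipSetKey]string{"head": "tipset@height-100"}}

	// callers simply thread their ctx through, as the diff does for
	// syncer.ChainStore().LoadTipSet(ctx, tsk) and friends
	ts, err := cs.LoadTipSet(ctx, "head")
	fmt.Println(ts, err)
}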
if stateroot != h.ParentStateRoot { - msgs, err := filec.store.MessagesForTipset(baseTs) + msgs, err := filec.store.MessagesForTipset(ctx, baseTs) if err != nil { log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) } else { @@ -519,7 +519,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) } - c, err := store.PutMessage(tmpbs, m) + c, err := store.PutMessage(ctx, tmpbs, m) if err != nil { return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) } @@ -553,7 +553,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err) } - c, err := store.PutMessage(tmpbs, m) + c, err := store.PutMessage(ctx, tmpbs, m) if err != nil { return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) } diff --git a/chain/consensus/filcns/mine.go b/chain/consensus/filcns/mine.go index bbda35fcf..35e38883d 100644 --- a/chain/consensus/filcns/mine.go +++ b/chain/consensus/filcns/mine.go @@ -15,7 +15,7 @@ import ( ) func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) { - pts, err := filec.sm.ChainStore().LoadTipSet(bt.Parents) + pts, err := filec.sm.ChainStore().LoadTipSet(ctx, bt.Parents) if err != nil { return nil, xerrors.Errorf("failed to load parent tipset: %w", err) } @@ -59,14 +59,14 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api. blsSigs = append(blsSigs, msg.Signature) blsMessages = append(blsMessages, &msg.Message) - c, err := filec.sm.ChainStore().PutMessage(&msg.Message) + c, err := filec.sm.ChainStore().PutMessage(ctx, &msg.Message) if err != nil { return nil, err } blsMsgCids = append(blsMsgCids, c) - } else { - c, err := filec.sm.ChainStore().PutMessage(msg) + } else if msg.Signature.Type == crypto.SigTypeSecp256k1 { + c, err := filec.sm.ChainStore().PutMessage(ctx, msg) if err != nil { return nil, err } @@ -74,6 +74,8 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api. 
secpkMsgCids = append(secpkMsgCids, c) secpkMessages = append(secpkMessages, msg) + } else { + return nil, xerrors.Errorf("unknown sig type: %d", msg.Signature.Type) } } diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go index cf4c62bf3..863193180 100644 --- a/chain/consensus/filcns/upgrades.go +++ b/chain/consensus/filcns/upgrades.go @@ -6,6 +6,7 @@ import ( "time" "github.com/filecoin-project/specs-actors/v6/actors/migration/nv14" + "github.com/filecoin-project/specs-actors/v7/actors/migration/nv15" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" @@ -156,6 +157,22 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule { StopWithin: 5, }}, Expensive: true, + }, { + Height: build.UpgradeSnapDealsHeight, + Network: network.Version15, + Migration: UpgradeActorsV7, + PreMigrations: []stmgr.PreMigration{{ + PreMigration: PreUpgradeActorsV7, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: PreUpgradeActorsV7, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, }, } @@ -625,7 +642,7 @@ func splitGenesisMultisig0(ctx context.Context, em stmgr.ExecMonitor, addr addre // TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting func resetGenesisMsigs0(ctx context.Context, sm *stmgr.StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error { - gb, err := sm.ChainStore().GetGenesis() + gb, err := sm.ChainStore().GetGenesis(ctx) if err != nil { return xerrors.Errorf("getting genesis block: %w", err) } @@ -1170,7 +1187,94 @@ func upgradeActorsV6Common( // Perform the migration newHamtRoot, err := nv14.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) if err != nil { - return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err) + return cid.Undef, xerrors.Errorf("upgrading to actors v6: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + +func UpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv15.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v6 state: %w", err) + } + + return newRoot, nil +} + +func PreUpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. 
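// The new UpgradeActorsV7 / PreUpgradeActorsV7 functions size the nv15 migration worker
// pool from the host CPU count: the blocking migration at the upgrade epoch keeps all but
// three CPUs, while the background pre-migration uses roughly half so the node can keep
// syncing. A small runnable sketch of just that sizing logic (the nv15.Config fields shown
// in the diff are not reproduced here):
package main

import (
	"fmt"
	"runtime"
)

// workers for the migration at the upgrade epoch: all CPUs except 3, minimum 1
func migrationWorkers() int {
	n := runtime.NumCPU() - 3
	if n <= 0 {
		n = 1
	}
	return n
}

// workers for the pre-migration: about half the CPUs, minimum 1
func preMigrationWorkers() int {
	n := runtime.NumCPU()
	if n <= 4 {
		return 1
	}
	return n / 2
}

func main() {
	fmt.Println("migration workers:", migrationWorkers())
	fmt.Println("pre-migration workers:", preMigrationWorkers())
}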
+ workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + + config := nv15.Config{MaxWorkers: uint(workerCount)} + _, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config) + return err +} + +func upgradeActorsV7Common( + ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv15.Config, +) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync()) + store := store.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion4 { + return cid.Undef, xerrors.Errorf( + "expected state root version 4 for actors v7 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv15.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v7: %w", err) } // Persist the result. diff --git a/chain/events/events_test.go b/chain/events/events_test.go index 61dd25fbb..5f52cbd92 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -87,7 +87,7 @@ func (fcs *fakeCS) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ( } // copied from the chainstore - revert, apply, err := store.ReorgOps(func(tsk types.TipSetKey) (*types.TipSet, error) { + revert, apply, err := store.ReorgOps(ctx, func(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { return fcs.ChainGetTipSet(ctx, tsk) }, fromTs, toTs) if err != nil { diff --git a/chain/events/state/mock/api.go b/chain/events/state/mock/api.go index 2ed48dc39..7a73355a5 100644 --- a/chain/events/state/mock/api.go +++ b/chain/events/state/mock/api.go @@ -27,11 +27,11 @@ func NewMockAPI(bs blockstore.Blockstore) *MockAPI { } func (m *MockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { - return m.bs.Has(c) + return m.bs.Has(ctx, c) } func (m *MockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { - blk, err := m.bs.Get(c) + blk, err := m.bs.Get(ctx, c) if err != nil { return nil, xerrors.Errorf("blockstore get: %w", err) } diff --git a/chain/exchange/server.go b/chain/exchange/server.go index 7c1624e57..37d49d7bc 100644 --- a/chain/exchange/server.go +++ b/chain/exchange/server.go @@ -136,7 +136,7 @@ func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Re _, span := trace.StartSpan(ctx, "chainxchg.ServiceRequest") defer span.End() - chain, err := collectChainSegment(s.cs, req) + chain, err := collectChainSegment(ctx, s.cs, req) if err != nil { log.Warn("block sync request: collectChainSegment failed: ", err) return &Response{ @@ -156,13 +156,13 @@ func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Re }, nil } -func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) { +func collectChainSegment(ctx context.Context, cs *store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) { var bstips []*BSTipSet cur := req.head for { var bst BSTipSet - ts, err := cs.LoadTipSet(cur) + ts, err := cs.LoadTipSet(ctx, cur) if err != nil { return nil, xerrors.Errorf("failed loading tipset %s: %w", cur, err) } @@ -172,7 +172,7 @@ func collectChainSegment(cs 
*store.ChainStore, req *validatedRequest) ([]*BSTipS } if req.options.IncludeMessages { - bmsgs, bmincl, smsgs, smincl, err := gatherMessages(cs, ts) + bmsgs, bmincl, smsgs, smincl, err := gatherMessages(ctx, cs, ts) if err != nil { return nil, xerrors.Errorf("gather messages failed: %w", err) } @@ -197,14 +197,14 @@ func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipS } } -func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) { +func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) { blsmsgmap := make(map[cid.Cid]uint64) secpkmsgmap := make(map[cid.Cid]uint64) var secpkincl, blsincl [][]uint64 var blscids, secpkcids []cid.Cid for _, block := range ts.Blocks() { - bc, sc, err := cs.ReadMsgMetaCids(block.Messages) + bc, sc, err := cs.ReadMsgMetaCids(ctx, block.Messages) if err != nil { return nil, nil, nil, nil, err } @@ -237,12 +237,12 @@ func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [ secpkincl = append(secpkincl, smi) } - blsmsgs, err := cs.LoadMessagesFromCids(blscids) + blsmsgs, err := cs.LoadMessagesFromCids(ctx, blscids) if err != nil { return nil, nil, nil, nil, err } - secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids) + secpkmsgs, err := cs.LoadSignedMessagesFromCids(ctx, secpkcids) if err != nil { return nil, nil, nil, nil, err } diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 69ab32d58..461a826e8 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -9,6 +9,8 @@ import ( "sync/atomic" "time" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/go-state-types/network" @@ -239,7 +241,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS genfb := &types.FullBlock{Header: genb.Genesis} gents := store.NewFullTipSet([]*types.FullBlock{genfb}) - if err := cs.SetGenesis(genb.Genesis); err != nil { + if err := cs.SetGenesis(context.TODO(), genb.Genesis); err != nil { return nil, xerrors.Errorf("set genesis failed: %w", err) } @@ -469,7 +471,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, return nil, xerrors.Errorf("making a block for next tipset failed: %w", err) } - if err := cg.cs.PersistBlockHeaders(fblk.Header); err != nil { + if err := cg.cs.PersistBlockHeaders(context.TODO(), fblk.Header); err != nil { return nil, xerrors.Errorf("chainstore AddBlock: %w", err) } @@ -686,6 +688,10 @@ func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri panic("not supported") } +func (m genFakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + panic("not supported") +} + func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { panic("not supported") } diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index 29f03e2af..64fd76d2b 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -479,6 +479,10 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca verifNeeds := make(map[address.Address]abi.PaddedPieceSize) var sum abi.PaddedPieceSize + csc := func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { + return big.Zero(), nil + } + vmopt := vm.VMOpts{ StateBase: 
stateroot, Epoch: 0, @@ -486,7 +490,7 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca Bstore: cs.StateBlockstore(), Actors: filcns.NewActorRegistry(), Syscalls: mkFakedSigSyscalls(sys), - CircSupplyCalc: nil, + CircSupplyCalc: csc, NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version { return nv }, @@ -591,7 +595,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto if err != nil { return nil, xerrors.Errorf("serializing msgmeta failed: %w", err) } - if err := bs.Put(mmb); err != nil { + if err := bs.Put(ctx, mmb); err != nil { return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err) } @@ -621,7 +625,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto return nil, xerrors.Errorf("filecoinGenesisCid != gblk.Cid") } - if err := bs.Put(gblk); err != nil { + if err := bs.Put(ctx, gblk); err != nil { return nil, xerrors.Errorf("failed writing filecoin genesis block to blockstore: %w", err) } @@ -652,7 +656,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto return nil, xerrors.Errorf("serializing block header failed: %w", err) } - if err := bs.Put(sb); err != nil { + if err := bs.Put(ctx, sb); err != nil { return nil, xerrors.Errorf("putting header to blockstore: %w", err) } diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index edacfe304..a688a0324 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -6,6 +6,8 @@ import ( "fmt" "math/rand" + runtime7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" "github.com/ipfs/go-cid" @@ -29,7 +31,6 @@ import ( market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power" reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward" - runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -57,7 +58,7 @@ func MinerAddress(genesisIndex uint64) address.Address { } type fakedSigSyscalls struct { - runtime5.Syscalls + runtime7.Syscalls } func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { @@ -65,7 +66,7 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer } func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { - return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls { + return func(ctx context.Context, rt *vm.Runtime) runtime7.Syscalls { return &fakedSigSyscalls{ base(ctx, rt), } @@ -509,31 +510,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal // TODO: copied from actors test harness, deduplicate or remove from here type fakeRand struct{} -func (fr *fakeRand) GetChainRandomnessV2(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (fr *fakeRand) GetChainRandomness(ctx context.Context, rnv network.Version, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { out := make([]byte, 32) _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint return out, nil } -func (fr *fakeRand) GetChainRandomnessV1(ctx context.Context, personalization 
crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { - out := make([]byte, 32) - _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint - return out, nil -} - -func (fr *fakeRand) GetBeaconRandomnessV3(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { - out := make([]byte, 32) - _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint - return out, nil -} - -func (fr *fakeRand) GetBeaconRandomnessV2(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { - out := make([]byte, 32) - _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint - return out, nil -} - -func (fr *fakeRand) GetBeaconRandomnessV1(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, rnv network.Version, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { out := make([]byte, 32) _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint return out, nil diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go index 5edcd5439..de3af5825 100644 --- a/chain/gen/slashfilter/slashfilter.go +++ b/chain/gen/slashfilter/slashfilter.go @@ -1,6 +1,7 @@ package slashfilter import ( + "context" "fmt" "github.com/filecoin-project/lotus/build" @@ -27,7 +28,7 @@ func New(dstore ds.Batching) *SlashFilter { } } -func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { +func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { if build.IsNearUpgrade(bh.Height, build.UpgradeOrangeHeight) { return nil } @@ -35,7 +36,7 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height)) { // double-fork mining (2 blocks at one epoch) - if err := checkFault(f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil { + if err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil { return err } } @@ -43,7 +44,7 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo parentsKey := ds.NewKey(fmt.Sprintf("/%s/%x", bh.Miner, types.NewTipSetKey(bh.Parents...).Bytes())) { // time-offset mining faults (2 blocks with the same parents) - if err := checkFault(f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil { + if err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil { return err } } @@ -53,14 +54,14 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo // First check if we have mined a block on the parent epoch parentEpochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch)) - have, err := f.byEpoch.Has(parentEpochKey) + have, err := f.byEpoch.Has(ctx, parentEpochKey) if err != nil { return err } if have { // If we had, make sure it's in our parent tipset - cidb, err := f.byEpoch.Get(parentEpochKey) + cidb, err := f.byEpoch.Get(ctx, parentEpochKey) if err != nil { return xerrors.Errorf("getting other block cid: %w", err) } @@ -83,25 +84,25 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo } } - if err := f.byParents.Put(parentsKey, 
bh.Cid().Bytes()); err != nil { + if err := f.byParents.Put(ctx, parentsKey, bh.Cid().Bytes()); err != nil { return xerrors.Errorf("putting byEpoch entry: %w", err) } - if err := f.byEpoch.Put(epochKey, bh.Cid().Bytes()); err != nil { + if err := f.byEpoch.Put(ctx, epochKey, bh.Cid().Bytes()); err != nil { return xerrors.Errorf("putting byEpoch entry: %w", err) } return nil } -func checkFault(t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error { - fault, err := t.Has(key) +func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error { + fault, err := t.Has(ctx, key) if err != nil { return err } if fault { - cidb, err := t.Get(key) + cidb, err := t.Get(ctx, key) if err != nil { return xerrors.Errorf("getting other block cid: %w", err) } diff --git a/chain/market/fundmanager.go b/chain/market/fundmanager.go index 5becfdfa7..e934201d7 100644 --- a/chain/market/fundmanager.go +++ b/chain/market/fundmanager.go @@ -89,7 +89,7 @@ func (fm *FundManager) Start() error { // - in State() only load addresses with in-progress messages // - load the others just-in-time from getFundedAddress // - delete(fm.fundedAddrs, addr) when the queue has been processed - return fm.str.forEach(func(state *FundedAddressState) { + return fm.str.forEach(fm.ctx, func(state *FundedAddressState) { fa := newFundedAddress(fm, state.Addr) fa.state = state fm.fundedAddrs[fa.state.Addr] = fa @@ -322,7 +322,7 @@ func (a *fundedAddress) clearWaitState() { // Save state to datastore func (a *fundedAddress) saveState() { // Not much we can do if saving to the datastore fails, just log - err := a.str.save(a.state) + err := a.str.save(a.ctx, a.state) if err != nil { log.Errorf("saving state to store for addr %s: %v", a.state.Addr, err) } diff --git a/chain/market/store.go b/chain/market/store.go index e0d0e10be..9818b1d80 100644 --- a/chain/market/store.go +++ b/chain/market/store.go @@ -2,6 +2,7 @@ package market import ( "bytes" + "context" cborrpc "github.com/filecoin-project/go-cbor-util" "github.com/ipfs/go-datastore" @@ -27,7 +28,7 @@ func newStore(ds dtypes.MetadataDS) *Store { } // save the state to the datastore -func (ps *Store) save(state *FundedAddressState) error { +func (ps *Store) save(ctx context.Context, state *FundedAddressState) error { k := dskeyForAddr(state.Addr) b, err := cborrpc.Dump(state) @@ -35,14 +36,14 @@ func (ps *Store) save(state *FundedAddressState) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // get the state for the given address -func (ps *Store) get(addr address.Address) (*FundedAddressState, error) { +func (ps *Store) get(ctx context.Context, addr address.Address) (*FundedAddressState, error) { k := dskeyForAddr(addr) - data, err := ps.ds.Get(k) + data, err := ps.ds.Get(ctx, k) if err != nil { return nil, err } @@ -56,8 +57,8 @@ func (ps *Store) get(addr address.Address) (*FundedAddressState, error) { } // forEach calls iter with each address in the datastore -func (ps *Store) forEach(iter func(*FundedAddressState)) error { - res, err := ps.ds.Query(dsq.Query{Prefix: dsKeyAddr}) +func (ps *Store) forEach(ctx context.Context, iter func(*FundedAddressState)) error { + res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyAddr}) if err != nil { return err } diff --git a/chain/messagepool/config.go b/chain/messagepool/config.go index a511f84b7..3c07a8a0f 100644 --- a/chain/messagepool/config.go +++ b/chain/messagepool/config.go @@ -1,6 +1,7 @@ package messagepool import ( + "context" 
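// chain/messagepool/config.go below threads a context through the datastore calls
// (Has/Get/Put now take ctx in the updated go-datastore API). A self-contained sketch of
// the same load-or-default / save pattern against a hypothetical ctx-aware key-value
// store, with placeholder config values, so the shape is visible without the real
// dtypes.MetadataDS:
package main

import (
	"context"
	"encoding/json"
	"fmt"
)

type mpoolConfig struct {
	SizeLimitHigh int
	SizeLimitLow  int
}

// placeholder defaults, not the real mpool limits
func defaultConfig() *mpoolConfig {
	return &mpoolConfig{SizeLimitHigh: 30000, SizeLimitLow: 20000}
}

// hypothetical stand-in for a ctx-aware datastore
type kvStore struct{ m map[string][]byte }

func (s *kvStore) Has(ctx context.Context, k string) (bool, error) {
	_, ok := s.m[k]
	return ok, ctx.Err()
}

func (s *kvStore) Get(ctx context.Context, k string) ([]byte, error) {
	return s.m[k], ctx.Err()
}

func (s *kvStore) Put(ctx context.Context, k string, v []byte) error {
	s.m[k] = v
	return ctx.Err()
}

func loadConfig(ctx context.Context, ds *kvStore) (*mpoolConfig, error) {
	have, err := ds.Has(ctx, "/mpool/config")
	if err != nil {
		return nil, err
	}
	if !have {
		return defaultConfig(), nil // nothing persisted yet: fall back to defaults
	}
	b, err := ds.Get(ctx, "/mpool/config")
	if err != nil {
		return nil, err
	}
	cfg := new(mpoolConfig)
	return cfg, json.Unmarshal(b, cfg)
}

func saveConfig(ctx context.Context, cfg *mpoolConfig, ds *kvStore) error {
	b, err := json.Marshal(cfg)
	if err != nil {
		return err
	}
	return ds.Put(ctx, "/mpool/config", b)
}

func main() {
	ctx := context.Background()
	ds := &kvStore{m: map[string][]byte{}}
	cfg, _ := loadConfig(ctx, ds)
	_ = saveConfig(ctx, cfg, ds)
	fmt.Printf("%+v\n", cfg)
}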
"encoding/json" "fmt" "time" @@ -20,8 +21,8 @@ var ( ConfigKey = datastore.NewKey("/mpool/config") ) -func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) { - haveCfg, err := ds.Has(ConfigKey) +func loadConfig(ctx context.Context, ds dtypes.MetadataDS) (*types.MpoolConfig, error) { + haveCfg, err := ds.Has(ctx, ConfigKey) if err != nil { return nil, err } @@ -30,7 +31,7 @@ func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) { return DefaultConfig(), nil } - cfgBytes, err := ds.Get(ConfigKey) + cfgBytes, err := ds.Get(ctx, ConfigKey) if err != nil { return nil, err } @@ -39,12 +40,12 @@ func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) { return cfg, err } -func saveConfig(cfg *types.MpoolConfig, ds dtypes.MetadataDS) error { +func saveConfig(ctx context.Context, cfg *types.MpoolConfig, ds dtypes.MetadataDS) error { cfgBytes, err := json.Marshal(cfg) if err != nil { return err } - return ds.Put(ConfigKey, cfgBytes) + return ds.Put(ctx, ConfigKey, cfgBytes) } func (mp *MessagePool) GetConfig() *types.MpoolConfig { @@ -68,7 +69,7 @@ func validateConfg(cfg *types.MpoolConfig) error { return nil } -func (mp *MessagePool) SetConfig(cfg *types.MpoolConfig) error { +func (mp *MessagePool) SetConfig(ctx context.Context, cfg *types.MpoolConfig) error { if err := validateConfg(cfg); err != nil { return err } @@ -76,7 +77,7 @@ func (mp *MessagePool) SetConfig(cfg *types.MpoolConfig) error { mp.cfgLk.Lock() mp.cfg = cfg - err := saveConfig(cfg, mp.ds) + err := saveConfig(ctx, cfg, mp.ds) if err != nil { log.Warnf("error persisting mpool config: %s", err) } diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 06343e9c9..43d014502 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -358,11 +358,11 @@ func (ms *msgSet) toSlice() []*types.SignedMessage { return set } -func New(api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { +func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { cache, _ := lru.New2Q(build.BlsSignatureCacheSize) verifcache, _ := lru.New2Q(build.VerifSigCacheSize) - cfg, err := loadConfig(ds) + cfg, err := loadConfig(ctx, ds) if err != nil { return nil, xerrors.Errorf("error loading mpool config: %w", err) } @@ -601,7 +601,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err return xerrors.Errorf("error serializing message: %w", err) } - if err := mp.localMsgs.Put(datastore.NewKey(string(m.Cid().Bytes())), msgb); err != nil { + if err := mp.localMsgs.Put(ctx, datastore.NewKey(string(m.Cid().Bytes())), msgb); err != nil { return xerrors.Errorf("persisting local message: %w", err) } @@ -909,12 +909,12 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st mp.blsSigCache.Add(m.Cid(), m.Signature) } - if _, err := mp.api.PutMessage(m); err != nil { + if _, err := mp.api.PutMessage(ctx, m); err != nil { log.Warnf("mpooladd cs.PutMessage failed: %s", err) return err } - if _, err := mp.api.PutMessage(&m.Message); err != nil { + if _, err := mp.api.PutMessage(ctx, &m.Message); err != nil { log.Warnf("mpooladd cs.PutMessage failed: %s", err) return err } @@ -1207,7 +1207,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a var merr error for _, ts := range revert { - pts, err := 
mp.api.LoadTipSet(ts.Parents()) + pts, err := mp.api.LoadTipSet(ctx, ts.Parents()) if err != nil { log.Errorf("error loading reverted tipset parent: %s", err) merr = multierror.Append(merr, err) @@ -1216,7 +1216,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a mp.curTs = pts - msgs, err := mp.MessagesForBlocks(ts.Blocks()) + msgs, err := mp.MessagesForBlocks(ctx, ts.Blocks()) if err != nil { log.Errorf("error retrieving messages for reverted block: %s", err) merr = multierror.Append(merr, err) @@ -1232,7 +1232,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a mp.curTs = ts for _, b := range ts.Blocks() { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) if err != nil { xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) log.Errorf("error retrieving messages for block: %s", xerr) @@ -1338,7 +1338,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a return merr } -func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error { +func (mp *MessagePool) runHeadChange(ctx context.Context, from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error { add := func(m *types.SignedMessage) { s, ok := rmsgs[m.Message.From] if !ok { @@ -1360,7 +1360,7 @@ func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs } - revert, apply, err := store.ReorgOps(mp.api.LoadTipSet, from, to) + revert, apply, err := store.ReorgOps(ctx, mp.api.LoadTipSet, from, to) if err != nil { return xerrors.Errorf("failed to compute reorg ops for mpool pending messages: %w", err) } @@ -1368,7 +1368,7 @@ func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs var merr error for _, ts := range revert { - msgs, err := mp.MessagesForBlocks(ts.Blocks()) + msgs, err := mp.MessagesForBlocks(ctx, ts.Blocks()) if err != nil { log.Errorf("error retrieving messages for reverted block: %s", err) merr = multierror.Append(merr, err) @@ -1382,7 +1382,7 @@ func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs for _, ts := range apply { for _, b := range ts.Blocks() { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) if err != nil { xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) log.Errorf("error retrieving messages for block: %s", xerr) @@ -1407,11 +1407,11 @@ type statBucket struct { msgs map[uint64]*types.SignedMessage } -func (mp *MessagePool) MessagesForBlocks(blks []*types.BlockHeader) ([]*types.SignedMessage, error) { +func (mp *MessagePool) MessagesForBlocks(ctx context.Context, blks []*types.BlockHeader) ([]*types.SignedMessage, error) { out := make([]*types.SignedMessage, 0) for _, b := range blks { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) if err != nil { return nil, xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) } @@ -1477,7 +1477,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err } func (mp *MessagePool) loadLocal(ctx context.Context) error { - res, err := 
mp.localMsgs.Query(query.Query{}) + res, err := mp.localMsgs.Query(ctx, query.Query{}) if err != nil { return xerrors.Errorf("query local messages: %w", err) } @@ -1525,7 +1525,7 @@ func (mp *MessagePool) Clear(ctx context.Context, local bool) { if ok { for _, m := range mset.msgs { - err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes()))) + err := mp.localMsgs.Delete(ctx, datastore.NewKey(string(m.Cid().Bytes()))) if err != nil { log.Warnf("error deleting local message: %s", err) } diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index fe8ed231a..6bd60da34 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -104,7 +104,7 @@ func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) return tma.tipsets[0] } -func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) { +func (tma *testMpoolAPI) PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) { return cid.Undef, nil } @@ -165,16 +165,16 @@ func (tma *testMpoolAPI) StateAccountKeyAtFinality(ctx context.Context, addr add return addr, nil } -func (tma *testMpoolAPI) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { +func (tma *testMpoolAPI) MessagesForBlock(ctx context.Context, h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { return nil, tma.bmsgs[h.Cid()], nil } -func (tma *testMpoolAPI) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { +func (tma *testMpoolAPI) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { if len(ts.Blocks()) != 1 { panic("cant deal with multiblock tipsets in this test") } - bm, sm, err := tma.MessagesForBlock(ts.Blocks()[0]) + bm, sm, err := tma.MessagesForBlock(ctx, ts.Blocks()[0]) if err != nil { return nil, err } @@ -191,7 +191,7 @@ func (tma *testMpoolAPI) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, return out, nil } -func (tma *testMpoolAPI) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { +func (tma *testMpoolAPI) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { for _, ts := range tma.tipsets { if types.CidArrsEqual(tsk.Cids(), ts.Cids()) { return ts, nil @@ -235,7 +235,7 @@ func TestMessagePool(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -279,7 +279,7 @@ func TestCheckMessageBig(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) assert.NoError(t, err) to := mock.Address(1001) @@ -342,7 +342,7 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -393,7 +393,7 @@ func TestRevertMessages(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -457,7 +457,7 @@ func TestPruningSimple(t *testing.T) { ds 
:= datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -502,7 +502,7 @@ func TestLoadLocal(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -546,7 +546,7 @@ func TestLoadLocal(t *testing.T) { t.Fatal(err) } - mp, err = New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err = New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -576,7 +576,7 @@ func TestClearAll(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -633,7 +633,7 @@ func TestClearNonLocal(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -697,7 +697,7 @@ func TestUpdates(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } diff --git a/chain/messagepool/provider.go b/chain/messagepool/provider.go index 0f904c52c..9dcff2b33 100644 --- a/chain/messagepool/provider.go +++ b/chain/messagepool/provider.go @@ -23,13 +23,13 @@ var ( type Provider interface { SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet - PutMessage(m types.ChainMsg) (cid.Cid, error) + PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) PubSubPublish(string, []byte) error GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error) StateAccountKeyAtFinality(context.Context, address.Address, *types.TipSet) (address.Address, error) - MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) - MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error) - LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) + MessagesForBlock(context.Context, *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) + MessagesForTipset(context.Context, *types.TipSet) ([]types.ChainMsg, error) + LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) IsLite() bool } @@ -66,8 +66,8 @@ func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) return mpp.sm.ChainStore().GetHeaviestTipSet() } -func (mpp *mpoolProvider) PutMessage(m types.ChainMsg) (cid.Cid, error) { - return mpp.sm.ChainStore().PutMessage(m) +func (mpp *mpoolProvider) PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) { + return mpp.sm.ChainStore().PutMessage(ctx, m) } func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error { @@ -103,16 +103,16 @@ func (mpp *mpoolProvider) StateAccountKeyAtFinality(ctx context.Context, addr ad return 
mpp.sm.ResolveToKeyAddressAtFinality(ctx, addr, ts) } -func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - return mpp.sm.ChainStore().MessagesForBlock(h) +func (mpp *mpoolProvider) MessagesForBlock(ctx context.Context, h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { + return mpp.sm.ChainStore().MessagesForBlock(ctx, h) } -func (mpp *mpoolProvider) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { - return mpp.sm.ChainStore().MessagesForTipset(ts) +func (mpp *mpoolProvider) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { + return mpp.sm.ChainStore().MessagesForTipset(ctx, ts) } -func (mpp *mpoolProvider) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { - return mpp.sm.ChainStore().LoadTipSet(tsk) +func (mpp *mpoolProvider) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + return mpp.sm.ChainStore().LoadTipSet(ctx, tsk) } func (mpp *mpoolProvider) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) { diff --git a/chain/messagepool/pruning.go b/chain/messagepool/pruning.go index c10239b8e..d405afb65 100644 --- a/chain/messagepool/pruning.go +++ b/chain/messagepool/pruning.go @@ -49,7 +49,7 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro } baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor) - pending, _ := mp.getPendingMessages(ts, ts) + pending, _ := mp.getPendingMessages(ctx, ts, ts) // protected actors -- not pruned protected := make(map[address.Address]struct{}) diff --git a/chain/messagepool/repub.go b/chain/messagepool/repub.go index 4323bdee1..d92b5bd58 100644 --- a/chain/messagepool/repub.go +++ b/chain/messagepool/repub.go @@ -121,7 +121,7 @@ loop: // we can't fit the current chain but there is gas to spare // trim it and push it down - chain.Trim(gasLimit, mp, baseFee) + chain.Trim(gasLimit, repubMsgLimit, mp, baseFee) for j := i; j < len(chains)-1; j++ { if chains[j].Before(chains[j+1]) { break @@ -131,6 +131,10 @@ loop: } count := 0 + if len(msgs) > repubMsgLimit { + msgs = msgs[:repubMsgLimit] + } + log.Infof("republishing %d messages", len(msgs)) for _, m := range msgs { mb, err := m.Serialize() diff --git a/chain/messagepool/repub_test.go b/chain/messagepool/repub_test.go index fa27d68ed..de32eaa6b 100644 --- a/chain/messagepool/repub_test.go +++ b/chain/messagepool/repub_test.go @@ -25,7 +25,7 @@ func TestRepubMessages(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go index acff7c4cf..633e9b23f 100644 --- a/chain/messagepool/selection.go +++ b/chain/messagepool/selection.go @@ -7,6 +7,10 @@ import ( "sort" "time" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/crypto" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -34,9 +38,10 @@ type msgChain struct { merged bool next *msgChain prev *msgChain + sigType crypto.SigType } -func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) { +func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) 
([]*types.SignedMessage, error) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() @@ -46,24 +51,156 @@ func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq // if the ticket quality is high enough that the first block has higher probability // than any other block, then we don't bother with optimal selection because the // first block will always have higher effective performance + var sm *selectedMessages + var err error if tq > 0.84 { - msgs, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts) + sm, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts) } else { - msgs, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq) + sm, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq) } if err != nil { return nil, err } - if len(msgs) > build.BlockMessageLimit { - msgs = msgs[:build.BlockMessageLimit] + if sm == nil { + return nil, nil } - return msgs, nil + // one last sanity check + if len(sm.msgs) > build.BlockMessageLimit { + log.Errorf("message selection chose too many messages %d > %d", len(sm.msgs), build.BlockMessageLimit) + sm.msgs = sm.msgs[:build.BlockMessageLimit] + } + + return sm.msgs, nil } -func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { +type selectedMessages struct { + msgs []*types.SignedMessage + gasLimit int64 + secpLimit int + blsLimit int +} + +// returns false if chain can't be added due to block constraints +func (sm *selectedMessages) tryToAdd(mc *msgChain) bool { + l := len(mc.msgs) + + if build.BlockMessageLimit < l+len(sm.msgs) || sm.gasLimit < mc.gasLimit { + return false + } + + if mc.sigType == crypto.SigTypeBLS { + if sm.blsLimit < l { + return false + } + + sm.msgs = append(sm.msgs, mc.msgs...) + sm.blsLimit -= l + sm.gasLimit -= mc.gasLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + if sm.secpLimit < l { + return false + } + + sm.msgs = append(sm.msgs, mc.msgs...) + sm.secpLimit -= l + sm.gasLimit -= mc.gasLimit + } + + // don't add the weird sigType msg, but otherwise proceed + return true +} + +// returns false if messages can't be added due to block constraints +// will trim / invalidate chain as appropriate +func (sm *selectedMessages) tryToAddWithDeps(mc *msgChain, mp *MessagePool, baseFee types.BigInt) bool { + // compute the dependencies that must be merged and the gas limit including deps + chainGasLimit := mc.gasLimit + chainMsgLimit := len(mc.msgs) + depGasLimit := int64(0) + depMsgLimit := 0 + smMsgLimit := 0 + + if mc.sigType == crypto.SigTypeBLS { + smMsgLimit = sm.blsLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + smMsgLimit = sm.secpLimit + } else { + return false + } + + if smMsgLimit > build.BlockMessageLimit-len(sm.msgs) { + smMsgLimit = build.BlockMessageLimit - len(sm.msgs) + } + + var chainDeps []*msgChain + for curChain := mc.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { + chainDeps = append(chainDeps, curChain) + chainGasLimit += curChain.gasLimit + chainMsgLimit += len(curChain.msgs) + depGasLimit += curChain.gasLimit + depMsgLimit += len(curChain.msgs) + } + + // the chain doesn't fit as-is, so trim / invalidate it and return false + if chainGasLimit > sm.gasLimit || chainMsgLimit > smMsgLimit { + + // it doesn't all fit; now we have to take into account the dependent chains before + // making a decision about trimming or invalidating. + // if the dependencies exceed the block limits, then we must invalidate the chain + // as it can never be included. 
+ // Otherwise we can just trim and continue + if depGasLimit > sm.gasLimit || depMsgLimit >= smMsgLimit { + mc.Invalidate() + } else { + // dependencies fit, just trim it + mc.Trim(sm.gasLimit-depGasLimit, smMsgLimit-depMsgLimit, mp, baseFee) + } + + return false + } + + // the chain fits! include it together with all dependencies + for i := len(chainDeps) - 1; i >= 0; i-- { + curChain := chainDeps[i] + curChain.merged = true + sm.msgs = append(sm.msgs, curChain.msgs...) + } + + mc.merged = true + + sm.msgs = append(sm.msgs, mc.msgs...) + sm.gasLimit -= chainGasLimit + + if mc.sigType == crypto.SigTypeBLS { + sm.blsLimit -= chainMsgLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + sm.secpLimit -= chainMsgLimit + } + + return true +} + +func (sm *selectedMessages) trimChain(mc *msgChain, mp *MessagePool, baseFee types.BigInt) { + msgLimit := build.BlockMessageLimit - len(sm.msgs) + if mc.sigType == crypto.SigTypeBLS { + if msgLimit > sm.blsLimit { + msgLimit = sm.blsLimit + } + } else if mc.sigType == crypto.SigTypeSecp256k1 { + if msgLimit > sm.secpLimit { + msgLimit = sm.secpLimit + } + } + + if mc.gasLimit > sm.gasLimit || len(mc.msgs) > msgLimit { + mc.Trim(sm.gasLimit, msgLimit, mp, baseFee) + } +} + +func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) (*selectedMessages, error) { start := time.Now() baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) @@ -73,7 +210,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ // 0. Load messages from the target tipset; if it is the same as the current tipset in // the mpool, then this is just the pending messages - pending, err := mp.getPendingMessages(curTs, ts) + pending, err := mp.getPendingMessages(ctx, curTs, ts) if err != nil { return nil, err } @@ -89,10 +226,10 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ // 0b. Select all priority messages that fit in the block minGas := int64(gasguess.MinGas) - result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts) + result := mp.selectPriorityMessages(ctx, pending, baseFee, ts) // have we filled the block? - if gasLimit < minGas { + if result.gasLimit < minGas || len(result.msgs) >= build.BlockMessageLimit { return result, nil } @@ -117,19 +254,21 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ return result, nil } - // 3. Parition chains into blocks (without trimming) + // 3. Partition chains into blocks (without trimming) // we use the full blockGasLimit (as opposed to the residual gas limit from the - // priority message selection) as we have to account for what other miners are doing + // priority message selection) as we have to account for what other block providers are doing nextChain := 0 partitions := make([][]*msgChain, MaxBlocks) for i := 0; i < MaxBlocks && nextChain < len(chains); i++ { gasLimit := int64(build.BlockGasLimit) + msgLimit := build.BlockMessageLimit for nextChain < len(chains) { chain := chains[nextChain] nextChain++ partitions[i] = append(partitions[i], chain) gasLimit -= chain.gasLimit - if gasLimit < minGas { + msgLimit -= len(chain.msgs) + if gasLimit < minGas || msgLimit <= 0 { break } } @@ -158,7 +297,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ }) // 6. 
Merge the head chains to produce the list of messages selected for inclusion - // subject to the residual gas limit + // subject to the residual block limits // When a chain is merged in, all its previous dependent chains *must* also be // merged in or we'll have a broken block startMerge := time.Now() @@ -174,35 +313,16 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ continue } - // compute the dependencies that must be merged and the gas limit including deps - chainGasLimit := chain.gasLimit - var chainDeps []*msgChain - for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { - chainDeps = append(chainDeps, curChain) - chainGasLimit += curChain.gasLimit - } - - // does it all fit in the block? - if chainGasLimit <= gasLimit { - // include it together with all dependencies - for i := len(chainDeps) - 1; i >= 0; i-- { - curChain := chainDeps[i] - curChain.merged = true - result = append(result, curChain.msgs...) - } - - chain.merged = true - // adjust the effective pefromance for all subsequent chains + if result.tryToAddWithDeps(chain, mp, baseFee) { + // adjust the effective performance for all subsequent chains if next := chain.next; next != nil && next.effPerf > 0 { next.effPerf += next.parentOffset for next = next.next; next != nil && next.effPerf > 0; next = next.next { next.setEffPerf() } } - result = append(result, chain.msgs...) - gasLimit -= chainGasLimit - // resort to account for already merged chains and effective performance adjustments + // re-sort to account for already merged chains and effective performance adjustments // the sort *must* be stable or we end up getting negative gasPerfs pushed up. sort.SliceStable(chains[i+1:], func(i, j int) bool { return chains[i].BeforeEffective(chains[j]) @@ -211,7 +331,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ continue } - // we can't fit this chain and its dependencies because of block gasLimit -- we are + // we can't fit this chain and its dependencies because of block limits -- we are // at the edge last = i break @@ -222,18 +342,22 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ // 7. We have reached the edge of what can fit wholesale; if we still hae available // gasLimit to pack some more chains, then trim the last chain and push it down. - // Trimming invalidaates subsequent dependent chains so that they can't be selected + // Trimming invalidates subsequent dependent chains so that they can't be selected // as their dependency cannot be (fully) included. // We do this in a loop because the blocker might have been inordinately large and // we might have to do it multiple times to satisfy tail packing startTail := time.Now() tailLoop: - for gasLimit >= minGas && last < len(chains) { - // trim if necessary - if chains[last].gasLimit > gasLimit { - chains[last].Trim(gasLimit, mp, baseFee) + for result.gasLimit >= minGas && last < len(chains) { + + if !chains[last].valid { + last++ + continue tailLoop } + // trim if necessary + result.trimChain(chains[last], mp, baseFee) + // push down if it hasn't been invalidated if chains[last].valid { for i := last; i < len(chains)-1; i++ { @@ -245,7 +369,7 @@ tailLoop: } // select the next (valid and fitting) chain and its dependencies for inclusion - for i, chain := range chains[last:] { + for _, chain := range chains[last:] { // has the chain been invalidated? 
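// The selection loops above now route every candidate chain through the new
// selectedMessages bookkeeping instead of tracking a bare gas limit. A compact,
// self-contained sketch of the tryToAdd admission check with hypothetical simplified
// types and placeholder limits (the real version also handles dependent chains via
// tryToAddWithDeps):
package main

import "fmt"

const (
	blockGasLimit = 10_000_000_000 // placeholder for build.BlockGasLimit
	blockMsgLimit = 10_000         // placeholder for build.BlockMessageLimit
	sigTypeBLS    = 1
	sigTypeSecp   = 2
)

type msgChain struct {
	sigType int
	msgs    []string
	gas     int64
}

type selected struct {
	msgs      []string
	gasLimit  int64
	blsLimit  int
	secpLimit int
}

// tryToAdd admits a whole chain only if it fits the remaining gas budget, the overall
// per-block message count, and the per-signature-type message count.
func (s *selected) tryToAdd(c *msgChain) bool {
	l := len(c.msgs)
	if blockMsgLimit < l+len(s.msgs) || s.gasLimit < c.gas {
		return false
	}
	switch c.sigType {
	case sigTypeBLS:
		if s.blsLimit < l {
			return false
		}
		s.blsLimit -= l
	case sigTypeSecp:
		if s.secpLimit < l {
			return false
		}
		s.secpLimit -= l
	default:
		return true // unknown sig type: skip this chain but keep selecting
	}
	s.msgs = append(s.msgs, c.msgs...)
	s.gasLimit -= c.gas
	return true
}

func main() {
	s := &selected{gasLimit: blockGasLimit, blsLimit: 5000, secpLimit: 5000}
	ok := s.tryToAdd(&msgChain{sigType: sigTypeBLS, msgs: []string{"m1", "m2"}, gas: 1_000_000})
	fmt.Println(ok, len(s.msgs), s.gasLimit)
}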
if !chain.valid { continue @@ -261,45 +385,10 @@ tailLoop: break tailLoop } - // compute the dependencies that must be merged and the gas limit including deps - chainGasLimit := chain.gasLimit - depGasLimit := int64(0) - var chainDeps []*msgChain - for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { - chainDeps = append(chainDeps, curChain) - chainGasLimit += curChain.gasLimit - depGasLimit += curChain.gasLimit - } - - // does it all fit in the bock - if chainGasLimit <= gasLimit { - // include it together with all dependencies - for i := len(chainDeps) - 1; i >= 0; i-- { - curChain := chainDeps[i] - curChain.merged = true - result = append(result, curChain.msgs...) - } - - chain.merged = true - result = append(result, chain.msgs...) - gasLimit -= chainGasLimit + if result.tryToAddWithDeps(chain, mp, baseFee) { continue } - // it doesn't all fit; now we have to take into account the dependent chains before - // making a decision about trimming or invalidating. - // if the dependencies exceed the gas limit, then we must invalidate the chain - // as it can never be included. - // Otherwise we can just trim and continue - if depGasLimit > gasLimit { - chain.Invalidate() - last += i + 1 - continue tailLoop - } - - // dependencies fit, just trim it - chain.Trim(gasLimit-depGasLimit, mp, baseFee) - last += i continue tailLoop } @@ -311,17 +400,17 @@ tailLoop: log.Infow("pack tail chains done", "took", dt) } - // if we have gasLimit to spare, pick some random (non-negative) chains to fill the block - // we pick randomly so that we minimize the probability of duplication among all miners - if gasLimit >= minGas { - randomCount := 0 + // if we have room to spare, pick some random (non-negative) chains to fill the block + // we pick randomly so that we minimize the probability of duplication among all block producers + if result.gasLimit >= minGas && len(result.msgs) <= build.BlockMessageLimit { + preRandomLength := len(result.msgs) startRandom := time.Now() shuffleChains(chains) for _, chain := range chains { // have we filled the block - if gasLimit < minGas { + if result.gasLimit < minGas || len(result.msgs) >= build.BlockMessageLimit { break } @@ -335,59 +424,31 @@ tailLoop: continue } - // compute the dependencies that must be merged and the gas limit including deps - chainGasLimit := chain.gasLimit - depGasLimit := int64(0) - var chainDeps []*msgChain - for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { - chainDeps = append(chainDeps, curChain) - chainGasLimit += curChain.gasLimit - depGasLimit += curChain.gasLimit - } - - // do the deps fit? if the deps won't fit, invalidate the chain - if depGasLimit > gasLimit { - chain.Invalidate() + if result.tryToAddWithDeps(chain, mp, baseFee) { continue } - // do they fit as is? if it doesn't, trim to make it fit if possible - if chainGasLimit > gasLimit { - chain.Trim(gasLimit-depGasLimit, mp, baseFee) - - if !chain.valid { - continue - } + if chain.valid { + // chain got trimmed on the previous call to tryToAddWithDeps, can now be included + result.tryToAddWithDeps(chain, mp, baseFee) + continue } - - // include it together with all dependencies - for i := len(chainDeps) - 1; i >= 0; i-- { - curChain := chainDeps[i] - curChain.merged = true - result = append(result, curChain.msgs...) - randomCount += len(curChain.msgs) - } - - chain.merged = true - result = append(result, chain.msgs...) 
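// The random tail-packing stage shuffles the leftover chains before trying to squeeze
// them in, so that different block producers filling spare block space are unlikely to
// pick the same messages. A tiny sketch of that shuffle step with hypothetical chain IDs:
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	leftover := []string{"chain-a", "chain-b", "chain-c", "chain-d"}

	// shuffle in place; each producer tries the leftovers in a different order,
	// which minimises duplicate picks across the blocks of one tipset
	rand.Shuffle(len(leftover), func(i, j int) {
		leftover[i], leftover[j] = leftover[j], leftover[i]
	})

	fmt.Println(leftover)
}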
- randomCount += len(chain.msgs) - gasLimit -= chainGasLimit } if dt := time.Since(startRandom); dt > time.Millisecond { log.Infow("pack random tail chains done", "took", dt) } - if randomCount > 0 { + if len(result.msgs) != preRandomLength { log.Warnf("optimal selection failed to pack a block; picked %d messages with random selection", - randomCount) + len(result.msgs)-preRandomLength) } } return result, nil } -func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) ([]*types.SignedMessage, error) { +func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) (*selectedMessages, error) { start := time.Now() baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) @@ -397,7 +458,7 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type // 0. Load messages for the target tipset; if it is the same as the current tipset in the mpool // then this is just the pending messages - pending, err := mp.getPendingMessages(curTs, ts) + pending, err := mp.getPendingMessages(ctx, curTs, ts) if err != nil { return nil, err } @@ -413,10 +474,10 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type // 0b. Select all priority messages that fit in the block minGas := int64(gasguess.MinGas) - result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts) + result := mp.selectPriorityMessages(ctx, pending, baseFee, ts) // have we filled the block? - if gasLimit < minGas { + if result.gasLimit < minGas || len(result.msgs) > build.BlockMessageLimit { return result, nil } @@ -442,7 +503,7 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type } // 3. Merge the head chains to produce the list of messages selected for inclusion, subject to - // the block gas limit. + // the block gas and message limits. startMerge := time.Now() last := len(chains) for i, chain := range chains { @@ -452,13 +513,12 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type } // does it fit in the block? - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } - // we can't fit this chain because of block gasLimit -- we are at the edge + // we can't fit this chain because of block limits -- we are at the edge last = i break } @@ -474,9 +534,9 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type // have to do it multiple times to satisfy tail packing. startTail := time.Now() tailLoop: - for gasLimit >= minGas && last < len(chains) { + for result.gasLimit >= minGas && last < len(chains) { // trim - chains[last].Trim(gasLimit, mp, baseFee) + result.trimChain(chains[last], mp, baseFee) // push down if it hasn't been invalidated if chains[last].valid { @@ -501,9 +561,8 @@ tailLoop: } // does it fit in the bock? - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) 
+ if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } @@ -523,7 +582,7 @@ tailLoop: return result, nil } -func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) { +func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) *selectedMessages { start := time.Now() defer func() { if dt := time.Since(start); dt > time.Millisecond { @@ -531,8 +590,12 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a } }() mpCfg := mp.getConfig() - result := make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow) - gasLimit := int64(build.BlockGasLimit) + result := &selectedMessages{ + msgs: make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow), + gasLimit: int64(build.BlockGasLimit), + blsLimit: cbg.MaxLength, + secpLimit: cbg.MaxLength, + } minGas := int64(gasguess.MinGas) // 1. Get priority actor chains @@ -542,7 +605,7 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a pk, err := mp.resolveToKey(ctx, actor) if err != nil { log.Debugf("mpooladdlocal failed to resolve sender: %s", err) - return nil, gasLimit + return result } mset, ok := pending[pk] @@ -554,9 +617,8 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a chains = append(chains, next...) } } - if len(chains) == 0 { - return nil, gasLimit + return result } // 2. Sort the chains @@ -566,7 +628,7 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a if len(chains) != 0 && chains[0].gasPerf < 0 { log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf) - return nil, gasLimit + return result } // 3. Merge chains until the block limit, as long as they have non-negative gas performance @@ -576,9 +638,8 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a break } - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } @@ -588,9 +649,10 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a } tailLoop: - for gasLimit >= minGas && last < len(chains) { + for result.gasLimit >= minGas && last < len(chains) { // trim, discarding negative performing messages - chains[last].Trim(gasLimit, mp, baseFee) + + result.trimChain(chains[last], mp, baseFee) // push down if it hasn't been invalidated if chains[last].valid { @@ -615,9 +677,8 @@ tailLoop: } // does it fit in the bock? - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) 
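For orientation, a sketch that is not part of this patch: the selection paths above stop tracking a bare gasLimit and instead accumulate into a selectedMessages value whose budgets cover gas, the total message count (build.BlockMessageLimit), and the per-signature-type counts (cbg.MaxLength, the cbor-gen bound on the bls and secpk message arrays of a block). The struct literal above fixes the field names; the tryToAdd body below is only a guess at the checks those call sites imply, assuming the chain/messagepool package context (msgChain, build, cbg, crypto and types as used elsewhere in this patch).

type selectedMessages struct {
	msgs      []*types.SignedMessage
	gasLimit  int64
	secpLimit int
	blsLimit  int
}

// tryToAdd merges a chain into the selection when it fits every remaining
// budget, and returns false (leaving the selection untouched) otherwise.
// Sketch only -- the actual method body lives elsewhere in this PR.
func (sm *selectedMessages) tryToAdd(mc *msgChain) bool {
	l := len(mc.msgs)

	// block-wide budgets: total message count and gas
	if build.BlockMessageLimit < l+len(sm.msgs) || sm.gasLimit < mc.gasLimit {
		return false
	}

	// per-signature-type budgets: bls and secpk messages are serialized as
	// separate cbor arrays, each bounded by cbg.MaxLength
	switch mc.sigType {
	case crypto.SigTypeBLS:
		if sm.blsLimit < l {
			return false
		}
		sm.blsLimit -= l
	case crypto.SigTypeSecp256k1:
		if sm.secpLimit < l {
			return false
		}
		sm.secpLimit -= l
	}

	sm.msgs = append(sm.msgs, mc.msgs...)
	sm.gasLimit -= mc.gasLimit
	return true
}

tryToAddWithDeps and trimChain, called in the hunks above, presumably fold the removed dependency-walking and trimming logic behind these same budgets; note that Trim itself now takes an explicit msgLimit alongside the gas limit.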
+ if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } @@ -631,10 +692,10 @@ tailLoop: break } - return result, gasLimit + return result } -func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) { +func (mp *MessagePool) getPendingMessages(ctx context.Context, curTs, ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) { start := time.Now() result := make(map[address.Address]map[uint64]*types.SignedMessage) @@ -670,7 +731,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address. return result, nil } - if err := mp.runHeadChange(curTs, ts, result); err != nil { + if err := mp.runHeadChange(ctx, curTs, ts, result); err != nil { return nil, xerrors.Errorf("failed to process difference between mpool head and given head: %w", err) } @@ -778,11 +839,16 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 return nil } + // if we have more messages from this sender than can fit in a block, drop the extra ones + if len(msgs) > build.BlockMessageLimit { + msgs = msgs[:build.BlockMessageLimit] + } + // ok, now we can construct the chains using the messages we have // invariant: each chain has a bigger gasPerf than the next -- otherwise they can be merged // and increase the gasPerf of the first chain // We do this in two passes: - // - in the first pass we create chains that aggreagate messages with non-decreasing gasPerf + // - in the first pass we create chains that aggregate messages with non-decreasing gasPerf // - in the second pass we merge chains to maintain the invariant. var chains []*msgChain var curChain *msgChain @@ -794,6 +860,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 chain.gasLimit = m.Message.GasLimit chain.gasPerf = mp.getGasPerf(chain.gasReward, chain.gasLimit) chain.valid = true + chain.sigType = m.Signature.Type return chain } @@ -808,7 +875,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 gasLimit := curChain.gasLimit + m.Message.GasLimit gasPerf := mp.getGasPerf(gasReward, gasLimit) - // try to add the message to the current chain -- if it decreases the gasPerf, then make a + // try to add the message to the current chain -- if it decreases the gasPerf, or then make a // new chain if gasPerf < curChain.gasPerf { chains = append(chains, curChain) @@ -868,9 +935,9 @@ func (mc *msgChain) Before(other *msgChain) bool { (mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0) } -func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt) { +func (mc *msgChain) Trim(gasLimit int64, msgLimit int, mp *MessagePool, baseFee types.BigInt) { i := len(mc.msgs) - 1 - for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0) { + for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0 || i >= msgLimit) { gasReward := mp.getGasReward(mc.msgs[i], baseFee) mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward) mc.gasLimit -= mc.msgs[i].Message.GasLimit @@ -893,6 +960,7 @@ func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt) mc.msgs = mc.msgs[:i+1] } + // TODO: if the trim above is a no-op, this (may) needlessly invalidates the next chain if mc.next != nil { mc.next.Invalidate() mc.next = nil diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go index 0f8fd8ee6..2ae99cd77 100644 --- a/chain/messagepool/selection_test.go +++ 
b/chain/messagepool/selection_test.go @@ -13,6 +13,10 @@ import ( "sort" "testing" + "github.com/filecoin-project/go-state-types/crypto" + + cbg "github.com/whyrusleeping/cbor-gen" + "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" @@ -61,7 +65,7 @@ func makeTestMessage(w *wallet.LocalWallet, from, to address.Address, nonce uint func makeTestMpool() (*MessagePool, *testMpoolAPI) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "test", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "test", nil) if err != nil { panic(err) } @@ -527,7 +531,7 @@ func TestBasicMessageSelection(t *testing.T) { } } -func TestMessageSelectionTrimming(t *testing.T) { +func TestMessageSelectionTrimmingGas(t *testing.T) { mp, tma := makeTestMpool() // the actors @@ -577,7 +581,7 @@ func TestMessageSelectionTrimming(t *testing.T) { expected := int(build.BlockGasLimit / gasLimit) if len(msgs) != expected { - t.Fatalf("expected %d messages, bug got %d", expected, len(msgs)) + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) } mGasLimit := int64(0) @@ -590,6 +594,199 @@ func TestMessageSelectionTrimming(t *testing.T) { } +func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + + // create a larger than selectable chain + for i := 0; i < build.BlockMessageLimit; i++ { + m := makeTestMessage(w1, a1, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + expected := cbg.MaxLength + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + mGasLimit := int64(0) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + } + if mGasLimit > build.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + +} + +func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.WalletNew(context.Background(), types.KTBLS) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // create 2 larger than selectable chains + for i := 0; i < build.BlockMessageLimit; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 100) + mustAdd(t, mp, m) + // a2's messages are preferred + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 1000) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + mGasLimit := int64(0) + counts := make(map[crypto.SigType]uint) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + counts[m.Signature.Type]++ + } + + if 
mGasLimit > build.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + + expected := build.BlockMessageLimit + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + if counts[crypto.SigTypeBLS] != cbg.MaxLength { + t.Fatalf("expected %d bls messages, but got %d", cbg.MaxLength, counts[crypto.SigTypeBLS]) + } +} + +func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.WalletNew(context.Background(), types.KTBLS) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // create 2 almost max-length chains of equal value + i := 0 + for i = 0; i < cbg.MaxLength-1; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 100) + mustAdd(t, mp, m) + // a2's messages are preferred + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + } + + // a1's 8192nd message is worth more than a2's + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 1000) + mustAdd(t, mp, m) + + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + + i++ + + // a2's (unselectable) 8193rd message is worth SO MUCH + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 1000000) + mustAdd(t, mp, m) + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + mGasLimit := int64(0) + counts := make(map[crypto.SigType]uint) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + counts[m.Signature.Type]++ + } + + if mGasLimit > build.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + + expected := build.BlockMessageLimit + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + // we should have taken the secp chain + if counts[crypto.SigTypeSecp256k1] != cbg.MaxLength { + t.Fatalf("expected %d secp messages, but got %d", cbg.MaxLength, counts[crypto.SigTypeSecp256k1]) + } +} + func TestPriorityMessageSelection(t *testing.T) { mp, tma := makeTestMpool() @@ -978,7 +1175,7 @@ func TestOptimalMessageSelection2(t *testing.T) { func TestOptimalMessageSelection3(t *testing.T) { // this test uses 10 actors sending a block of messages to each other, with the the first // actors paying higher gas premium than the subsequent actors. - // We select with a low ticket quality; the chain depenent merging algorithm should pick + // We select with a low ticket quality; the chain dependent merging algorithm should pick // messages from the median actor from the start mp, tma := makeTestMpool() @@ -1109,11 +1306,13 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu logging.SetLogLevel("messagepool", "error") // 1.
greedy selection - greedyMsgs, err := mp.selectMessagesGreedy(context.Background(), ts, ts) + gm, err := mp.selectMessagesGreedy(context.Background(), ts, ts) if err != nil { t.Fatal(err) } + greedyMsgs := gm.msgs + totalGreedyCapacity := 0.0 totalGreedyReward := 0.0 totalOptimalCapacity := 0.0 diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go index 063d1aa7d..e2229bb51 100644 --- a/chain/messagesigner/messagesigner.go +++ b/chain/messagesigner/messagesigner.go @@ -84,7 +84,7 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb } // If the callback executed successfully, write the nonce to the datastore - if err := ms.saveNonce(msg.From, nonce); err != nil { + if err := ms.saveNonce(ctx, msg.From, nonce); err != nil { return nil, xerrors.Errorf("failed to save nonce: %w", err) } @@ -105,7 +105,7 @@ func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (u // Get the next nonce for this address from the datastore addrNonceKey := ms.dstoreKey(addr) - dsNonceBytes, err := ms.ds.Get(addrNonceKey) + dsNonceBytes, err := ms.ds.Get(ctx, addrNonceKey) switch { case xerrors.Is(err, datastore.ErrNotFound): @@ -139,7 +139,7 @@ func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (u // saveNonce increments the nonce for this address and writes it to the // datastore -func (ms *MessageSigner) saveNonce(addr address.Address, nonce uint64) error { +func (ms *MessageSigner) saveNonce(ctx context.Context, addr address.Address, nonce uint64) error { // Increment the nonce nonce++ @@ -150,7 +150,7 @@ func (ms *MessageSigner) saveNonce(addr address.Address, nonce uint64) error { if err != nil { return xerrors.Errorf("failed to marshall nonce: %w", err) } - err = ms.ds.Put(addrNonceKey, buf.Bytes()) + err = ms.ds.Put(ctx, addrNonceKey, buf.Bytes()) if err != nil { return xerrors.Errorf("failed to write nonce to datastore: %w", err) } diff --git a/chain/rand/rand.go b/chain/rand/rand.go index 90e9a514b..6a54a2427 100644 --- a/chain/rand/rand.go +++ b/chain/rand/rand.go @@ -4,6 +4,8 @@ import ( "context" "encoding/binary" + "github.com/filecoin-project/go-state-types/network" + logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/lotus/chain/beacon" @@ -48,7 +50,7 @@ func (sr *stateRand) GetBeaconRandomnessTipset(ctx context.Context, round abi.Ch defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) - ts, err := sr.cs.LoadTipSet(types.NewTipSetKey(sr.blks...)) + ts, err := sr.cs.LoadTipSet(ctx, types.NewTipSetKey(sr.blks...)) if err != nil { return nil, err } @@ -70,12 +72,12 @@ func (sr *stateRand) GetBeaconRandomnessTipset(ctx context.Context, round abi.Ch return randTs, nil } -func (sr *stateRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { +func (sr *stateRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { _, span := trace.StartSpan(ctx, "store.GetChainRandomness") defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) - ts, err := sr.cs.LoadTipSet(types.NewTipSetKey(sr.blks...)) + ts, err := sr.cs.LoadTipSet(ctx, types.NewTipSetKey(sr.blks...)) if err != nil { return nil, err } @@ -116,23 +118,13 @@ func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule) vm.Ra } // network v0-12 -func (sr *stateRand) 
GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return sr.GetChainRandomness(ctx, pers, round, entropy, true) -} - -// network v13 and on -func (sr *stateRand) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return sr.GetChainRandomness(ctx, pers, round, entropy, false) -} - -// network v0-12 -func (sr *stateRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (sr *stateRand) getBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { randTs, err := sr.GetBeaconRandomnessTipset(ctx, round, true) if err != nil { return nil, err } - be, err := sr.cs.GetLatestBeaconEntry(randTs) + be, err := sr.cs.GetLatestBeaconEntry(ctx, randTs) if err != nil { return nil, err } @@ -143,13 +135,13 @@ func (sr *stateRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.Doma } // network v13 -func (sr *stateRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (sr *stateRand) getBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { randTs, err := sr.GetBeaconRandomnessTipset(ctx, round, false) if err != nil { return nil, err } - be, err := sr.cs.GetLatestBeaconEntry(randTs) + be, err := sr.cs.GetLatestBeaconEntry(ctx, randTs) if err != nil { return nil, err } @@ -160,9 +152,9 @@ func (sr *stateRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.Doma } // network v14 and on -func (sr *stateRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (sr *stateRand) getBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { if filecoinEpoch < 0 { - return sr.GetBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy) + return sr.getBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy) } be, err := sr.extractBeaconEntryForEpoch(ctx, filecoinEpoch) @@ -174,6 +166,24 @@ func (sr *stateRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.Doma return DrawRandomness(be.Data, pers, filecoinEpoch, entropy) } +func (sr *stateRand) GetChainRandomness(ctx context.Context, nv network.Version, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + if nv >= network.Version13 { + return sr.getChainRandomness(ctx, pers, filecoinEpoch, entropy, false) + } + + return sr.getChainRandomness(ctx, pers, filecoinEpoch, entropy, true) +} + +func (sr *stateRand) GetBeaconRandomness(ctx context.Context, nv network.Version, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + if nv >= network.Version14 { + return sr.getBeaconRandomnessV3(ctx, pers, filecoinEpoch, entropy) + } else if nv == network.Version13 { + return sr.getBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy) + } else { + return sr.getBeaconRandomnessV1(ctx, pers, filecoinEpoch, entropy) + } +} + func (sr *stateRand) extractBeaconEntryForEpoch(ctx context.Context, filecoinEpoch abi.ChainEpoch) (*types.BeaconEntry, error) { randTs, err := sr.GetBeaconRandomnessTipset(ctx, filecoinEpoch, false) if err != 
nil { @@ -190,7 +200,7 @@ func (sr *stateRand) extractBeaconEntryForEpoch(ctx context.Context, filecoinEpo } } - next, err := sr.cs.LoadTipSet(randTs.Parents()) + next, err := sr.cs.LoadTipSet(ctx, randTs.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for beacon entry: %w", err) } diff --git a/chain/state/statetree.go b/chain/state/statetree.go index f230f7faa..9a518a622 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -159,7 +159,7 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) { /* inline-gen start */ - case network.Version13, network.Version14: + case network.Version13, network.Version14, network.Version15: /* inline-gen end */ return types.StateTreeVersion4, nil diff --git a/chain/stmgr/actors.go b/chain/stmgr/actors.go index 4d016b7ab..6804b8126 100644 --- a/chain/stmgr/actors.go +++ b/chain/stmgr/actors.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" cid "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/api" @@ -300,12 +301,12 @@ func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([ } func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) { - ts, err := sm.ChainStore().LoadTipSet(tsk) + ts, err := sm.ChainStore().LoadTipSet(ctx, tsk) if err != nil { return nil, xerrors.Errorf("failed to load tipset for mining base: %w", err) } - prev, err := sm.ChainStore().GetLatestBeaconEntry(ts) + prev, err := sm.ChainStore().GetLatestBeaconEntry(ctx, ts) if err != nil { if os.Getenv("LOTUS_IGNORE_DRAND") != "_yes_" { return nil, xerrors.Errorf("failed to get latest beacon entry: %w", err) diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index 7cc50e710..ef29e1659 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -40,7 +40,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. ts = sm.cs.GetHeaviestTipSet() // Search back till we find a height with no fork, or we reach the beginning. for ts.Height() > 0 { - pts, err := sm.cs.GetTipSetFromKey(ts.Parents()) + pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } @@ -51,7 +51,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. ts = pts } } else if ts.Height() > 0 { - pts, err := sm.cs.LoadTipSet(ts.Parents()) + pts, err := sm.cs.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parent tipset: %w", err) } @@ -155,7 +155,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri // height to have no fork, because we'll run it inside this // function before executing the given message. 
for ts.Height() > 0 { - pts, err := sm.cs.GetTipSetFromKey(ts.Parents()) + pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } @@ -166,7 +166,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri ts = pts } } else if ts.Height() > 0 { - pts, err := sm.cs.GetTipSetFromKey(ts.Parents()) + pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } diff --git a/chain/stmgr/read.go b/chain/stmgr/read.go index bc259f227..bca32429b 100644 --- a/chain/stmgr/read.go +++ b/chain/stmgr/read.go @@ -13,8 +13,8 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -func (sm *StateManager) ParentStateTsk(tsk types.TipSetKey) (*state.StateTree, error) { - ts, err := sm.cs.GetTipSetFromKey(tsk) +func (sm *StateManager) ParentStateTsk(ctx context.Context, tsk types.TipSetKey) (*state.StateTree, error) { + ts, err := sm.cs.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -57,8 +57,8 @@ func (sm *StateManager) LoadActor(_ context.Context, addr address.Address, ts *t return state.GetActor(addr) } -func (sm *StateManager) LoadActorTsk(_ context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) { - state, err := sm.ParentStateTsk(tsk) +func (sm *StateManager) LoadActorTsk(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) { + state, err := sm.ParentStateTsk(ctx, tsk) if err != nil { return nil, err } diff --git a/chain/stmgr/searchwait.go b/chain/stmgr/searchwait.go index 45c98a855..7e6de91d4 100644 --- a/chain/stmgr/searchwait.go +++ b/chain/stmgr/searchwait.go @@ -20,7 +20,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid ctx, cancel := context.WithCancel(ctx) defer cancel() - msg, err := sm.cs.GetCMessage(mcid) + msg, err := sm.cs.GetCMessage(ctx, mcid) if err != nil { return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err) } @@ -40,7 +40,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid return nil, nil, cid.Undef, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type) } - r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, head[0].Val, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -93,7 +93,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) { return candidateTs, candidateRcp, candidateFm, nil } - r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, val.Val, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -130,12 +130,12 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid } func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet, mcid cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { - msg, err := sm.cs.GetCMessage(mcid) + msg, err := sm.cs.GetCMessage(ctx, mcid) if err != nil { return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err) } 
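Most of the hunks in the rest of this section apply one mechanical change: chain store, state manager, and datastore entry points now take the caller's context.Context instead of taking no context or building context.TODO() internally, so cancellation and tracing reach the underlying blockstore and datastore operations. A hypothetical caller-side illustration (SearchForMessage's signature is taken from the hunk above; the helper name, timeout, and lookback value are invented for the example, and real call sites may differ):

import (
	"context"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
)

// findReceipt bounds a potentially deep message search; with this change the
// deadline set here is observed by the tipset and message loads performed
// inside the state manager rather than being dropped at the store boundary.
func findReceipt(ctx context.Context, sm *stmgr.StateManager, head *types.TipSet, mcid cid.Cid) (*types.MessageReceipt, error) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	// look back at most 900 epochs (an illustrative value) and allow replaced messages
	_, rcpt, _, err := sm.SearchForMessage(ctx, head, mcid, abi.ChainEpoch(900), true)
	if err != nil {
		return nil, err
	}
	// rcpt may be nil if the message was not found within the lookback window
	return rcpt, nil
}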
- r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, head, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -201,7 +201,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet return nil, nil, cid.Undef, nil } - pts, err := sm.cs.LoadTipSet(cur.Parents()) + pts, err := sm.cs.LoadTipSet(ctx, cur.Parents()) if err != nil { return nil, nil, cid.Undef, xerrors.Errorf("failed to load tipset during msg wait searchback: %w", err) } @@ -214,7 +214,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet // check that between cur and parent tipset the nonce fell into range of our message if actorNoExist || (curActor.Nonce > mNonce && act.Nonce <= mNonce) { - r, foundMsg, err := sm.tipsetExecutedMessage(cur, m.Cid(), m.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, cur, m.Cid(), m.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, xerrors.Errorf("checking for message execution during lookback: %w", err) } @@ -229,18 +229,18 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet } } -func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message, allowReplaced bool) (*types.MessageReceipt, cid.Cid, error) { +func (sm *StateManager) tipsetExecutedMessage(ctx context.Context, ts *types.TipSet, msg cid.Cid, vmm *types.Message, allowReplaced bool) (*types.MessageReceipt, cid.Cid, error) { // The genesis block did not execute any messages if ts.Height() == 0 { return nil, cid.Undef, nil } - pts, err := sm.cs.LoadTipSet(ts.Parents()) + pts, err := sm.cs.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, cid.Undef, err } - cm, err := sm.cs.MessagesForTipset(pts) + cm, err := sm.cs.MessagesForTipset(ctx, pts) if err != nil { return nil, cid.Undef, err } @@ -267,7 +267,7 @@ func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm } } - pr, err := sm.cs.GetParentReceipt(ts.Blocks()[0], i) + pr, err := sm.cs.GetParentReceipt(ctx, ts.Blocks()[0], i) if err != nil { return nil, cid.Undef, err } diff --git a/chain/stmgr/searchwait_test.go b/chain/stmgr/searchwait_test.go index 1e4776ff7..b8cd7ddcf 100644 --- a/chain/stmgr/searchwait_test.go +++ b/chain/stmgr/searchwait_test.go @@ -75,7 +75,7 @@ func TestSearchForMessageReplacements(t *testing.T) { t.Fatal(err) } - err = cg.Blockstore().Put(rmb) + err = cg.Blockstore().Put(ctx, rmb) if err != nil { t.Fatal(err) } @@ -117,7 +117,7 @@ func TestSearchForMessageReplacements(t *testing.T) { t.Fatal(err) } - err = cg.Blockstore().Put(nrmb) + err = cg.Blockstore().Put(ctx, nrmb) if err != nil { t.Fatal(err) } diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 67c0bdb6a..06b363733 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -320,7 +320,7 @@ func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts * func (sm *StateManager) ValidateChain(ctx context.Context, ts *types.TipSet) error { tschain := []*types.TipSet{ts} for ts.Height() != 0 { - next, err := sm.cs.LoadTipSet(ts.Parents()) + next, err := sm.cs.LoadTipSet(ctx, ts.Parents()) if err != nil { return err } @@ -372,7 +372,7 @@ func (sm *StateManager) VMSys() vm.SyscallBuilder { } func (sm *StateManager) GetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk 
types.TipSetKey) (abi.Randomness, error) { - pts, err := sm.ChainStore().GetTipSetFromKey(tsk) + pts, err := sm.ChainStore().GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -380,18 +380,12 @@ func (sm *StateManager) GetRandomnessFromBeacon(ctx context.Context, personaliza r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon) rnv := sm.GetNtwkVersion(ctx, randEpoch) - if rnv >= network.Version14 { - return r.GetBeaconRandomnessV3(ctx, personalization, randEpoch, entropy) - } else if rnv == network.Version13 { - return r.GetBeaconRandomnessV2(ctx, personalization, randEpoch, entropy) - } - - return r.GetBeaconRandomnessV1(ctx, personalization, randEpoch, entropy) + return r.GetBeaconRandomness(ctx, rnv, personalization, randEpoch, entropy) } func (sm *StateManager) GetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) { - pts, err := sm.ChainStore().LoadTipSet(tsk) + pts, err := sm.ChainStore().LoadTipSet(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset key: %w", err) } @@ -399,9 +393,5 @@ func (sm *StateManager) GetRandomnessFromTickets(ctx context.Context, personaliz r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon) rnv := sm.GetNtwkVersion(ctx, randEpoch) - if rnv >= network.Version13 { - return r.GetChainRandomnessV2(ctx, personalization, randEpoch, entropy) - } - - return r.GetChainRandomnessV1(ctx, personalization, randEpoch, entropy) + return r.GetChainRandomness(ctx, rnv, personalization, randEpoch, entropy) } diff --git a/chain/stmgr/supply.go b/chain/stmgr/supply.go index c9475a51e..0744c02aa 100644 --- a/chain/stmgr/supply.go +++ b/chain/stmgr/supply.go @@ -31,7 +31,7 @@ import ( // sets up information about the vesting schedule func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error { - gb, err := sm.cs.GetGenesis() + gb, err := sm.cs.GetGenesis(ctx) if err != nil { return xerrors.Errorf("getting genesis block: %w", err) } diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index ce4f60105..8b0f8daeb 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -155,7 +155,7 @@ func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types. 
} - lbts, err := sm.ChainStore().GetTipSetFromKey(nextTs.Parents()) + lbts, err := sm.ChainStore().GetTipSetFromKey(ctx, nextTs.Parents()) if err != nil { return nil, cid.Undef, xerrors.Errorf("failed to resolve lookback tipset: %w", err) } diff --git a/chain/store/basefee.go b/chain/store/basefee.go index 33367abcc..1d6b0760e 100644 --- a/chain/store/basefee.go +++ b/chain/store/basefee.go @@ -58,7 +58,7 @@ func (cs *ChainStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet) (abi seen := make(map[cid.Cid]struct{}) for _, b := range ts.Blocks() { - msg1, msg2, err := cs.MessagesForBlock(b) + msg1, msg2, err := cs.MessagesForBlock(ctx, b) if err != nil { return zero, xerrors.Errorf("error getting messages for: %s: %w", b.Cid(), err) } diff --git a/chain/store/checkpoint_test.go b/chain/store/checkpoint_test.go index 81bbab6ea..73b45f3ad 100644 --- a/chain/store/checkpoint_test.go +++ b/chain/store/checkpoint_test.go @@ -10,6 +10,8 @@ import ( ) func TestChainCheckpoint(t *testing.T) { + ctx := context.Background() + cg, err := gen.NewGenerator() if err != nil { t.Fatal(err) @@ -27,11 +29,11 @@ func TestChainCheckpoint(t *testing.T) { cs := cg.ChainStore() checkpoint := last - checkpointParents, err := cs.GetTipSetFromKey(checkpoint.Parents()) + checkpointParents, err := cs.GetTipSetFromKey(ctx, checkpoint.Parents()) require.NoError(t, err) // Set the head to the block before the checkpoint. - err = cs.SetHead(checkpointParents) + err = cs.SetHead(ctx, checkpointParents) require.NoError(t, err) // Verify it worked. @@ -39,11 +41,11 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(checkpointParents)) // Try to set the checkpoint in the future, it should fail. - err = cs.SetCheckpoint(checkpoint) + err = cs.SetCheckpoint(ctx, checkpoint) require.Error(t, err) // Then move the head back. - err = cs.SetHead(checkpoint) + err = cs.SetHead(ctx, checkpoint) require.NoError(t, err) // Verify it worked. @@ -51,7 +53,7 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(checkpoint)) // And checkpoint it. - err = cs.SetCheckpoint(checkpoint) + err = cs.SetCheckpoint(ctx, checkpoint) require.NoError(t, err) // Let the second miner miner mine a fork @@ -70,7 +72,7 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(checkpoint)) // Remove the checkpoint. - err = cs.RemoveCheckpoint() + err = cs.RemoveCheckpoint(ctx) require.NoError(t, err) // Now switch to the other fork. @@ -80,10 +82,10 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(last)) // Setting a checkpoint on the other fork should fail. - err = cs.SetCheckpoint(checkpoint) + err = cs.SetCheckpoint(ctx, checkpoint) require.Error(t, err) // Setting a checkpoint on this fork should succeed. 
- err = cs.SetCheckpoint(checkpointParents) + err = cs.SetCheckpoint(ctx, checkpointParents) require.NoError(t, err) } diff --git a/chain/store/index.go b/chain/store/index.go index 324fb7a63..f5bbbd438 100644 --- a/chain/store/index.go +++ b/chain/store/index.go @@ -31,7 +31,7 @@ type ChainIndex struct { skipLength abi.ChainEpoch } -type loadTipSetFunc func(types.TipSetKey) (*types.TipSet, error) +type loadTipSetFunc func(context.Context, types.TipSetKey) (*types.TipSet, error) func NewChainIndex(lts loadTipSetFunc) *ChainIndex { sc, _ := lru.NewARC(DefaultChainIndexCacheSize) @@ -49,12 +49,12 @@ type lbEntry struct { target types.TipSetKey } -func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { +func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { if from.Height()-to <= ci.skipLength { - return ci.walkBack(from, to) + return ci.walkBack(ctx, from, to) } - rounded, err := ci.roundDown(from) + rounded, err := ci.roundDown(ctx, from) if err != nil { return nil, err } @@ -63,7 +63,7 @@ func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, t for { cval, ok := ci.skipCache.Get(cur) if !ok { - fc, err := ci.fillCache(cur) + fc, err := ci.fillCache(ctx, cur) if err != nil { return nil, err } @@ -74,19 +74,19 @@ func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, t if lbe.ts.Height() == to || lbe.parentHeight < to { return lbe.ts, nil } else if to > lbe.targetHeight { - return ci.walkBack(lbe.ts, to) + return ci.walkBack(ctx, lbe.ts, to) } cur = lbe.target } } -func (ci *ChainIndex) GetTipsetByHeightWithoutCache(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { - return ci.walkBack(from, to) +func (ci *ChainIndex) GetTipsetByHeightWithoutCache(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { + return ci.walkBack(ctx, from, to) } -func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { - ts, err := ci.loadTipSet(tsk) +func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEntry, error) { + ts, err := ci.loadTipSet(ctx, tsk) if err != nil { return nil, err } @@ -101,7 +101,7 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { // will either be equal to ts.Height, or at least > ts.Parent.Height() rheight := ci.roundHeight(ts.Height()) - parent, err := ci.loadTipSet(ts.Parents()) + parent, err := ci.loadTipSet(ctx, ts.Parents()) if err != nil { return nil, err } @@ -115,7 +115,7 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { if parent.Height() < rheight { skipTarget = parent } else { - skipTarget, err = ci.walkBack(parent, rheight) + skipTarget, err = ci.walkBack(ctx, parent, rheight) if err != nil { return nil, xerrors.Errorf("fillCache walkback: %w", err) } @@ -137,10 +137,10 @@ func (ci *ChainIndex) roundHeight(h abi.ChainEpoch) abi.ChainEpoch { return (h / ci.skipLength) * ci.skipLength } -func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) { +func (ci *ChainIndex) roundDown(ctx context.Context, ts *types.TipSet) (*types.TipSet, error) { target := ci.roundHeight(ts.Height()) - rounded, err := ci.walkBack(ts, target) + rounded, err := ci.walkBack(ctx, ts, target) if err != nil { return nil, err } @@ -148,7 +148,7 @@ func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) { return rounded, nil } -func (ci *ChainIndex) 
walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { +func (ci *ChainIndex) walkBack(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { if to > from.Height() { return nil, xerrors.Errorf("looking for tipset with height greater than start point") } @@ -160,7 +160,7 @@ func (ci *ChainIndex) walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.Ti ts := from for { - pts, err := ci.loadTipSet(ts.Parents()) + pts, err := ci.loadTipSet(ctx, ts.Parents()) if err != nil { return nil, err } diff --git a/chain/store/index_test.go b/chain/store/index_test.go index 9bc31e5a8..b7f1d570f 100644 --- a/chain/store/index_test.go +++ b/chain/store/index_test.go @@ -35,7 +35,7 @@ func TestIndexSeeks(t *testing.T) { cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - _, err = cs.Import(bytes.NewReader(gencar)) + _, err = cs.Import(ctx, bytes.NewReader(gencar)) if err != nil { t.Fatal(err) } @@ -44,7 +44,7 @@ func TestIndexSeeks(t *testing.T) { if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil { t.Fatal(err) } - assert.NoError(t, cs.SetGenesis(gen)) + assert.NoError(t, cs.SetGenesis(ctx, gen)) // Put 113 blocks from genesis for i := 0; i < 113; i++ { diff --git a/chain/store/messages.go b/chain/store/messages.go index 07ce83458..4dd3bfc1d 100644 --- a/chain/store/messages.go +++ b/chain/store/messages.go @@ -23,25 +23,25 @@ type storable interface { ToStorageBlock() (block.Block, error) } -func PutMessage(bs bstore.Blockstore, m storable) (cid.Cid, error) { +func PutMessage(ctx context.Context, bs bstore.Blockstore, m storable) (cid.Cid, error) { b, err := m.ToStorageBlock() if err != nil { return cid.Undef, err } - if err := bs.Put(b); err != nil { + if err := bs.Put(ctx, b); err != nil { return cid.Undef, err } return b.Cid(), nil } -func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) { - return PutMessage(cs.chainBlockstore, m) +func (cs *ChainStore) PutMessage(ctx context.Context, m storable) (cid.Cid, error) { + return PutMessage(ctx, cs.chainBlockstore, m) } -func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { - m, err := cs.GetMessage(c) +func (cs *ChainStore) GetCMessage(ctx context.Context, c cid.Cid) (types.ChainMsg, error) { + m, err := cs.GetMessage(ctx, c) if err == nil { return m, nil } @@ -49,21 +49,21 @@ func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { log.Warnf("GetCMessage: unexpected error getting unsigned message: %s", err) } - return cs.GetSignedMessage(c) + return cs.GetSignedMessage(ctx, c) } -func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) { +func (cs *ChainStore) GetMessage(ctx context.Context, c cid.Cid) (*types.Message, error) { var msg *types.Message - err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) { msg, err = types.DecodeMessage(b) return err }) return msg, err } -func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) { +func (cs *ChainStore) GetSignedMessage(ctx context.Context, c cid.Cid) (*types.SignedMessage, error) { var msg *types.SignedMessage - err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) { msg, err = types.DecodeSignedMessage(b) return err }) @@ -103,7 +103,7 @@ type BlockMessages struct { SecpkMessages []types.ChainMsg } -func (cs 
*ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) { +func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet) ([]BlockMessages, error) { // returned BlockMessages match block order in tipset applied := make(map[address.Address]uint64) @@ -142,7 +142,7 @@ func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, err var out []BlockMessages for _, b := range ts.Blocks() { - bms, sms, err := cs.MessagesForBlock(b) + bms, sms, err := cs.MessagesForBlock(ctx, b) if err != nil { return nil, xerrors.Errorf("failed to get messages for block: %w", err) } @@ -181,8 +181,8 @@ func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, err return out, nil } -func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { - bmsgs, err := cs.BlockMsgsForTipset(ts) +func (cs *ChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { + bmsgs, err := cs.BlockMsgsForTipset(ctx, ts) if err != nil { return nil, err } @@ -206,7 +206,7 @@ type mmCids struct { secpk []cid.Cid } -func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { +func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { o, ok := cs.mmCache.Get(mmc) if ok { mmcids := o.(*mmCids) @@ -215,7 +215,7 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) cst := cbor.NewCborStore(cs.chainLocalBlockstore) var msgmeta types.MsgMeta - if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil { + if err := cst.Get(ctx, mmc, &msgmeta); err != nil { return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err) } @@ -237,18 +237,18 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) return blscids, secpkcids, nil } -func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - blscids, secpkcids, err := cs.ReadMsgMetaCids(b.Messages) +func (cs *ChainStore) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { + blscids, secpkcids, err := cs.ReadMsgMetaCids(ctx, b.Messages) if err != nil { return nil, nil, err } - blsmsgs, err := cs.LoadMessagesFromCids(blscids) + blsmsgs, err := cs.LoadMessagesFromCids(ctx, blscids) if err != nil { return nil, nil, xerrors.Errorf("loading bls messages for block: %w", err) } - secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids) + secpkmsgs, err := cs.LoadSignedMessagesFromCids(ctx, secpkcids) if err != nil { return nil, nil, xerrors.Errorf("loading secpk messages for block: %w", err) } @@ -256,8 +256,7 @@ func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, return blsmsgs, secpkmsgs, nil } -func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) { - ctx := context.TODO() +func (cs *ChainStore) GetParentReceipt(ctx context.Context, b *types.BlockHeader, i int) (*types.MessageReceipt, error) { // block headers use adt0, for now. 
a, err := blockadt.AsArray(cs.ActorStore(ctx), b.ParentMessageReceipts) if err != nil { @@ -274,10 +273,10 @@ func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.Mess return &r, nil } -func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, error) { +func (cs *ChainStore) LoadMessagesFromCids(ctx context.Context, cids []cid.Cid) ([]*types.Message, error) { msgs := make([]*types.Message, 0, len(cids)) for i, c := range cids { - m, err := cs.GetMessage(c) + m, err := cs.GetMessage(ctx, c) if err != nil { return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err) } @@ -288,10 +287,10 @@ func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, er return msgs, nil } -func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.SignedMessage, error) { +func (cs *ChainStore) LoadSignedMessagesFromCids(ctx context.Context, cids []cid.Cid) ([]*types.SignedMessage, error) { msgs := make([]*types.SignedMessage, 0, len(cids)) for i, c := range cids { - m, err := cs.GetSignedMessage(c) + m, err := cs.GetSignedMessage(ctx, c) if err != nil { return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err) } diff --git a/chain/store/snapshot.go b/chain/store/snapshot.go index 1d4ce3758..61fa8bdc8 100644 --- a/chain/store/snapshot.go +++ b/chain/store/snapshot.go @@ -30,7 +30,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo unionBs := bstore.Union(cs.stateBlockstore, cs.chainBlockstore) return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error { - blk, err := unionBs.Get(c) + blk, err := unionBs.Get(ctx, c) if err != nil { return xerrors.Errorf("writing object to car, bs.Get: %w", err) } @@ -43,18 +43,18 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo }) } -func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) { +func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, error) { // TODO: writing only to the state blockstore is incorrect. // At this time, both the state and chain blockstores are backed by the // universal store. When we physically segregate the stores, we will need // to route state objects to the state blockstore, and chain objects to // the chain blockstore. 
- header, err := car.LoadCar(cs.StateBlockstore(), r) + header, err := car.LoadCar(ctx, cs.StateBlockstore(), r) if err != nil { return nil, xerrors.Errorf("loadcar failed: %w", err) } - root, err := cs.LoadTipSet(types.NewTipSetKey(header.Roots...)) + root, err := cs.LoadTipSet(ctx, types.NewTipSetKey(header.Roots...)) if err != nil { return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err) } @@ -82,7 +82,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe return err } - data, err := cs.chainBlockstore.Get(blk) + data, err := cs.chainBlockstore.Get(ctx, blk) if err != nil { return xerrors.Errorf("getting block: %w", err) } @@ -102,7 +102,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe var cids []cid.Cid if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots { if walked.Visit(b.Messages) { - mcids, err := recurseLinks(cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages}) + mcids, err := recurseLinks(ctx, cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages}) if err != nil { return xerrors.Errorf("recursing messages failed: %w", err) } @@ -123,7 +123,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots { if walked.Visit(b.ParentStateRoot) { - cids, err := recurseLinks(cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) + cids, err := recurseLinks(ctx, cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) if err != nil { return xerrors.Errorf("recursing genesis state failed: %w", err) } @@ -168,12 +168,12 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe return nil } -func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) { +func recurseLinks(ctx context.Context, bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) { if root.Prefix().Codec != cid.DagCBOR { return in, nil } - data, err := bs.Get(root) + data, err := bs.Get(ctx, root) if err != nil { return nil, xerrors.Errorf("recurse links get (%s) failed: %w", root, err) } @@ -192,7 +192,7 @@ func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid. 
in = append(in, c) var err error - in, err = recurseLinks(bs, walked, c, in) + in, err = recurseLinks(ctx, bs, walked, c, in) if err != nil { rerr = err } diff --git a/chain/store/store.go b/chain/store/store.go index f99c7f649..d86a66f12 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -207,17 +207,17 @@ func (cs *ChainStore) Close() error { return nil } -func (cs *ChainStore) Load() error { - if err := cs.loadHead(); err != nil { +func (cs *ChainStore) Load(ctx context.Context) error { + if err := cs.loadHead(ctx); err != nil { return err } - if err := cs.loadCheckpoint(); err != nil { + if err := cs.loadCheckpoint(ctx); err != nil { return err } return nil } -func (cs *ChainStore) loadHead() error { - head, err := cs.metadataDs.Get(chainHeadKey) +func (cs *ChainStore) loadHead(ctx context.Context) error { + head, err := cs.metadataDs.Get(ctx, chainHeadKey) if err == dstore.ErrNotFound { log.Warn("no previous chain state found") return nil @@ -231,7 +231,7 @@ func (cs *ChainStore) loadHead() error { return xerrors.Errorf("failed to unmarshal stored chain head: %w", err) } - ts, err := cs.LoadTipSet(types.NewTipSetKey(tscids...)) + ts, err := cs.LoadTipSet(ctx, types.NewTipSetKey(tscids...)) if err != nil { return xerrors.Errorf("loading tipset: %w", err) } @@ -241,8 +241,8 @@ func (cs *ChainStore) loadHead() error { return nil } -func (cs *ChainStore) loadCheckpoint() error { - tskBytes, err := cs.metadataDs.Get(checkpointKey) +func (cs *ChainStore) loadCheckpoint(ctx context.Context) error { + tskBytes, err := cs.metadataDs.Get(ctx, checkpointKey) if err == dstore.ErrNotFound { return nil } @@ -256,7 +256,7 @@ func (cs *ChainStore) loadCheckpoint() error { return err } - ts, err := cs.LoadTipSet(tsk) + ts, err := cs.LoadTipSet(ctx, tsk) if err != nil { return xerrors.Errorf("loading tipset: %w", err) } @@ -266,13 +266,13 @@ func (cs *ChainStore) loadCheckpoint() error { return nil } -func (cs *ChainStore) writeHead(ts *types.TipSet) error { +func (cs *ChainStore) writeHead(ctx context.Context, ts *types.TipSet) error { data, err := json.Marshal(ts.Cids()) if err != nil { return xerrors.Errorf("failed to marshal tipset: %w", err) } - if err := cs.metadataDs.Put(chainHeadKey, data); err != nil { + if err := cs.metadataDs.Put(ctx, chainHeadKey, data); err != nil { return xerrors.Errorf("failed to write chain head to datastore: %w", err) } @@ -341,13 +341,13 @@ func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) { func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - return cs.metadataDs.Has(key) + return cs.metadataDs.Has(ctx, key) } func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - if err := cs.metadataDs.Put(key, []byte{0}); err != nil { + if err := cs.metadataDs.Put(ctx, key, []byte{0}); err != nil { return xerrors.Errorf("cache block validation: %w", err) } @@ -357,34 +357,34 @@ func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) e func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - if err := cs.metadataDs.Delete(key); err != nil { + if err := cs.metadataDs.Delete(ctx, key); err != nil { return xerrors.Errorf("removing from valid block cache: %w", err) } return nil } -func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error { +func (cs 
*ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) error { ts, err := types.NewTipSet([]*types.BlockHeader{b}) if err != nil { return err } - if err := cs.PutTipSet(context.TODO(), ts); err != nil { + if err := cs.PutTipSet(ctx, ts); err != nil { return err } - return cs.metadataDs.Put(dstore.NewKey("0"), b.Cid().Bytes()) + return cs.metadataDs.Put(ctx, dstore.NewKey("0"), b.Cid().Bytes()) } func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error { for _, b := range ts.Blocks() { - if err := cs.PersistBlockHeaders(b); err != nil { + if err := cs.PersistBlockHeaders(ctx, b); err != nil { return err } } - expanded, err := cs.expandTipset(ts.Blocks()[0]) + expanded, err := cs.expandTipset(ctx, ts.Blocks()[0]) if err != nil { return xerrors.Errorf("errored while expanding tipset: %w", err) } @@ -435,7 +435,7 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS // difference between 'bootstrap sync' and 'caught up' sync, we need // some other heuristic. - exceeds, err := cs.exceedsForkLength(cs.heaviest, ts) + exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, ts) if err != nil { return err } @@ -458,7 +458,7 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS // FIXME: We may want to replace some of the logic in `syncFork()` with this. // `syncFork()` counts the length on both sides of the fork at the moment (we // need to settle on that) but here we just enforce it on the `synced` side. -func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, error) { +func (cs *ChainStore) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) { if synced == nil || external == nil { // FIXME: If `cs.heaviest` is nil we should just bypass the entire // `MaybeTakeHeavierTipSet` logic (instead of each of the called @@ -482,7 +482,7 @@ func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, e // length). return true, nil } - external, err = cs.LoadTipSet(external.Parents()) + external, err = cs.LoadTipSet(ctx, external.Parents()) if err != nil { return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err) } @@ -505,7 +505,7 @@ func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, e // there is no common ancestor. return true, nil } - synced, err = cs.LoadTipSet(synced.Parents()) + synced, err = cs.LoadTipSet(ctx, synced.Parents()) if err != nil { return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err) } @@ -521,17 +521,17 @@ func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, e // CAUTION: Use it only for testing, such as to teleport the chain to a // particular tipset to carry out a benchmark, verification, etc. on a chain // segment. -func (cs *ChainStore) ForceHeadSilent(_ context.Context, ts *types.TipSet) error { +func (cs *ChainStore) ForceHeadSilent(ctx context.Context, ts *types.TipSet) error { log.Warnf("(!!!) 
forcing a new head silently; new head: %s", ts) cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() - if err := cs.removeCheckpoint(); err != nil { + if err := cs.removeCheckpoint(ctx); err != nil { return err } cs.heaviest = ts - err := cs.writeHead(ts) + err := cs.writeHead(ctx, ts) if err != nil { err = xerrors.Errorf("failed to write chain head: %s", err) } @@ -561,7 +561,7 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo notifees = append(notifees, n) case r := <-out: - revert, apply, err := cs.ReorgOps(r.old, r.new) + revert, apply, err := cs.ReorgOps(ctx, r.old, r.new) if err != nil { log.Error("computing reorg ops failed: ", err) continue @@ -646,7 +646,7 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height()) cs.heaviest = ts - if err := cs.writeHead(ts); err != nil { + if err := cs.writeHead(ctx, ts); err != nil { log.Errorf("failed to write chain head: %s", err) return nil } @@ -656,14 +656,14 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) // FlushValidationCache removes all results of block validation from the // chain metadata store. Usually the first step after a new chain import. -func (cs *ChainStore) FlushValidationCache() error { - return FlushValidationCache(cs.metadataDs) +func (cs *ChainStore) FlushValidationCache(ctx context.Context) error { + return FlushValidationCache(ctx, cs.metadataDs) } -func FlushValidationCache(ds dstore.Batching) error { +func FlushValidationCache(ctx context.Context, ds dstore.Batching) error { log.Infof("clearing block validation cache...") - dsWalk, err := ds.Query(query.Query{ + dsWalk, err := ds.Query(ctx, query.Query{ // Potential TODO: the validation cache is not a namespace on its own // but is rather constructed as prefixed-key `foo:bar` via .Instance(), which // in turn does not work with the filter, which can match only on `foo/bar` @@ -683,7 +683,7 @@ func FlushValidationCache(ds dstore.Batching) error { return xerrors.Errorf("failed to run key listing query: %w", err) } - batch, err := ds.Batch() + batch, err := ds.Batch(ctx) if err != nil { return xerrors.Errorf("failed to open a DS batch: %w", err) } @@ -692,11 +692,11 @@ func FlushValidationCache(ds dstore.Batching) error { for _, k := range allKeys { if strings.HasPrefix(k.Key, blockValidationCacheKeyPrefix.String()) { delCnt++ - batch.Delete(dstore.RawKey(k.Key)) // nolint:errcheck + batch.Delete(ctx, dstore.RawKey(k.Key)) // nolint:errcheck } } - if err := batch.Commit(); err != nil { + if err := batch.Commit(ctx); err != nil { return xerrors.Errorf("failed to commit the DS batch: %w", err) } @@ -709,24 +709,24 @@ func FlushValidationCache(ds dstore.Batching) error { // This should only be called if something is broken and needs fixing. // // This function will bypass and remove any checkpoints. -func (cs *ChainStore) SetHead(ts *types.TipSet) error { +func (cs *ChainStore) SetHead(ctx context.Context, ts *types.TipSet) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() - if err := cs.removeCheckpoint(); err != nil { + if err := cs.removeCheckpoint(ctx); err != nil { return err } return cs.takeHeaviestTipSet(context.TODO(), ts) } // RemoveCheckpoint removes the current checkpoint. 
-func (cs *ChainStore) RemoveCheckpoint() error { +func (cs *ChainStore) RemoveCheckpoint(ctx context.Context) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() - return cs.removeCheckpoint() + return cs.removeCheckpoint(ctx) } -func (cs *ChainStore) removeCheckpoint() error { - if err := cs.metadataDs.Delete(checkpointKey); err != nil { +func (cs *ChainStore) removeCheckpoint(ctx context.Context) error { + if err := cs.metadataDs.Delete(ctx, checkpointKey); err != nil { return err } cs.checkpoint = nil @@ -736,7 +736,7 @@ func (cs *ChainStore) removeCheckpoint() error { // SetCheckpoint will set a checkpoint past which the chainstore will not allow forks. // // NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past. -func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { +func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error { tskBytes, err := json.Marshal(ts.Key()) if err != nil { return err @@ -755,7 +755,7 @@ func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { } if !ts.Equals(cs.heaviest) { - anc, err := cs.IsAncestorOf(ts, cs.heaviest) + anc, err := cs.IsAncestorOf(ctx, ts, cs.heaviest) if err != nil { return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err) } @@ -764,7 +764,7 @@ func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err) } } - err = cs.metadataDs.Put(checkpointKey, tskBytes) + err = cs.metadataDs.Put(ctx, checkpointKey, tskBytes) if err != nil { return err } @@ -781,9 +781,9 @@ func (cs *ChainStore) GetCheckpoint() *types.TipSet { } // Contains returns whether our BlockStore has all blocks in the supplied TipSet. -func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { +func (cs *ChainStore) Contains(ctx context.Context, ts *types.TipSet) (bool, error) { for _, c := range ts.Cids() { - has, err := cs.chainBlockstore.Has(c) + has, err := cs.chainBlockstore.Has(ctx, c) if err != nil { return false, err } @@ -797,16 +797,16 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { // GetBlock fetches a BlockHeader with the supplied CID. It returns // blockstore.ErrNotFound if the block was not found in the BlockStore. 
-func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) { +func (cs *ChainStore) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHeader, error) { var blk *types.BlockHeader - err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) { blk, err = types.DecodeBlock(b) return err }) return blk, err } -func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { +func (cs *ChainStore) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { v, ok := cs.tsCache.Get(tsk) if ok { return v.(*types.TipSet), nil @@ -819,7 +819,7 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { for i, c := range cids { i, c := i, c eg.Go(func() error { - b, err := cs.GetBlock(c) + b, err := cs.GetBlock(ctx, c) if err != nil { return xerrors.Errorf("get block %s: %w", c, err) } @@ -844,14 +844,14 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { } // IsAncestorOf returns true if 'a' is an ancestor of 'b' -func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { +func (cs *ChainStore) IsAncestorOf(ctx context.Context, a, b *types.TipSet) (bool, error) { if b.Height() <= a.Height() { return false, nil } cur := b for !a.Equals(cur) && cur.Height() > a.Height() { - next, err := cs.LoadTipSet(cur.Parents()) + next, err := cs.LoadTipSet(ctx, cur.Parents()) if err != nil { return false, err } @@ -862,13 +862,13 @@ func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { return cur.Equals(a), nil } -func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, error) { - l, _, err := cs.ReorgOps(a, b) +func (cs *ChainStore) NearestCommonAncestor(ctx context.Context, a, b *types.TipSet) (*types.TipSet, error) { + l, _, err := cs.ReorgOps(ctx, a, b) if err != nil { return nil, err } - return cs.LoadTipSet(l[len(l)-1].Parents()) + return cs.LoadTipSet(ctx, l[len(l)-1].Parents()) } // ReorgOps takes two tipsets (which can be at different heights), and walks @@ -879,11 +879,11 @@ func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, // ancestor. // // If an error happens along the way, we return the error with nil slices. 
-func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { - return ReorgOps(cs.LoadTipSet, a, b) +func (cs *ChainStore) ReorgOps(ctx context.Context, a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { + return ReorgOps(ctx, cs.LoadTipSet, a, b) } -func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { +func ReorgOps(ctx context.Context, lts func(ctx context.Context, _ types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { left := a right := b @@ -891,7 +891,7 @@ func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipS for !left.Equals(right) { if left.Height() > right.Height() { leftChain = append(leftChain, left) - par, err := lts(left.Parents()) + par, err := lts(ctx, left.Parents()) if err != nil { return nil, nil, err } @@ -899,7 +899,7 @@ func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipS left = par } else { rightChain = append(rightChain, right) - par, err := lts(right.Parents()) + par, err := lts(ctx, right.Parents()) if err != nil { log.Infof("failed to fetch right.Parents: %s", err) return nil, nil, err @@ -921,7 +921,7 @@ func (cs *ChainStore) GetHeaviestTipSet() (ts *types.TipSet) { return } -func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { +func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHeader) error { cs.tstLk.Lock() defer cs.tstLk.Unlock() @@ -931,7 +931,7 @@ func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { log.Debug("tried to add block to tipset tracker that was already there") return nil } - h, err := cs.GetBlock(oc) + h, err := cs.GetBlock(ctx, oc) if err == nil && h != nil { if h.Miner == b.Miner { log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid()) @@ -960,7 +960,7 @@ func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { return nil } -func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error { +func (cs *ChainStore) PersistBlockHeaders(ctx context.Context, b ...*types.BlockHeader) error { sbs := make([]block.Block, len(b)) for i, header := range b { @@ -982,13 +982,13 @@ func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error { end = len(b) } - err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(sbs[start:end])) + err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(ctx, sbs[start:end])) } return err } -func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) { +func (cs *ChainStore) expandTipset(ctx context.Context, b *types.BlockHeader) (*types.TipSet, error) { // Hold lock for the whole function for now, if it becomes a problem we can // fix pretty easily cs.tstLk.Lock() @@ -1007,7 +1007,7 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) continue } - h, err := cs.GetBlock(bhc) + h, err := cs.GetBlock(ctx, bhc) if err != nil { return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err) } @@ -1029,11 +1029,11 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) } func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error { - if err := cs.PersistBlockHeaders(b); err != nil { + if err := cs.PersistBlockHeaders(ctx, b); err != nil { return err } - ts, err := cs.expandTipset(b) + ts, err := cs.expandTipset(ctx, 
b) if err != nil { return err } @@ -1045,8 +1045,8 @@ func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error return nil } -func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { - data, err := cs.metadataDs.Get(dstore.NewKey("0")) +func (cs *ChainStore) GetGenesis(ctx context.Context) (*types.BlockHeader, error) { + data, err := cs.metadataDs.Get(ctx, dstore.NewKey("0")) if err != nil { return nil, err } @@ -1056,22 +1056,22 @@ func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { return nil, err } - return cs.GetBlock(c) + return cs.GetBlock(ctx, c) } // GetPath returns the sequence of atomic head change operations that // need to be applied in order to switch the head of the chain from the `from` // tipset to the `to` tipset. func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) { - fts, err := cs.LoadTipSet(from) + fts, err := cs.LoadTipSet(ctx, from) if err != nil { return nil, xerrors.Errorf("loading from tipset %s: %w", from, err) } - tts, err := cs.LoadTipSet(to) + tts, err := cs.LoadTipSet(ctx, to) if err != nil { return nil, xerrors.Errorf("loading to tipset %s: %w", to, err) } - revert, apply, err := cs.ReorgOps(fts, tts) + revert, apply, err := cs.ReorgOps(ctx, fts, tts) if err != nil { return nil, xerrors.Errorf("error getting tipset branches: %w", err) } @@ -1108,11 +1108,11 @@ func (cs *ChainStore) ActorStore(ctx context.Context) adt.Store { return ActorStore(ctx, cs.stateBlockstore) } -func (cs *ChainStore) TryFillTipSet(ts *types.TipSet) (*FullTipSet, error) { +func (cs *ChainStore) TryFillTipSet(ctx context.Context, ts *types.TipSet) (*FullTipSet, error) { var out []*types.FullBlock for _, b := range ts.Blocks() { - bmsgs, smsgs, err := cs.MessagesForBlock(b) + bmsgs, smsgs, err := cs.MessagesForBlock(ctx, b) if err != nil { // TODO: check for 'not found' errors, and only return nil if this // is actually a 'not found' error @@ -1154,7 +1154,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t if lbts.Height() < h { log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h) - lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ts, h) + lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ctx, ts, h) if err != nil { return nil, err } @@ -1164,7 +1164,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t return lbts, nil } - return cs.LoadTipSet(lbts.Parents()) + return cs.LoadTipSet(ctx, lbts.Parents()) } func (cs *ChainStore) Weight(ctx context.Context, hts *types.TipSet) (types.BigInt, error) { // todo remove @@ -1190,14 +1190,14 @@ func breakWeightTie(ts1, ts2 *types.TipSet) bool { return false } -func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) { +func (cs *ChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { if tsk.IsEmpty() { return cs.GetHeaviestTipSet(), nil } - return cs.LoadTipSet(tsk) + return cs.LoadTipSet(ctx, tsk) } -func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry, error) { +func (cs *ChainStore) GetLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { cur := ts for i := 0; i < 20; i++ { cbe := cur.Blocks()[0].BeaconEntries @@ -1209,7 +1209,7 @@ func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") } - 
next, err := cs.LoadTipSet(cur.Parents()) + next, err := cs.LoadTipSet(ctx, cur.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) } diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 2004b266c..a759a48a8 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -109,7 +109,7 @@ func TestChainExportImport(t *testing.T) { cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - root, err := cs.Import(buf) + root, err := cs.Import(context.TODO(), buf) if err != nil { t.Fatal(err) } @@ -144,12 +144,12 @@ func TestChainExportImportFull(t *testing.T) { cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - root, err := cs.Import(buf) + root, err := cs.Import(context.TODO(), buf) if err != nil { t.Fatal(err) } - err = cs.SetHead(last) + err = cs.SetHead(context.Background(), last) if err != nil { t.Fatal(err) } diff --git a/chain/sync.go b/chain/sync.go index 34867b136..0293ae25e 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -119,8 +119,8 @@ type SyncManagerCtor func(syncFn SyncFunc) SyncManager type Genesis *types.TipSet -func LoadGenesis(sm *stmgr.StateManager) (Genesis, error) { - gen, err := sm.ChainStore().GetGenesis() +func LoadGenesis(ctx context.Context, sm *stmgr.StateManager) (Genesis, error) { + gen, err := sm.ChainStore().GetGenesis(ctx) if err != nil { return nil, xerrors.Errorf("getting genesis block: %w", err) } @@ -227,7 +227,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { // TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of // the blockstore - if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil { + if err := syncer.store.PersistBlockHeaders(ctx, fts.TipSet().Blocks()...); err != nil { log.Warn("failed to persist incoming block header: ", err) return false } @@ -298,11 +298,11 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { // into the blockstore. 
blockstore := bstore.NewMemory() cst := cbor.NewCborStore(blockstore) - + ctx := context.Background() var bcids, scids []cid.Cid for _, m := range fblk.BlsMessages { - c, err := store.PutMessage(blockstore, m) + c, err := store.PutMessage(ctx, blockstore, m) if err != nil { return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) } @@ -310,7 +310,7 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { } for _, m := range fblk.SecpkMessages { - c, err := store.PutMessage(blockstore, m) + c, err := store.PutMessage(ctx, blockstore, m) if err != nil { return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) } @@ -360,7 +360,7 @@ func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error { // TODO: should probably expose better methods on the blockstore for this operation var blks []blocks.Block for c := range cids { - b, err := from.Get(c) + b, err := from.Get(ctx, c) if err != nil { return err } @@ -368,7 +368,7 @@ func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error { blks = append(blks, b) } - if err := to.PutMany(blks); err != nil { + if err := to.PutMany(ctx, blks); err != nil { return err } @@ -463,7 +463,7 @@ func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, e // {hint/usage} This is used from the HELLO protocol, to fetch the greeting // peer's heaviest tipset if we don't have it. func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { - if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil { + if fts, err := syncer.tryLoadFullTipSet(ctx, tsk); err == nil { return fts, nil } @@ -474,15 +474,15 @@ func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipS // tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full // representation of it containing FullBlocks. If ALL blocks are not found // locally, it errors entirely with blockstore.ErrNotFound. -func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) { - ts, err := syncer.store.LoadTipSet(tsk) +func (syncer *Syncer) tryLoadFullTipSet(ctx context.Context, tsk types.TipSetKey) (*store.FullTipSet, error) { + ts, err := syncer.store.LoadTipSet(ctx, tsk) if err != nil { return nil, err } fts := &store.FullTipSet{} for _, b := range ts.Blocks() { - bmsgs, smsgs, err := syncer.store.MessagesForBlock(b) + bmsgs, smsgs, err := syncer.store.MessagesForBlock(ctx, b) if err != nil { return nil, err } @@ -583,7 +583,7 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, return xerrors.Errorf("validating block %s: %w", b.Cid(), err) } - if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil { + if err := syncer.sm.ChainStore().AddToTipSetTracker(ctx, b.Header); err != nil { return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err) } return nil @@ -755,7 +755,7 @@ loop: } // If, for some reason, we have a suffix of the chain locally, handle that here - ts, err := syncer.store.LoadTipSet(at) + ts, err := syncer.store.LoadTipSet(ctx, at) if err == nil { acceptedBlocks = append(acceptedBlocks, at.Cids()...) 
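
A minimal sketch of the calling convention these chain/store and chain/sync hunks adopt (illustrative only, not part of the patch): the go-datastore / go-ipfs-blockstore operations are assumed here to be their context-aware v0.5.x-style APIs, so every helper built on them (LoadTipSet, GetBlock, PersistBlockHeaders, metadataDs.Get/Put, and so on) gains a leading context.Context and callers thread their request context through instead of calling context-free methods.

```go
// Illustrative sketch (not part of this patch): the context-threading pattern
// applied throughout these hunks. Datastore operations take a context.Context
// as their first argument, and callers pass their request context down.
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-datastore"
)

func main() {
	// Call sites without a natural request context, such as the test hunks in
	// this patch, use context.TODO() or context.Background().
	ctx := context.Background()

	ds := datastore.NewMapDatastore()
	key := datastore.NewKey("head")

	// Before the context-aware datastore API: ds.Put(key, value)
	// After (as used throughout this patch): the context comes first.
	if err := ds.Put(ctx, key, []byte("tipset-cids-json")); err != nil {
		fmt.Println("put failed:", err)
		return
	}

	val, err := ds.Get(ctx, key)
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Printf("loaded chain head: %q\n", val)
}
```

The same pattern explains the signature changes above that look unrelated to behaviour: they exist only so that a caller's context can reach the underlying datastore or blockstore call.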
@@ -838,7 +838,7 @@ loop: return blockSet, nil } - knownParent, err := syncer.store.LoadTipSet(known.Parents()) + knownParent, err := syncer.store.LoadTipSet(ctx, known.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } @@ -892,7 +892,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know return nil, err } - nts, err := syncer.store.LoadTipSet(known.Parents()) + nts, err := syncer.store.LoadTipSet(ctx, known.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } @@ -928,7 +928,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know return nil, ErrForkCheckpoint } - nts, err = syncer.store.LoadTipSet(nts.Parents()) + nts, err = syncer.store.LoadTipSet(ctx, nts.Parents()) if err != nil { return nil, xerrors.Errorf("loading next local tipset: %w", err) } @@ -965,7 +965,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers)))) for i := len(headers) - 1; i >= 0; { - fts, err := syncer.store.TryFillTipSet(headers[i]) + fts, err := syncer.store.TryFillTipSet(ctx, headers[i]) if err != nil { return err } @@ -1138,7 +1138,7 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co for _, m := range bst.Bls { //log.Infof("putting BLS message: %s", m.Cid()) - if _, err := store.PutMessage(bs, m); err != nil { + if _, err := store.PutMessage(ctx, bs, m); err != nil { log.Errorf("failed to persist messages: %+v", err) return xerrors.Errorf("BLS message processing failed: %w", err) } @@ -1148,7 +1148,7 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co return xerrors.Errorf("unknown signature type on message %s: %q", m.Cid(), m.Signature.Type) } //log.Infof("putting secp256k1 message: %s", m.Cid()) - if _, err := store.PutMessage(bs, m); err != nil { + if _, err := store.PutMessage(ctx, bs, m); err != nil { log.Errorf("failed to persist messages: %+v", err) return xerrors.Errorf("secp256k1 message processing failed: %w", err) } @@ -1201,7 +1201,7 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *t for _, ts := range headers { toPersist = append(toPersist, ts.Blocks()...) 
} - if err := syncer.store.PersistBlockHeaders(toPersist...); err != nil { + if err := syncer.store.PersistBlockHeaders(ctx, toPersist...); err != nil { err = xerrors.Errorf("failed to persist synced blocks to the chainstore: %w", err) ss.Error(err) return err @@ -1245,7 +1245,7 @@ func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) { return bbr.String(), ok } -func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { +func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { cur := ts for i := 0; i < 20; i++ { cbe := cur.Blocks()[0].BeaconEntries @@ -1257,7 +1257,7 @@ func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") } - next, err := syncer.store.LoadTipSet(cur.Parents()) + next, err := syncer.store.LoadTipSet(ctx, cur.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) } diff --git a/chain/sync_test.go b/chain/sync_test.go index ccaa41b0d..2949ecf28 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -206,20 +206,21 @@ func (tu *syncTestUtil) pushFtsAndWait(to int, fts *store.FullTipSet, wait bool) } func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bool) { + ctx := context.TODO() for _, fb := range fts.Blocks { var b types.BlockMsg // -1 to match block.Height b.Header = fb.Header for _, msg := range fb.SecpkMessages { - c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(msg) + c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(ctx, msg) require.NoError(tu.t, err) b.SecpkMessages = append(b.SecpkMessages, c) } for _, msg := range fb.BlsMessages { - c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(msg) + c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(ctx, msg) require.NoError(tu.t, err) b.BlsMessages = append(b.BlsMessages, c) @@ -299,7 +300,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) { lastTs := blocks[len(blocks)-1].Blocks for _, lastB := range lastTs { cs := out.(*impl.FullNodeAPI).ChainAPI.Chain - require.NoError(tu.t, cs.AddToTipSetTracker(lastB.Header)) + require.NoError(tu.t, cs.AddToTipSetTracker(context.Background(), lastB.Header)) err = cs.AddBlock(tu.ctx, lastB.Header) require.NoError(tu.t, err) } @@ -750,7 +751,7 @@ func TestDuplicateNonce(t *testing.T) { t.Fatal("included message should be in exec trace") } - mft, err := tu.g.ChainStore().MessagesForTipset(ts1.TipSet()) + mft, err := tu.g.ChainStore().MessagesForTipset(context.TODO(), ts1.TipSet()) require.NoError(t, err) require.True(t, len(mft) == 1, "only expecting one message for this tipset") require.Equal(t, includedMsg, mft[0].VMMessage().Cid(), "messages for tipset didn't contain expected message") diff --git a/chain/vm/gas.go b/chain/vm/gas.go index 206a55d36..27d9c8d94 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -3,13 +3,14 @@ package vm import ( "fmt" + vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/go-address" addr "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/build" - vmr5 
"github.com/filecoin-project/specs-actors/v5/actors/runtime" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" ) @@ -73,9 +74,10 @@ type Pricelist interface { OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) OnHashing(dataSize int) GasCharge OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge - OnVerifySeal(info proof5.SealVerifyInfo) GasCharge - OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge - OnVerifyPost(info proof5.WindowPoStVerifyInfo) GasCharge + OnVerifySeal(info proof7.SealVerifyInfo) GasCharge + OnVerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) GasCharge + OnVerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) GasCharge + OnVerifyPost(info proof7.WindowPoStVerifyInfo) GasCharge OnVerifyConsensusFault() GasCharge } @@ -227,7 +229,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { } type pricedSyscalls struct { - under vmr5.Syscalls + under vmr.Syscalls pl Pricelist chargeGas func(GasCharge) } @@ -261,7 +263,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p } // Verifies a sector seal proof. -func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error { +func (ps pricedSyscalls) VerifySeal(vi proof7.SealVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifySeal(vi)) defer ps.chargeGas(gasOnActorExec) @@ -269,7 +271,7 @@ func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error { } // Verifies a proof of spacetime. -func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error { +func (ps pricedSyscalls) VerifyPoSt(vi proof7.WindowPoStVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifyPost(vi)) defer ps.chargeGas(gasOnActorExec) @@ -286,14 +288,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error { // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the // blocks in the parent of h2 (i.e. h2's grandparent). // Returns nil and an error if the headers don't prove a fault. 
-func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr5.ConsensusFault, error) { +func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr.ConsensusFault, error) { ps.chargeGas(ps.pl.OnVerifyConsensusFault()) defer ps.chargeGas(gasOnActorExec) return ps.under.VerifyConsensusFault(h1, h2, extra) } -func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) { +func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof7.SealVerifyInfo) (map[address.Address][]bool, error) { count := int64(0) for _, svis := range inp { count += int64(len(svis)) @@ -307,9 +309,16 @@ func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealV return ps.under.BatchVerifySeals(inp) } -func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error { +func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) error { ps.chargeGas(ps.pl.OnVerifyAggregateSeals(aggregate)) defer ps.chargeGas(gasOnActorExec) return ps.under.VerifyAggregateSeals(aggregate) } + +func (ps pricedSyscalls) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error { + ps.chargeGas(ps.pl.OnVerifyReplicaUpdate(update)) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.VerifyReplicaUpdate(update) +} diff --git a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go index 13c5fdd86..548227a33 100644 --- a/chain/vm/gas_v0.go +++ b/chain/vm/gas_v0.go @@ -3,8 +3,7 @@ package vm import ( "fmt" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -206,14 +205,14 @@ func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealPr } // OnVerifySeal -func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge { +func (pl *pricelistV0) OnVerifySeal(info proof7.SealVerifyInfo) GasCharge { // TODO: this needs more cost tunning, check with @lotus // this is not used return newGasCharge("OnVerifySeal", pl.verifySealBase, 0) } // OnVerifyAggregateSeals -func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge { +func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) GasCharge { proofType := aggregate.SealProof perProof, ok := pl.verifyAggregateSealPer[proofType] if !ok { @@ -228,8 +227,14 @@ func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVeri return newGasCharge("OnVerifyAggregateSeals", perProof*num+step.Lookup(num), 0) } +// OnVerifyReplicaUpdate +func (pl *pricelistV0) OnVerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) GasCharge { + // TODO: do the thing + return GasCharge{} +} + // OnVerifyPost -func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge { +func (pl *pricelistV0) OnVerifyPost(info proof7.WindowPoStVerifyInfo) GasCharge { sectorSize := "unknown" var proofType abi.RegisteredPoStProof diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go index 85357e51b..8a7a4c8c9 100644 --- a/chain/vm/invoker.go +++ b/chain/vm/invoker.go @@ -16,7 +16,7 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" - vmr 
"github.com/filecoin-project/specs-actors/v5/actors/runtime" + vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go index ea49abff3..5716b5006 100644 --- a/chain/vm/mkactor.go +++ b/chain/vm/mkactor.go @@ -26,6 +26,7 @@ import ( builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" /* inline-gen end */ @@ -130,6 +131,8 @@ func newAccountActor(ver actors.Version) *types.Actor { code = builtin5.AccountActorCodeID case actors.Version6: code = builtin6.AccountActorCodeID + case actors.Version7: + code = builtin7.AccountActorCodeID /* inline-gen end */ default: panic("unsupported actors version") diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index 6e94030bd..9bbed4030 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -5,6 +5,7 @@ import ( "context" "encoding/binary" "fmt" + "os" gruntime "runtime" "time" @@ -16,8 +17,12 @@ import ( "github.com/filecoin-project/go-state-types/network" rtt "github.com/filecoin-project/go-state-types/rt" rt0 "github.com/filecoin-project/specs-actors/actors/runtime" + rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + rt3 "github.com/filecoin-project/specs-actors/v3/actors/runtime" + rt4 "github.com/filecoin-project/specs-actors/v4/actors/runtime" rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" rt6 "github.com/filecoin-project/specs-actors/v6/actors/runtime" + rt7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" "github.com/ipfs/go-cid" ipldcbor "github.com/ipfs/go-ipld-cbor" "go.opencensus.io/trace" @@ -52,11 +57,11 @@ func (m *Message) ValueReceived() abi.TokenAmount { } // EnableGasTracing, if true, outputs gas tracing in execution traces. 
-var EnableGasTracing = false +var EnableGasTracing = os.Getenv("LOTUS_VM_ENABLE_GAS_TRACING_VERY_SLOW") == "1" type Runtime struct { - rt5.Message - rt5.Syscalls + rt7.Message + rt7.Syscalls ctx context.Context @@ -142,7 +147,12 @@ func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid { var _ rt0.Runtime = (*Runtime)(nil) var _ rt5.Runtime = (*Runtime)(nil) +var _ rt2.Runtime = (*Runtime)(nil) +var _ rt3.Runtime = (*Runtime)(nil) +var _ rt4.Runtime = (*Runtime)(nil) +var _ rt5.Runtime = (*Runtime)(nil) var _ rt6.Runtime = (*Runtime)(nil) +var _ rt7.Runtime = (*Runtime)(nil) func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) { defer func() { @@ -214,16 +224,8 @@ func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) } func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - var err error - var res []byte - rnv := rt.vm.ntwkVersion(rt.ctx, randEpoch) - - if rnv >= network.Version13 { - res, err = rt.vm.rand.GetChainRandomnessV2(rt.ctx, personalization, randEpoch, entropy) - } else { - res, err = rt.vm.rand.GetChainRandomnessV1(rt.ctx, personalization, randEpoch, entropy) - } + res, err := rt.vm.rand.GetChainRandomness(rt.ctx, rnv, personalization, randEpoch, entropy) if err != nil { panic(aerrors.Fatalf("could not get ticket randomness: %s", err)) @@ -232,17 +234,8 @@ func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparat } func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - var err error - var res []byte - rnv := rt.vm.ntwkVersion(rt.ctx, randEpoch) - if rnv >= network.Version14 { - res, err = rt.vm.rand.GetBeaconRandomnessV3(rt.ctx, personalization, randEpoch, entropy) - } else if rnv == network.Version13 { - res, err = rt.vm.rand.GetBeaconRandomnessV2(rt.ctx, personalization, randEpoch, entropy) - } else { - res, err = rt.vm.rand.GetBeaconRandomnessV1(rt.ctx, personalization, randEpoch, entropy) - } + res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, rnv, personalization, randEpoch, entropy) if err != nil { panic(aerrors.Fatalf("could not get beacon randomness: %s", err)) @@ -323,7 +316,7 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) { } // Transfer the executing actor's balance to the beneficiary - if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil { + if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance, rt.NetworkVersion()); err != nil { panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err)) } } diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index 0cbefd1fd..b8c027bd7 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -7,6 +7,8 @@ import ( goruntime "runtime" "sync" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" "github.com/minio/blake2b-simd" @@ -26,8 +28,8 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/lib/sigs" - runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + runtime7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" ) func init() { @@ -36,10 +38,10 @@ func 
init() { // Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there -type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime5.Syscalls +type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime7.Syscalls func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { - return func(ctx context.Context, rt *Runtime) runtime5.Syscalls { + return func(ctx context.Context, rt *Runtime) runtime7.Syscalls { return &syscallShim{ ctx: ctx, @@ -90,7 +92,7 @@ func (ss *syscallShim) HashBlake2b(data []byte) [32]byte { // Checks validity of the submitted consensus fault with the two block headers needed to prove the fault // and an optional extra one to check common ancestry (as needed). // Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch(). -func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.ConsensusFault, error) { +func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime7.ConsensusFault, error) { // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. // for that reason when checking block parent relationships, rather than instantiating a Tipset to do so @@ -133,14 +135,14 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse } // (2) check for the consensus faults themselves - var consensusFault *runtime5.ConsensusFault + var consensusFault *runtime7.ConsensusFault // (a) double-fork mining fault if blockA.Height == blockB.Height { - consensusFault = &runtime5.ConsensusFault{ + consensusFault = &runtime7.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime5.ConsensusFaultDoubleForkMining, + Type: runtime7.ConsensusFaultDoubleForkMining, } } @@ -148,10 +150,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse // strictly speaking no need to compare heights based on double fork mining check above, // but at same height this would be a different fault. 
if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { - consensusFault = &runtime5.ConsensusFault{ + consensusFault = &runtime7.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime5.ConsensusFaultTimeOffsetMining, + Type: runtime7.ConsensusFaultTimeOffsetMining, } } @@ -171,10 +173,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { - consensusFault = &runtime5.ConsensusFault{ + consensusFault = &runtime7.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime5.ConsensusFaultParentGrinding, + Type: runtime7.ConsensusFaultParentGrinding, } } } @@ -286,6 +288,7 @@ func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerify if err != nil { return xerrors.Errorf("failed to verify aggregated PoRep: %w", err) } + if !ok { return fmt.Errorf("invalid aggregate proof") } @@ -293,6 +296,19 @@ func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerify return nil } +func (ss *syscallShim) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error { + ok, err := ss.verifier.VerifyReplicaUpdate(update) + if err != nil { + return xerrors.Errorf("failed to verify replica update: %w", err) + } + + if !ok { + return fmt.Errorf("invalid replica update") + } + + return nil +} + func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error { // TODO: in genesis setup, we are currently faking signatures diff --git a/chain/vm/vm.go b/chain/vm/vm.go index 36308fe03..453646030 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -82,10 +82,10 @@ type gasChargingBlocks struct { under cbor.IpldBlockstore } -func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error { +func (bs *gasChargingBlocks) View(ctx context.Context, c cid.Cid, cb func([]byte) error) error { if v, ok := bs.under.(blockstore.Viewer); ok { bs.chargeGas(bs.pricelist.OnIpldGet()) - return v.View(c, func(b []byte) error { + return v.View(ctx, c, func(b []byte) error { // we have successfully retrieved the value; charge for it, even if the user-provided function fails. bs.chargeGas(newGasCharge("OnIpldViewEnd", 0, 0).WithExtra(len(b))) bs.chargeGas(gasOnActorExec) @@ -93,16 +93,16 @@ func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error { }) } // the underlying blockstore doesn't implement the viewer interface, fall back to normal Get behaviour. 
- blk, err := bs.Get(c) + blk, err := bs.Get(ctx, c) if err == nil && blk != nil { return cb(blk.RawData()) } return err } -func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { +func (bs *gasChargingBlocks) Get(ctx context.Context, c cid.Cid) (block.Block, error) { bs.chargeGas(bs.pricelist.OnIpldGet()) - blk, err := bs.under.Get(c) + blk, err := bs.under.Get(ctx, c) if err != nil { return nil, aerrors.Escalate(err, "failed to get block from blockstore") } @@ -112,10 +112,10 @@ func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { return blk, nil } -func (bs *gasChargingBlocks) Put(blk block.Block) error { +func (bs *gasChargingBlocks) Put(ctx context.Context, blk block.Block) error { bs.chargeGas(bs.pricelist.OnIpldPut(len(blk.RawData()))) - if err := bs.under.Put(blk); err != nil { + if err := bs.under.Put(ctx, blk); err != nil { return aerrors.Escalate(err, "failed to write data to disk") } bs.chargeGas(gasOnActorExec) @@ -202,9 +202,7 @@ type ( ) type VM struct { - cstate *state.StateTree - // TODO: Is base actually used? Can we delete it? - base cid.Cid + cstate *state.StateTree cst *cbor.BasicIpldStore buf *blockstore.BufferedBlockstore blockHeight abi.ChainEpoch @@ -214,6 +212,7 @@ type VM struct { ntwkVersion NtwkVersionGetter baseFee abi.TokenAmount lbStateGet LookbackStateGetter + baseCircSupply abi.TokenAmount Syscalls SyscallBuilder } @@ -239,9 +238,13 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { return nil, err } + baseCirc, err := opts.CircSupplyCalc(ctx, opts.Epoch, state) + if err != nil { + return nil, err + } + return &VM{ cstate: state, - base: opts.StateBase, cst: cst, buf: buf, blockHeight: opts.Epoch, @@ -251,16 +254,14 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { ntwkVersion: opts.NtwkVersion, Syscalls: opts.Syscalls, baseFee: opts.BaseFee, + baseCircSupply: baseCirc, lbStateGet: opts.LookbackState, }, nil } type Rand interface { - GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) + GetChainRandomness(ctx context.Context, nv network.Version, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomness(ctx context.Context, nv network.Version, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) } type ApplyRet struct { @@ -339,7 +340,7 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0)) if types.BigCmp(msg.Value, types.NewInt(0)) != 0 { - if err := vm.transfer(msg.From, msg.To, msg.Value); err != nil { + if err := vm.transfer(msg.From, msg.To, msg.Value, vm.ntwkVersion(ctx, vm.blockHeight)); err != nil { return nil, aerrors.Wrap(err, "failed to transfer funds") } } @@ -706,7 +707,7 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err go func() { for b := range toFlush { - if err := to.PutMany(b); 
err != nil { + if err := to.PutMany(ctx, b); err != nil { close(freeBufs) errFlushChan <- xerrors.Errorf("batch put in copy: %w", err) return @@ -735,7 +736,7 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err return nil } - if err := copyRec(from, to, root, batchCp); err != nil { + if err := copyRec(ctx, from, to, root, batchCp); err != nil { return xerrors.Errorf("copyRec: %w", err) } @@ -760,13 +761,13 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err return nil } -func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) error) error { +func copyRec(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) error) error { if root.Prefix().MhType == 0 { // identity cid, skip return nil } - blk, err := from.Get(root) + blk, err := from.Get(ctx, root) if err != nil { return xerrors.Errorf("get %s failed: %w", root, err) } @@ -791,7 +792,7 @@ func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) } } else { // If we have an object, we already have its children, skip the object. - has, err := to.Has(link) + has, err := to.Has(ctx, link) if err != nil { lerr = xerrors.Errorf("has: %w", err) return @@ -801,7 +802,7 @@ func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) } } - if err := copyRec(from, to, link, cp); err != nil { + if err := copyRec(ctx, from, to, link, cp); err != nil { lerr = err return } @@ -859,7 +860,12 @@ func (vm *VM) GetNtwkVersion(ctx context.Context, ce abi.ChainEpoch) network.Ver } func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) { - return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) + // Before v15, this was recalculated on each invocation as the state tree was mutated + if vm.GetNtwkVersion(ctx, vm.blockHeight) <= network.Version14 { + return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) + } + + return vm.baseCircSupply, nil } func (vm *VM) incrementNonce(addr address.Address) error { @@ -869,32 +875,71 @@ func (vm *VM) incrementNonce(addr address.Address) error { }) } -func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.ActorError { - if from == to { - return nil - } +func (vm *VM) transfer(from, to address.Address, amt types.BigInt, networkVersion network.Version) aerrors.ActorError { + var f *types.Actor + var fromID, toID address.Address + var err error + // switching the order around so that transactions for more than the balance sent to self fail + if networkVersion >= network.Version15 { + if amt.LessThan(types.NewInt(0)) { + return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) + } - fromID, err := vm.cstate.LookupID(from) - if err != nil { - return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) - } + fromID, err = vm.cstate.LookupID(from) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) + } - toID, err := vm.cstate.LookupID(to) - if err != nil { - return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) - } + f, err = vm.cstate.GetActor(fromID) + if err != nil { + return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) + } - if fromID == toID { - return nil - } + if f.Balance.LessThan(amt) { + return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed, insufficient balance in sender actor: %v", f.Balance) + } - if amt.LessThan(types.NewInt(0)) { - return 
aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) - } + if from == to { + log.Infow("sending to same address: noop", "from/to addr", from) + return nil + } - f, err := vm.cstate.GetActor(fromID) - if err != nil { - return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) + toID, err = vm.cstate.LookupID(to) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) + } + + if fromID == toID { + log.Infow("sending to same actor ID: noop", "from/to actor", fromID) + return nil + } + } else { + if from == to { + return nil + } + + fromID, err = vm.cstate.LookupID(from) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) + } + + toID, err = vm.cstate.LookupID(to) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) + } + + if fromID == toID { + return nil + } + + if amt.LessThan(types.NewInt(0)) { + return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) + } + + f, err = vm.cstate.GetActor(fromID) + if err != nil { + return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) + } } t, err := vm.cstate.GetActor(toID) @@ -902,17 +947,17 @@ func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.Actor return aerrors.Fatalf("transfer failed when retrieving receiver actor: %s", err) } - if err := deductFunds(f, amt); err != nil { + if err = deductFunds(f, amt); err != nil { return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed when deducting funds (%s): %s", types.FIL(amt), err) } depositFunds(t, amt) - if err := vm.cstate.SetActor(fromID, f); err != nil { - return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err) + if err = vm.cstate.SetActor(fromID, f); err != nil { + return aerrors.Fatalf("transfer failed when setting sender actor: %s", err) } - if err := vm.cstate.SetActor(toID, t); err != nil { - return aerrors.Fatalf("transfer failed when setting sender actor: %s", err) + if err = vm.cstate.SetActor(toID, t); err != nil { + return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err) } return nil diff --git a/chain/wallet/ledger/ledger.go b/chain/wallet/ledger/ledger.go index eb16f6460..5279389de 100644 --- a/chain/wallet/ledger/ledger.go +++ b/chain/wallet/ledger/ledger.go @@ -39,7 +39,7 @@ type LedgerKeyInfo struct { var _ api.Wallet = (*LedgerWallet)(nil) func (lw LedgerWallet) WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta api.MsgMeta) (*crypto.Signature, error) { - ki, err := lw.getKeyInfo(signer) + ki, err := lw.getKeyInfo(ctx, signer) if err != nil { return nil, err } @@ -80,8 +80,8 @@ func (lw LedgerWallet) WalletSign(ctx context.Context, signer address.Address, t }, nil } -func (lw LedgerWallet) getKeyInfo(addr address.Address) (*LedgerKeyInfo, error) { - kib, err := lw.ds.Get(keyForAddr(addr)) +func (lw LedgerWallet) getKeyInfo(ctx context.Context, addr address.Address) (*LedgerKeyInfo, error) { + kib, err := lw.ds.Get(ctx, keyForAddr(addr)) if err != nil { return nil, err } @@ -95,7 +95,7 @@ func (lw LedgerWallet) getKeyInfo(addr address.Address) (*LedgerKeyInfo, error) } func (lw LedgerWallet) WalletDelete(ctx context.Context, k address.Address) error { - return lw.ds.Delete(keyForAddr(k)) + return lw.ds.Delete(ctx, keyForAddr(k)) } func (lw LedgerWallet) WalletExport(ctx context.Context, k address.Address) (*types.KeyInfo, 
error) { @@ -103,7 +103,7 @@ func (lw LedgerWallet) WalletExport(ctx context.Context, k address.Address) (*ty } func (lw LedgerWallet) WalletHas(ctx context.Context, k address.Address) (bool, error) { - _, err := lw.ds.Get(keyForAddr(k)) + _, err := lw.ds.Get(ctx, keyForAddr(k)) if err == nil { return true, nil } @@ -118,10 +118,10 @@ func (lw LedgerWallet) WalletImport(ctx context.Context, kinfo *types.KeyInfo) ( if err := json.Unmarshal(kinfo.PrivateKey, &ki); err != nil { return address.Undef, err } - return lw.importKey(ki) + return lw.importKey(ctx, ki) } -func (lw LedgerWallet) importKey(ki LedgerKeyInfo) (address.Address, error) { +func (lw LedgerWallet) importKey(ctx context.Context, ki LedgerKeyInfo) (address.Address, error) { if ki.Address == address.Undef { return address.Undef, fmt.Errorf("no address given in imported key info") } @@ -133,7 +133,7 @@ func (lw LedgerWallet) importKey(ki LedgerKeyInfo) (address.Address, error) { return address.Undef, xerrors.Errorf("marshaling key info: %w", err) } - if err := lw.ds.Put(keyForAddr(ki.Address), bb); err != nil { + if err := lw.ds.Put(ctx, keyForAddr(ki.Address), bb); err != nil { return address.Undef, err } @@ -141,7 +141,7 @@ func (lw LedgerWallet) importKey(ki LedgerKeyInfo) (address.Address, error) { } func (lw LedgerWallet) WalletList(ctx context.Context) ([]address.Address, error) { - res, err := lw.ds.Query(query.Query{Prefix: dsLedgerPrefix}) + res, err := lw.ds.Query(ctx, query.Query{Prefix: dsLedgerPrefix}) if err != nil { return nil, err } @@ -175,7 +175,7 @@ func (lw LedgerWallet) WalletNew(ctx context.Context, t types.KeyType) (address. t, types.KTSecp256k1Ledger) } - res, err := lw.ds.Query(query.Query{Prefix: dsLedgerPrefix}) + res, err := lw.ds.Query(ctx, query.Query{Prefix: dsLedgerPrefix}) if err != nil { return address.Undef, err } @@ -224,7 +224,7 @@ func (lw LedgerWallet) WalletNew(ctx context.Context, t types.KeyType) (address. 
lki.Address = a lki.Path = path - return lw.importKey(lki) + return lw.importKey(ctx, lki) } func (lw *LedgerWallet) Get() api.Wallet { diff --git a/cli/backup.go b/cli/backup.go index 856e098dd..4d88d4bbc 100644 --- a/cli/backup.go +++ b/cli/backup.go @@ -66,7 +66,7 @@ func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Comma return xerrors.Errorf("opening backup file %s: %w", fpath, err) } - if err := bds.Backup(out); err != nil { + if err := bds.Backup(cctx.Context, out); err != nil { if cerr := out.Close(); cerr != nil { log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err) } diff --git a/cli/client.go b/cli/client.go index daaf5f3fe..f5b38788b 100644 --- a/cli/client.go +++ b/cli/client.go @@ -26,7 +26,6 @@ import ( datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil/cidenc" - textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/libp2p/go-libp2p-core/peer" "github.com/multiformats/go-multibase" "github.com/urfave/cli/v2" @@ -94,6 +93,8 @@ var clientCmd = &cli.Command{ WithCategory("data", clientStat), WithCategory("retrieval", clientFindCmd), WithCategory("retrieval", clientRetrieveCmd), + WithCategory("retrieval", clientRetrieveCatCmd), + WithCategory("retrieval", clientRetrieveLsCmd), WithCategory("retrieval", clientCancelRetrievalDealCmd), WithCategory("retrieval", clientListRetrievalsCmd), WithCategory("util", clientCommPCmd), @@ -1029,209 +1030,6 @@ var clientFindCmd = &cli.Command{ }, } -const DefaultMaxRetrievePrice = "0.01" - -var clientRetrieveCmd = &cli.Command{ - Name: "retrieve", - Usage: "Retrieve data from network", - ArgsUsage: "[dataCid outputPath]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "from", - Usage: "address to send transactions from", - }, - &cli.BoolFlag{ - Name: "car", - Usage: "export to a car file instead of a regular file", - }, - &cli.StringFlag{ - Name: "miner", - Usage: "miner address for retrieval, if not present it'll use local discovery", - }, - &cli.StringFlag{ - Name: "datamodel-path-selector", - Usage: "a rudimentary (DM-level-only) text-path selector, allowing for sub-selection within a deal", - }, - &cli.StringFlag{ - Name: "maxPrice", - Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice), - }, - &cli.StringFlag{ - Name: "pieceCid", - Usage: "require data to be retrieved from a specific Piece CID", - }, - &cli.BoolFlag{ - Name: "allow-local", - }, - }, - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 2 { - return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) - } - - fapi, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - afmt := NewAppFmt(cctx.App) - - var payer address.Address - if cctx.String("from") != "" { - payer, err = address.NewFromString(cctx.String("from")) - } else { - payer, err = fapi.WalletDefaultAddress(ctx) - } - if err != nil { - return err - } - - file, err := cid.Parse(cctx.Args().Get(0)) - if err != nil { - return err - } - - var pieceCid *cid.Cid - if cctx.String("pieceCid") != "" { - parsed, err := cid.Parse(cctx.String("pieceCid")) - if err != nil { - return err - } - pieceCid = &parsed - } - - var order *lapi.RetrievalOrder - if cctx.Bool("allow-local") { - imports, err := fapi.ClientListImports(ctx) - if err != nil { - return err - } - - for _, i := range imports { - if i.Root != nil && i.Root.Equals(file) { - 
order = &lapi.RetrievalOrder{ - Root: file, - FromLocalCAR: i.CARPath, - - Total: big.Zero(), - UnsealPrice: big.Zero(), - } - break - } - } - } - - if order == nil { - var offer api.QueryOffer - minerStrAddr := cctx.String("miner") - if minerStrAddr == "" { // Local discovery - offers, err := fapi.ClientFindData(ctx, file, pieceCid) - - var cleaned []api.QueryOffer - // filter out offers that errored - for _, o := range offers { - if o.Err == "" { - cleaned = append(cleaned, o) - } - } - - offers = cleaned - - // sort by price low to high - sort.Slice(offers, func(i, j int) bool { - return offers[i].MinPrice.LessThan(offers[j].MinPrice) - }) - if err != nil { - return err - } - - // TODO: parse offer strings from `client find`, make this smarter - if len(offers) < 1 { - fmt.Println("Failed to find file") - return nil - } - offer = offers[0] - } else { // Directed retrieval - minerAddr, err := address.NewFromString(minerStrAddr) - if err != nil { - return err - } - offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid) - if err != nil { - return err - } - } - if offer.Err != "" { - return fmt.Errorf("The received offer errored: %s", offer.Err) - } - - maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice) - - if cctx.String("maxPrice") != "" { - maxPrice, err = types.ParseFIL(cctx.String("maxPrice")) - if err != nil { - return xerrors.Errorf("parsing maxPrice: %w", err) - } - } - - if offer.MinPrice.GreaterThan(big.Int(maxPrice)) { - return xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice) - } - - o := offer.Order(payer) - order = &o - } - ref := &lapi.FileRef{ - Path: cctx.Args().Get(1), - IsCAR: cctx.Bool("car"), - } - - if sel := textselector.Expression(cctx.String("datamodel-path-selector")); sel != "" { - order.DatamodelPathSelector = &sel - } - - updates, err := fapi.ClientRetrieveWithEvents(ctx, *order, ref) - if err != nil { - return xerrors.Errorf("error setting up retrieval: %w", err) - } - - var prevStatus retrievalmarket.DealStatus - - for { - select { - case evt, ok := <-updates: - if ok { - afmt.Printf("> Recv: %s, Paid %s, %s (%s)\n", - types.SizeStr(types.NewInt(evt.BytesReceived)), - types.FIL(evt.FundsSpent), - retrievalmarket.ClientEvents[evt.Event], - retrievalmarket.DealStatuses[evt.Status], - ) - prevStatus = evt.Status - } - - if evt.Err != "" { - return xerrors.Errorf("retrieval failed: %s", evt.Err) - } - - if !ok { - if prevStatus == retrievalmarket.DealStatusCompleted { - afmt.Println("Success") - } else { - afmt.Printf("saw final deal state %s instead of expected success state DealStatusCompleted\n", - retrievalmarket.DealStatuses[prevStatus]) - } - return nil - } - - case <-ctx.Done(): - return xerrors.Errorf("retrieval timed out") - } - } - }, -} - var clientListRetrievalsCmd = &cli.Command{ Name: "list-retrievals", Usage: "List retrieval market deals", diff --git a/cli/client_retr.go b/cli/client_retr.go new file mode 100644 index 000000000..9b195a5d8 --- /dev/null +++ b/cli/client_retr.go @@ -0,0 +1,602 @@ +package cli + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + offline "github.com/ipfs/go-ipfs-exchange-offline" + "github.com/ipfs/go-merkledag" + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/blockstore" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagjson" + basicnode 
"github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/traversal" + "github.com/ipld/go-ipld-prime/traversal/selector" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + textselector "github.com/ipld/go-ipld-selector-text-lite" + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/big" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/markets/utils" + "github.com/filecoin-project/lotus/node/repo" +) + +const DefaultMaxRetrievePrice = "0" + +func retrieve(ctx context.Context, cctx *cli.Context, fapi lapi.FullNode, sel *lapi.Selector, printf func(string, ...interface{})) (*lapi.ExportRef, error) { + var payer address.Address + var err error + if cctx.String("from") != "" { + payer, err = address.NewFromString(cctx.String("from")) + } else { + payer, err = fapi.WalletDefaultAddress(ctx) + } + if err != nil { + return nil, err + } + + file, err := cid.Parse(cctx.Args().Get(0)) + if err != nil { + return nil, err + } + + var pieceCid *cid.Cid + if cctx.String("pieceCid") != "" { + parsed, err := cid.Parse(cctx.String("pieceCid")) + if err != nil { + return nil, err + } + pieceCid = &parsed + } + + var eref *lapi.ExportRef + if cctx.Bool("allow-local") { + imports, err := fapi.ClientListImports(ctx) + if err != nil { + return nil, err + } + + for _, i := range imports { + if i.Root != nil && i.Root.Equals(file) { + eref = &lapi.ExportRef{ + Root: file, + FromLocalCAR: i.CARPath, + } + break + } + } + } + + // no local found, so make a retrieval + if eref == nil { + var offer lapi.QueryOffer + minerStrAddr := cctx.String("provider") + if minerStrAddr == "" { // Local discovery + offers, err := fapi.ClientFindData(ctx, file, pieceCid) + + var cleaned []lapi.QueryOffer + // filter out offers that errored + for _, o := range offers { + if o.Err == "" { + cleaned = append(cleaned, o) + } + } + + offers = cleaned + + // sort by price low to high + sort.Slice(offers, func(i, j int) bool { + return offers[i].MinPrice.LessThan(offers[j].MinPrice) + }) + if err != nil { + return nil, err + } + + // TODO: parse offer strings from `client find`, make this smarter + if len(offers) < 1 { + fmt.Println("Failed to find file") + return nil, nil + } + offer = offers[0] + } else { // Directed retrieval + minerAddr, err := address.NewFromString(minerStrAddr) + if err != nil { + return nil, err + } + offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid) + if err != nil { + return nil, err + } + } + if offer.Err != "" { + return nil, fmt.Errorf("offer error: %s", offer.Err) + } + + maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice) + + if cctx.String("maxPrice") != "" { + maxPrice, err = types.ParseFIL(cctx.String("maxPrice")) + if err != nil { + return nil, xerrors.Errorf("parsing maxPrice: %w", err) + } + } + + if offer.MinPrice.GreaterThan(big.Int(maxPrice)) { + return nil, xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice) + } + + o := offer.Order(payer) + o.DataSelector = sel + + subscribeEvents, err := fapi.ClientGetRetrievalUpdates(ctx) + if err != nil { + return nil, xerrors.Errorf("error setting up retrieval updates: %w", err) + } + 
retrievalRes, err := fapi.ClientRetrieve(ctx, o) + if err != nil { + return nil, xerrors.Errorf("error setting up retrieval: %w", err) + } + + start := time.Now() + readEvents: + for { + var evt lapi.RetrievalInfo + select { + case <-ctx.Done(): + return nil, xerrors.New("Retrieval Timed Out") + case evt = <-subscribeEvents: + if evt.ID != retrievalRes.DealID { + // we can't check the deal ID ahead of time because: + // 1. We need to subscribe before retrieving. + // 2. We won't know the deal ID until after retrieving. + continue + } + } + + event := "New" + if evt.Event != nil { + event = retrievalmarket.ClientEvents[*evt.Event] + } + + printf("Recv %s, Paid %s, %s (%s), %s\n", + types.SizeStr(types.NewInt(evt.BytesReceived)), + types.FIL(evt.TotalPaid), + strings.TrimPrefix(event, "ClientEvent"), + strings.TrimPrefix(retrievalmarket.DealStatuses[evt.Status], "DealStatus"), + time.Now().Sub(start).Truncate(time.Millisecond), + ) + + switch evt.Status { + case retrievalmarket.DealStatusCompleted: + break readEvents + case retrievalmarket.DealStatusRejected: + return nil, xerrors.Errorf("Retrieval Proposal Rejected: %s", evt.Message) + case + retrievalmarket.DealStatusDealNotFound, + retrievalmarket.DealStatusErrored: + return nil, xerrors.Errorf("Retrieval Error: %s", evt.Message) + } + } + + eref = &lapi.ExportRef{ + Root: file, + DealID: retrievalRes.DealID, + } + } + + return eref, nil +} + +var retrFlagsCommon = []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "address to send transactions from", + }, + &cli.StringFlag{ + Name: "provider", + Usage: "provider to use for retrieval, if not present it'll use local discovery", + Aliases: []string{"miner"}, + }, + &cli.StringFlag{ + Name: "maxPrice", + Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice), + }, + &cli.StringFlag{ + Name: "pieceCid", + Usage: "require data to be retrieved from a specific Piece CID", + }, + &cli.BoolFlag{ + Name: "allow-local", + // todo: default to true? + }, +} + +var clientRetrieveCmd = &cli.Command{ + Name: "retrieve", + Usage: "Retrieve data from network", + ArgsUsage: "[dataCid outputPath]", + Description: `Retrieve data from the Filecoin network. + +The retrieve command will attempt to find a provider and make a retrieval deal with +them. In case a provider can't be found, it can be specified with the --provider +flag. + +By default the data will be interpreted as a DAG-PB UnixFSv1 file. Alternatively, +a CAR file containing the raw IPLD graph can be exported by setting the --car +flag. + +Partial Retrieval: + +The --data-selector flag can be used to specify a sub-graph to fetch. The +selector can be specified as either an IPLD datamodel text-path selector or an IPLD +JSON selector. + +In case of unixfs retrieval, the selector must point at a single root node, and +match the entire graph under that node. + +In case of CAR retrieval, the selector must have one common "sub-root" node. + +Examples: + +- Retrieve a file by CID + $ lotus client retrieve Qm... my-file.txt + +- Retrieve a file by CID from f0123 + $ lotus client retrieve --provider f0123 Qm... my-file.txt + +- Retrieve the first file from a specified directory + $ lotus client retrieve --data-selector /Links/0/Hash Qm...
my-file.txt +`, + Flags: append([]cli.Flag{ + &cli.BoolFlag{ + Name: "car", + Usage: "Export to a car file instead of a regular file", + }, + &cli.StringFlag{ + Name: "data-selector", + Aliases: []string{"datamodel-path-selector"}, + Usage: "IPLD datamodel text-path selector, or IPLD json selector", + }, + &cli.BoolFlag{ + Name: "car-export-merkle-proof", + Usage: "(requires --data-selector and --car) Export data-selector merkle proof", + }, + }, retrFlagsCommon...), + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 2 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + if cctx.Bool("car-export-merkle-proof") { + if !cctx.Bool("car") || !cctx.IsSet("data-selector") { + return ShowHelp(cctx, fmt.Errorf("--car-export-merkle-proof requires --car and --data-selector")) + } + } + + fapi, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) + + var s *lapi.Selector + if sel := lapi.Selector(cctx.String("data-selector")); sel != "" { + s = &sel + } + + eref, err := retrieve(ctx, cctx, fapi, s, afmt.Printf) + if err != nil { + return err + } + + if s != nil { + eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: s, ExportMerkleProof: cctx.Bool("car-export-merkle-proof")}) + } + + err = fapi.ClientExport(ctx, *eref, lapi.FileRef{ + Path: cctx.Args().Get(1), + IsCAR: cctx.Bool("car"), + }) + if err != nil { + return err + } + afmt.Println("Success") + return nil + }, +} + +func ClientExportStream(apiAddr string, apiAuth http.Header, eref lapi.ExportRef, car bool) (io.ReadCloser, error) { + rj, err := json.Marshal(eref) + if err != nil { + return nil, xerrors.Errorf("marshaling export ref: %w", err) + } + + ma, err := multiaddr.NewMultiaddr(apiAddr) + if err == nil { + _, addr, err := manet.DialArgs(ma) + if err != nil { + return nil, err + } + + // todo: make cliutil helpers for this + apiAddr = "http://" + addr + } + + aa, err := url.Parse(apiAddr) + if err != nil { + return nil, xerrors.Errorf("parsing api address: %w", err) + } + switch aa.Scheme { + case "ws": + aa.Scheme = "http" + case "wss": + aa.Scheme = "https" + } + + aa.Path = path.Join(aa.Path, "rest/v0/export") + req, err := http.NewRequest("GET", fmt.Sprintf("%s?car=%t&export=%s", aa, car, url.QueryEscape(string(rj))), nil) + if err != nil { + return nil, err + } + + req.Header = apiAuth + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + em, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, xerrors.Errorf("reading error body: %w", err) + } + + resp.Body.Close() // nolint + return nil, xerrors.Errorf("getting root car: http %d: %s", resp.StatusCode, string(em)) + } + + return resp.Body, nil +} + +var clientRetrieveCatCmd = &cli.Command{ + Name: "cat", + Usage: "Show data from network", + ArgsUsage: "[dataCid]", + Flags: append([]cli.Flag{ + &cli.BoolFlag{ + Name: "ipld", + Usage: "list IPLD datamodel links", + }, + &cli.StringFlag{ + Name: "data-selector", + Usage: "IPLD datamodel text-path selector, or IPLD json selector", + }, + }, retrFlagsCommon...), + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 1 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + ainfo, err := GetAPIInfo(cctx, repo.FullNode) + if err != nil { + return xerrors.Errorf("could not get API info: %w", err) + } + + fapi, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer 
closer() + ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) + + sel := lapi.Selector(cctx.String("data-selector")) + selp := &sel + if sel == "" { + selp = nil + } + + eref, err := retrieve(ctx, cctx, fapi, selp, afmt.Printf) + if err != nil { + return err + } + + fmt.Println() // separate retrieval events from results + + if sel != "" { + eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: &sel}) + } + + rc, err := ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, false) + if err != nil { + return err + } + defer rc.Close() // nolint + + _, err = io.Copy(os.Stdout, rc) + return err + }, +} + +func pathToSel(psel string, matchTraversal bool, sub builder.SelectorSpec) (lapi.Selector, error) { + rs, err := textselector.SelectorSpecFromPath(textselector.Expression(psel), matchTraversal, sub) + if err != nil { + return "", xerrors.Errorf("failed to parse path-selector: %w", err) + } + + var b bytes.Buffer + if err := dagjson.Encode(rs.Node(), &b); err != nil { + return "", err + } + + return lapi.Selector(b.String()), nil +} + +var clientRetrieveLsCmd = &cli.Command{ + Name: "ls", + Usage: "List object links", + ArgsUsage: "[dataCid]", + Flags: append([]cli.Flag{ + &cli.BoolFlag{ + Name: "ipld", + Usage: "list IPLD datamodel links", + }, + &cli.IntFlag{ + Name: "depth", + Usage: "list links recursively up to the specified depth", + Value: 1, + }, + &cli.StringFlag{ + Name: "data-selector", + Usage: "IPLD datamodel text-path selector, or IPLD json selector", + }, + }, retrFlagsCommon...), + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 1 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + ainfo, err := GetAPIInfo(cctx, repo.FullNode) + if err != nil { + return xerrors.Errorf("could not get API info: %w", err) + } + + fapi, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) + + dataSelector := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth"))) + + if cctx.IsSet("data-selector") { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + dataSelector, err = pathToSel(cctx.String("data-selector"), cctx.Bool("ipld"), + ssb.ExploreUnion( + ssb.Matcher(), + ssb.ExploreAll( + ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))), + ))) + if err != nil { + return xerrors.Errorf("parsing datamodel path: %w", err) + } + } + + eref, err := retrieve(ctx, cctx, fapi, &dataSelector, afmt.Printf) + if err != nil { + return xerrors.Errorf("retrieve: %w", err) + } + + fmt.Println() // separate retrieval events from results + + eref.DAGs = append(eref.DAGs, lapi.DagSpec{ + DataSelector: &dataSelector, + }) + + rc, err := ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, true) + if err != nil { + return xerrors.Errorf("export: %w", err) + } + defer rc.Close() // nolint + + var memcar bytes.Buffer + _, err = io.Copy(&memcar, rc) + if err != nil { + return err + } + + cbs, err := blockstore.NewReadOnly(&bytesReaderAt{bytes.NewReader(memcar.Bytes())}, nil, + carv2.ZeroLengthSectionAsEOF(true), + blockstore.UseWholeCIDs(true)) + if err != nil { + return xerrors.Errorf("opening car blockstore: %w", err) + } + + roots, err := cbs.Roots() + if err != nil { + return xerrors.Errorf("getting roots: %w", err) + } + + if len(roots) != 1 { + return xerrors.Errorf("expected 1 car root, got %d", 
len(roots)) + } + dserv := merkledag.NewDAGService(blockservice.New(cbs, offline.Exchange(cbs))) + + if !cctx.Bool("ipld") { + links, err := dserv.GetLinks(ctx, roots[0]) + if err != nil { + return xerrors.Errorf("getting links: %w", err) + } + + for _, link := range links { + fmt.Printf("%s %s\t%d\n", link.Cid, link.Name, link.Size) + } + } else { + jsel := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth"))) + + if cctx.IsSet("data-selector") { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + jsel, err = pathToSel(cctx.String("data-selector"), false, + ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))), + ) + } + + sel, _ := selectorparse.ParseJSONSelector(string(jsel)) + + if err := utils.TraverseDag( + ctx, + dserv, + roots[0], + sel, + func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { + if r == traversal.VisitReason_SelectionMatch { + fmt.Println(p.Path) + } + return nil + }, + ); err != nil { + return err + } + } + + return err + }, +} + +type bytesReaderAt struct { + btr *bytes.Reader +} + +func (b bytesReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + return b.btr.ReadAt(p, off) +} + +var _ io.ReaderAt = &bytesReaderAt{} diff --git a/cli/multisig.go b/cli/multisig.go index 7b93e55f9..0179378a7 100644 --- a/cli/multisig.go +++ b/cli/multisig.go @@ -51,6 +51,7 @@ var multisigCmd = &cli.Command{ msigProposeCmd, msigRemoveProposeCmd, msigApproveCmd, + msigCancelCmd, msigAddProposeCmd, msigAddApproveCmd, msigAddCancelCmd, @@ -159,6 +160,8 @@ var msigCreateCmd = &cli.Command{ msgCid := sm.Cid() + fmt.Println("sent create in message: ", msgCid) + // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { @@ -448,7 +451,7 @@ var msigProposeCmd = &cli.Command{ msgCid := sm.Cid() - fmt.Println("send proposal in message: ", msgCid) + fmt.Println("sent proposal in message: ", msgCid) wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { @@ -612,6 +615,131 @@ var msigApproveCmd = &cli.Command{ }, } +var msigCancelCmd = &cli.Command{ + Name: "cancel", + Usage: "Cancel a multisig message", + ArgsUsage: "<multisigAddress messageId> [destination value [methodId methodParams]]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "account to send the cancel message from", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() < 2 { + return ShowHelp(cctx, fmt.Errorf("must pass at least multisig address and message ID")) + } + + if cctx.Args().Len() > 2 && cctx.Args().Len() < 4 { + return ShowHelp(cctx, fmt.Errorf("usage: msig cancel <msig addr> <message ID> <destination> <value>")) + } + + if cctx.Args().Len() > 4 && cctx.Args().Len() != 6 { + return ShowHelp(cctx, fmt.Errorf("usage: msig cancel <msig addr> <message ID> <destination> <value> [ <method> <params> ]")) + } + + srv, err := GetFullNodeServices(cctx) + if err != nil { + return err + } + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() + ctx := ReqContext(cctx) + + msig, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return err + } + + var from address.Address + if cctx.IsSet("from") { + f, err := address.NewFromString(cctx.String("from")) + if err != nil { + return err + } + from = f + } else { + defaddr, err := api.WalletDefaultAddress(ctx)
+ if err != nil { + return err + } + from = defaddr + } + + var msgCid cid.Cid + if cctx.Args().Len() == 2 { + proto, err := api.MsigCancel(ctx, msig, txid, from) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid = sm.Cid() + } else { + dest, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + value, err := types.ParseFIL(cctx.Args().Get(3)) + if err != nil { + return err + } + + var method uint64 + var params []byte + if cctx.Args().Len() == 6 { + m, err := strconv.ParseUint(cctx.Args().Get(4), 10, 64) + if err != nil { + return err + } + method = m + + p, err := hex.DecodeString(cctx.Args().Get(5)) + if err != nil { + return err + } + params = p + } + + proto, err := api.MsigCancelTxnHash(ctx, msig, txid, dest, types.BigInt(value), from, method, params) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid = sm.Cid() + } + + fmt.Println("sent cancel in message: ", msgCid) + + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("cancel returned exit %d", wait.Receipt.ExitCode) + } + + return nil + }, +} + var msigRemoveProposeCmd = &cli.Command{ Name: "propose-remove", Usage: "Propose to remove a signer", @@ -1490,7 +1618,7 @@ var msigLockCancelCmd = &cli.Command{ return actErr } - proto, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + proto, err := api.MsigCancelTxnHash(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } diff --git a/cli/wait.go b/cli/wait.go index 5fc5fa469..a3c0e511a 100644 --- a/cli/wait.go +++ b/cli/wait.go @@ -1,6 +1,7 @@ package cli import ( + "context" "fmt" "time" @@ -10,8 +11,22 @@ import ( var WaitApiCmd = &cli.Command{ Name: "wait-api", Usage: "Wait for lotus api to come online", + Flags: []cli.Flag{ + &cli.DurationFlag{ + Name: "timeout", + Usage: "duration to wait till fail", + Value: time.Second * 30, + }, + }, Action: func(cctx *cli.Context) error { - for i := 0; i < 30; i++ { + ctx := ReqContext(cctx) + ctx, cancel := context.WithTimeout(ctx, cctx.Duration("timeout")) + defer cancel() + for { + if ctx.Err() != nil { + break + } + api, closer, err := GetAPI(cctx) if err != nil { fmt.Printf("Not online yet... 
(%s)\n", err) @@ -20,8 +35,6 @@ var WaitApiCmd = &cli.Command{ } defer closer() - ctx := ReqContext(cctx) - _, err = api.Version(ctx) if err != nil { return err @@ -29,6 +42,11 @@ var WaitApiCmd = &cli.Command{ return nil } - return fmt.Errorf("timed out waiting for api to come online") + + if ctx.Err() == context.DeadlineExceeded { + return fmt.Errorf("timed out waiting for api to come online") + } + + return ctx.Err() }, } diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go index f4cc0f837..358fbd046 100644 --- a/cmd/lotus-bench/caching_verifier.go +++ b/cmd/lotus-bench/caching_verifier.go @@ -5,10 +5,11 @@ import ( "context" "errors" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-datastore" "github.com/minio/blake2b-simd" cbg "github.com/whyrusleeping/cbor-gen" @@ -36,7 +37,7 @@ func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBOR } hash := hasher.Sum(nil) key := datastore.NewKey(string(hash)) - fromDs, err := cv.ds.Get(key) + fromDs, err := cv.ds.Get(context.Background(), key) if err == nil { switch fromDs[0] { case 's': @@ -66,7 +67,7 @@ func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBOR } if len(save) != 0 { - errSave := cv.ds.Put(key, save) + errSave := cv.ds.Put(context.Background(), key, save) if errSave != nil { log.Errorf("error saving result: %+v", errSave) } @@ -97,8 +98,12 @@ func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Contex return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u) } -func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { +func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) (bool, error) { return cv.backend.VerifyAggregateSeals(aggregate) } +func (cv cachingVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + return cv.backend.VerifyReplicaUpdate(update) +} + var _ ffiwrapper.Verifier = (*cachingVerifier)(nil) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index c66b90deb..18ce6c7ef 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -17,8 +17,6 @@ import ( "time" ocprom "contrib.go.opencensus.io/exporter/prometheus" - "github.com/cockroachdb/pebble" - "github.com/cockroachdb/pebble/bloom" "github.com/ipfs/go-cid" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -46,7 +44,6 @@ import ( "github.com/ipfs/go-datastore" badger "github.com/ipfs/go-ds-badger2" measure "github.com/ipfs/go-ds-measure" - pebbleds "github.com/ipfs/go-ds-pebble" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -114,9 +111,6 @@ var importBenchCmd = &cli.Command{ &cli.BoolFlag{ Name: "only-import", }, - &cli.BoolFlag{ - Name: "use-pebble", - }, &cli.BoolFlag{ Name: "use-native-badger", }, @@ -178,29 +172,6 @@ var importBenchCmd = &cli.Command{ ) switch { - case cctx.Bool("use-pebble"): - log.Info("using pebble") - cache := 512 - ds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ - // Pebble has a single combined cache area and the write - // buffers are taken from this too. 
Assign all available - // memory allowance for cache. - Cache: pebble.NewCache(int64(cache * 1024 * 1024)), - // The size of memory table(as well as the write buffer). - // Note, there may have more than two memory tables in the system. - // MemTableStopWritesThreshold can be configured to avoid the memory abuse. - MemTableSize: cache * 1024 * 1024 / 4, - // The default compaction concurrency(1 thread), - // Here use all available CPUs for faster compaction. - MaxConcurrentCompactions: runtime.NumCPU(), - // Per-level options. Options for at least one level must be specified. The - // options for the last level are used for all subsequent levels. - Levels: []pebble.LevelOptions{ - {TargetFileSize: 16 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10), Compression: pebble.NoCompression}, - }, - Logger: log, - }) - case cctx.Bool("use-native-badger"): log.Info("using native badger") var opts badgerbs.Options @@ -333,7 +304,7 @@ var importBenchCmd = &cli.Command{ return fmt.Errorf("no CAR file provided for import") } - head, err = cs.Import(carFile) + head, err = cs.Import(cctx.Context, carFile) if err != nil { return err } @@ -356,7 +327,7 @@ var importBenchCmd = &cli.Command{ return xerrors.Errorf("failed to parse head tipset key: %w", err) } - head, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + head, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) if err != nil { return err } @@ -365,7 +336,7 @@ var importBenchCmd = &cli.Command{ if err != nil { return err } - head, err = cs.LoadTipSet(types.NewTipSetKey(cr.Header.Roots...)) + head, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cr.Header.Roots...)) if err != nil { return err } @@ -382,7 +353,7 @@ var importBenchCmd = &cli.Command{ if cids, err = lcli.ParseTipSetString(tsk); err != nil { return xerrors.Errorf("failed to parse genesis tipset key: %w", err) } - genesis, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + genesis, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) } else { log.Warnf("getting genesis by height; this will be slow; pass in the genesis tipset through --genesis-tipset") // fallback to the slow path of walking the chain. @@ -393,7 +364,7 @@ var importBenchCmd = &cli.Command{ return err } - if err = cs.SetGenesis(genesis.Blocks()[0]); err != nil { + if err = cs.SetGenesis(cctx.Context, genesis.Blocks()[0]); err != nil { return err } @@ -404,10 +375,10 @@ var importBenchCmd = &cli.Command{ if cids, err = lcli.ParseTipSetString(tsk); err != nil { return xerrors.Errorf("failed to end genesis tipset key: %w", err) } - end, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + end, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) } else if h := cctx.Int64("end-height"); h != 0 { log.Infof("getting end tipset at height %d...", h) - end, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true) + end, err = cs.GetTipsetByHeight(cctx.Context, abi.ChainEpoch(h), head, true) } if err != nil { @@ -426,7 +397,7 @@ var importBenchCmd = &cli.Command{ if cids, err = lcli.ParseTipSetString(tsk); err != nil { return xerrors.Errorf("failed to start genesis tipset key: %w", err) } - start, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + start, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) } else if h := cctx.Int64("start-height"); h != 0 { log.Infof("getting start tipset at height %d...", h) // lookback from the end tipset (which falls back to head if not supplied). 
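Many hunks in this patch thread a context.Context through as the first argument to datastore and chainstore calls (ds.Get(ctx, key), mds.Put(ctx, ...), cs.LoadTipSet(cctx.Context, ...)). A minimal sketch of the context-first go-datastore API these call sites migrate to, assuming go-datastore v0.5+; the in-memory MapDatastore and the key/value used here are illustrative only:

```go
package main

import (
	"context"
	"fmt"

	datastore "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
)

func main() {
	ctx := context.Background()

	// In-memory store for illustration; a real node would use an on-disk
	// datastore behind the same Batching interface.
	ds := dssync.MutexWrap(datastore.NewMapDatastore())

	key := datastore.NewKey("/metadata/miner-address")

	// Since go-datastore v0.5, Put/Get/Has/Delete/Query all take a context
	// as their first argument.
	if err := ds.Put(ctx, key, []byte("t01000")); err != nil {
		panic(err)
	}

	val, err := ds.Get(ctx, key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s => %s\n", key, string(val))
}
```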
@@ -439,7 +410,7 @@ var importBenchCmd = &cli.Command{ if start != nil { startEpoch = start.Height() - if err := cs.ForceHeadSilent(context.Background(), start); err != nil { + if err := cs.ForceHeadSilent(cctx.Context, start); err != nil { // if err := cs.SetHead(start); err != nil { return err } @@ -450,7 +421,7 @@ var importBenchCmd = &cli.Command{ if h := ts.Height(); h%100 == 0 { log.Infof("walking back the chain; loaded tipset at height %d...", h) } - next, err := cs.LoadTipSet(ts.Parents()) + next, err := cs.LoadTipSet(cctx.Context, ts.Parents()) if err != nil { return err } diff --git a/cmd/lotus-miner/init.go b/cmd/lotus-miner/init.go index a5e9710a9..ae742c663 100644 --- a/cmd/lotus-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -13,6 +13,8 @@ import ( "path/filepath" "strconv" + power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power" + "github.com/docker/go-units" "github.com/google/uuid" "github.com/ipfs/go-datastore" @@ -345,7 +347,7 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string return err } - if err := mds.Put(sectorKey, b); err != nil { + if err := mds.Put(ctx, sectorKey, b); err != nil { return err } @@ -385,7 +387,7 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string buf := make([]byte, binary.MaxVarintLen64) size := binary.PutUvarint(buf, uint64(maxSectorID)) - return mds.Put(datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size]) + return mds.Put(ctx, datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size]) } func findMarketDealID(ctx context.Context, api v1api.FullNode, deal market2.DealProposal) (abi.DealID, error) { @@ -426,7 +428,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode return xerrors.Errorf("peer ID from private key: %w", err) } - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(ctx, "/metadata") if err != nil { return err } @@ -439,7 +441,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode } if cctx.Bool("genesis-miner") { - if err := mds.Put(datastore.NewKey("miner-address"), a.Bytes()); err != nil { + if err := mds.Put(ctx, datastore.NewKey("miner-address"), a.Bytes()); err != nil { return err } @@ -546,7 +548,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode } log.Infof("Created new miner: %s", addr) - if err := mds.Put(datastore.NewKey("miner-address"), addr.Bytes()); err != nil { + if err := mds.Put(ctx, datastore.NewKey("miner-address"), addr.Bytes()); err != nil { return err } @@ -644,11 +646,26 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID, return address.Address{}, err } + sender := owner + if fromstr := cctx.String("from"); fromstr != "" { + faddr, err := address.NewFromString(fromstr) + if err != nil { + return address.Undef, fmt.Errorf("could not parse from address: %w", err) + } + sender = faddr + } + + // make sure the sender account exists on chain + _, err = api.StateLookupID(ctx, owner, types.EmptyTSK) + if err != nil { + return address.Undef, xerrors.Errorf("sender must exist on chain: %w", err) + } + // make sure the worker account exists on chain _, err = api.StateLookupID(ctx, worker, types.EmptyTSK) if err != nil { signed, err := api.MpoolPushMessage(ctx, &types.Message{ - From: owner, + From: sender, To: worker, Value: types.NewInt(0), }, nil) @@ -668,35 +685,46 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID, } } - nv, err := 
api.StateNetworkVersion(ctx, types.EmptyTSK) + // make sure the owner account exists on chain + _, err = api.StateLookupID(ctx, owner, types.EmptyTSK) if err != nil { - return address.Undef, xerrors.Errorf("getting network version: %w", err) + signed, err := api.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: owner, + Value: types.NewInt(0), + }, nil) + if err != nil { + return address.Undef, xerrors.Errorf("push owner init: %w", err) + } + + log.Infof("Initializing owner account %s, message: %s", worker, signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true) + if err != nil { + return address.Undef, xerrors.Errorf("waiting for owner init: %w", err) + } + if mw.Receipt.ExitCode != 0 { + return address.Undef, xerrors.Errorf("initializing owner account failed: exit code %d", mw.Receipt.ExitCode) + } } - spt, err := miner.SealProofTypeFromSectorSize(abi.SectorSize(ssize), nv) + // Note: the correct thing to do would be to call SealProofTypeFromSectorSize if actors version is v3 or later, but this still works + spt, err := miner.WindowPoStProofTypeFromSectorSize(abi.SectorSize(ssize)) if err != nil { - return address.Undef, xerrors.Errorf("getting seal proof type: %w", err) + return address.Undef, xerrors.Errorf("getting post proof type: %w", err) } - params, err := actors.SerializeParams(&power2.CreateMinerParams{ - Owner: owner, - Worker: worker, - SealProofType: spt, - Peer: abi.PeerID(peerid), + params, err := actors.SerializeParams(&power6.CreateMinerParams{ + Owner: owner, + Worker: worker, + WindowPoStProofType: spt, + Peer: abi.PeerID(peerid), }) if err != nil { return address.Undef, err } - sender := owner - if fromstr := cctx.String("from"); fromstr != "" { - faddr, err := address.NewFromString(fromstr) - if err != nil { - return address.Undef, fmt.Errorf("could not parse from address: %w", err) - } - sender = faddr - } - createStorageMinerMsg := &types.Message{ To: power.Address, From: sender, diff --git a/cmd/lotus-miner/init_restore.go b/cmd/lotus-miner/init_restore.go index 0974a7c5d..1aaa7909a 100644 --- a/cmd/lotus-miner/init_restore.go +++ b/cmd/lotus-miner/init_restore.go @@ -233,7 +233,7 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi log.Info("Restoring metadata backup") - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(ctx, "/metadata") if err != nil { return err } @@ -255,7 +255,7 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi log.Info("Checking actor metadata") - abytes, err := mds.Get(datastore.NewKey("miner-address")) + abytes, err := mds.Get(ctx, datastore.NewKey("miner-address")) if err != nil { return xerrors.Errorf("getting actor address from metadata datastore: %w", err) } diff --git a/cmd/lotus-miner/main.go b/cmd/lotus-miner/main.go index 110748f48..57b5d8a3e 100644 --- a/cmd/lotus-miner/main.go +++ b/cmd/lotus-miner/main.go @@ -7,7 +7,6 @@ import ( "github.com/fatih/color" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" - "go.opencensus.io/trace" "golang.org/x/xerrors" cliutil "github.com/filecoin-project/lotus/cli/util" @@ -55,10 +54,11 @@ func main() { lcli.WithCategory("storage", sealingCmd), lcli.WithCategory("retrieval", piecesCmd), } + jaeger := tracing.SetupJaegerTracing("lotus") defer func() { if jaeger != nil { - jaeger.Flush() + _ = jaeger.ForceFlush(context.Background()) } }() @@ -66,7 +66,9 @@ func main() { cmd := cmd 
originBefore := cmd.Before cmd.Before = func(cctx *cli.Context) error { - trace.UnregisterExporter(jaeger) + if jaeger != nil { + _ = jaeger.Shutdown(cctx.Context) + } jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) if cctx.IsSet("color") { diff --git a/cmd/lotus-miner/pieces.go b/cmd/lotus-miner/pieces.go index 75605c1ed..778f8e6cf 100644 --- a/cmd/lotus-miner/pieces.go +++ b/cmd/lotus-miner/pieces.go @@ -6,6 +6,7 @@ import ( "text/tabwriter" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/tablewriter" "github.com/ipfs/go-cid" "github.com/urfave/cli/v2" ) @@ -48,6 +49,12 @@ var piecesListPiecesCmd = &cli.Command{ var piecesListCidInfosCmd = &cli.Command{ Name: "list-cids", Usage: "list registered payload CIDs", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + }, + }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { @@ -61,9 +68,54 @@ var piecesListCidInfosCmd = &cli.Command{ return err } + w := tablewriter.New(tablewriter.Col("CID"), + tablewriter.Col("Piece"), + tablewriter.Col("BlockOffset"), + tablewriter.Col("BlockLen"), + tablewriter.Col("Deal"), + tablewriter.Col("Sector"), + tablewriter.Col("DealOffset"), + tablewriter.Col("DealLen"), + ) + for _, c := range cids { - fmt.Println(c) + if !cctx.Bool("verbose") { + fmt.Println(c) + continue + } + + ci, err := nodeApi.PiecesGetCIDInfo(ctx, c) + if err != nil { + fmt.Printf("Error getting CID info: %s\n", err) + continue + } + + for _, location := range ci.PieceBlockLocations { + pi, err := nodeApi.PiecesGetPieceInfo(ctx, location.PieceCID) + if err != nil { + fmt.Printf("Error getting piece info: %s\n", err) + continue + } + + for _, deal := range pi.Deals { + w.Write(map[string]interface{}{ + "CID": c, + "Piece": location.PieceCID, + "BlockOffset": location.RelOffset, + "BlockLen": location.BlockSize, + "Deal": deal.DealID, + "Sector": deal.SectorID, + "DealOffset": deal.Offset, + "DealLen": deal.Length, + }) + } + } } + + if cctx.Bool("verbose") { + return w.Flush(os.Stdout) + } + return nil }, } diff --git a/cmd/lotus-miner/retrieval-deals.go b/cmd/lotus-miner/retrieval-deals.go index 1ce1f6593..bd5d30a4e 100644 --- a/cmd/lotus-miner/retrieval-deals.go +++ b/cmd/lotus-miner/retrieval-deals.go @@ -3,6 +3,7 @@ package main import ( "fmt" "os" + "sort" "text/tabwriter" "github.com/docker/go-units" @@ -137,6 +138,10 @@ var retrievalDealsListCmd = &cli.Command{ return err } + sort.Slice(deals, func(i, j int) bool { + return deals[i].ID < deals[j].ID + }) + w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) _, _ = fmt.Fprintf(w, "Receiver\tDealID\tPayload\tState\tPricePerByte\tBytesSent\tMessage\n") diff --git a/cmd/lotus-miner/sealing.go b/cmd/lotus-miner/sealing.go index 472af8da6..16b02f7bb 100644 --- a/cmd/lotus-miner/sealing.go +++ b/cmd/lotus-miner/sealing.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "math" "os" "sort" "strings" @@ -32,6 +33,17 @@ var sealingCmd = &cli.Command{ }, } +var barCols = float64(64) + +func barString(total, y, g float64) string { + yBars := int(math.Round(y / total * barCols)) + gBars := int(math.Round(g / total * barCols)) + eBars := int(barCols) - yBars - gBars + return color.YellowString(strings.Repeat("|", yBars)) + + color.GreenString(strings.Repeat("|", gBars)) + + strings.Repeat(" ", eBars) +} + var sealingWorkersCmd = &cli.Command{ Name: "workers", Usage: "list workers", @@ -77,7 +89,7 @@ var sealingWorkersCmd = &cli.Command{ for _, stat 
:= range st { gpuUse := "not " gpuCol := color.FgBlue - if stat.GpuUsed { + if stat.GpuUsed > 0 { gpuCol = color.FgGreen gpuUse = "" } @@ -89,56 +101,43 @@ var sealingWorkersCmd = &cli.Command{ fmt.Printf("Worker %s, host %s%s\n", stat.id, color.MagentaString(stat.Info.Hostname), disabled) - var barCols = uint64(64) - cpuBars := int(stat.CpuUse * barCols / stat.Info.Resources.CPUs) - cpuBar := strings.Repeat("|", cpuBars) - if int(barCols)-cpuBars >= 0 { - cpuBar += strings.Repeat(" ", int(barCols)-cpuBars) - } - fmt.Printf("\tCPU: [%s] %d/%d core(s) in use\n", - color.GreenString(cpuBar), stat.CpuUse, stat.Info.Resources.CPUs) + barString(float64(stat.Info.Resources.CPUs), 0, float64(stat.CpuUse)), stat.CpuUse, stat.Info.Resources.CPUs) - ramBarsRes := int(stat.Info.Resources.MemReserved * barCols / stat.Info.Resources.MemPhysical) - ramBarsUsed := int(stat.MemUsedMin * barCols / stat.Info.Resources.MemPhysical) - ramRepeatSpace := int(barCols) - (ramBarsUsed + ramBarsRes) - - colorFunc := color.YellowString - if ramRepeatSpace < 0 { - ramRepeatSpace = 0 - colorFunc = color.RedString + ramTotal := stat.Info.Resources.MemPhysical + ramTasks := stat.MemUsedMin + ramUsed := stat.Info.Resources.MemUsed + var ramReserved uint64 = 0 + if ramUsed > ramTasks { + ramReserved = ramUsed - ramTasks } - - ramBar := colorFunc(strings.Repeat("|", ramBarsRes)) + - color.GreenString(strings.Repeat("|", ramBarsUsed)) + - strings.Repeat(" ", ramRepeatSpace) - - vmem := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap - - vmemBarsRes := int(stat.Info.Resources.MemReserved * barCols / vmem) - vmemBarsUsed := int(stat.MemUsedMax * barCols / vmem) - vmemRepeatSpace := int(barCols) - (vmemBarsUsed + vmemBarsRes) - - colorFunc = color.YellowString - if vmemRepeatSpace < 0 { - vmemRepeatSpace = 0 - colorFunc = color.RedString - } - - vmemBar := colorFunc(strings.Repeat("|", vmemBarsRes)) + - color.GreenString(strings.Repeat("|", vmemBarsUsed)) + - strings.Repeat(" ", vmemRepeatSpace) + ramBar := barString(float64(ramTotal), float64(ramReserved), float64(ramTasks)) fmt.Printf("\tRAM: [%s] %d%% %s/%s\n", ramBar, - (stat.Info.Resources.MemReserved+stat.MemUsedMin)*100/stat.Info.Resources.MemPhysical, - types.SizeStr(types.NewInt(stat.Info.Resources.MemReserved+stat.MemUsedMin)), + (ramTasks+ramReserved)*100/stat.Info.Resources.MemPhysical, + types.SizeStr(types.NewInt(ramTasks+ramUsed)), types.SizeStr(types.NewInt(stat.Info.Resources.MemPhysical))) - fmt.Printf("\tVMEM: [%s] %d%% %s/%s\n", vmemBar, - (stat.Info.Resources.MemReserved+stat.MemUsedMax)*100/vmem, - types.SizeStr(types.NewInt(stat.Info.Resources.MemReserved+stat.MemUsedMax)), - types.SizeStr(types.NewInt(vmem))) + vmemTotal := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap + vmemTasks := stat.MemUsedMax + vmemUsed := stat.Info.Resources.MemUsed + stat.Info.Resources.MemSwapUsed + var vmemReserved uint64 = 0 + if vmemUsed > vmemTasks { + vmemReserved = vmemUsed - vmemTasks + } + vmemBar := barString(float64(vmemTotal), float64(vmemReserved), float64(vmemTasks)) + fmt.Printf("\tVMEM: [%s] %d%% %s/%s\n", vmemBar, + (vmemTasks+vmemReserved)*100/vmemTotal, + types.SizeStr(types.NewInt(vmemTasks+vmemReserved)), + types.SizeStr(types.NewInt(vmemTotal))) + + if len(stat.Info.Resources.GPUs) > 0 { + gpuBar := barString(float64(len(stat.Info.Resources.GPUs)), 0, stat.GpuUsed) + fmt.Printf("\tGPU: [%s] %.f%% %.2f/%d gpu(s) in use\n", color.GreenString(gpuBar), + stat.GpuUsed*100/float64(len(stat.Info.Resources.GPUs)), + stat.GpuUsed, 
len(stat.Info.Resources.GPUs)) + } for _, gpu := range stat.Info.Resources.GPUs { fmt.Printf("\tGPU: %s\n", color.New(gpuCol).Sprintf("%s, %sused", gpu, gpuUse)) } diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index 43a71fd9e..b2059a737 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -1648,6 +1648,11 @@ var sectorsUpdateCmd = &cli.Command{ return xerrors.Errorf("could not parse sector number: %w", err) } + _, err = nodeApi.SectorsStatus(ctx, abi.SectorNumber(id), false) + if err != nil { + return xerrors.Errorf("sector %d not found, could not change state", id) + } + newState := cctx.Args().Get(1) if _, ok := sealing.ExistSectorStateList[sealing.SectorState(newState)]; !ok { fmt.Printf(" \"%s\" is not a valid state. Possible states for sectors are: \n", newState) diff --git a/cmd/lotus-miner/storage.go b/cmd/lotus-miner/storage.go index e7508eb29..6f7a627f6 100644 --- a/cmd/lotus-miner/storage.go +++ b/cmd/lotus-miner/storage.go @@ -48,6 +48,7 @@ stored while moving through the sealing pipeline (references as 'seal').`, storageListCmd, storageFindCmd, storageCleanupCmd, + storageLocks, }, } @@ -95,6 +96,14 @@ over time Name: "max-storage", Usage: "(for init) limit storage space for sectors (expensive for very large paths!)", }, + &cli.StringSliceFlag{ + Name: "groups", + Usage: "path group names", + }, + &cli.StringSliceFlag{ + Name: "allow-to", + Usage: "path groups allowed to pull data from this path (allow all if not specified)", + }, }, Action: func(cctx *cli.Context) error { nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) @@ -142,6 +151,8 @@ over time CanSeal: cctx.Bool("seal"), CanStore: cctx.Bool("store"), MaxStorage: uint64(maxStor), + Groups: cctx.StringSlice("groups"), + AllowTo: cctx.StringSlice("allow-to"), } if !(cfg.CanStore || cfg.CanSeal) { @@ -322,10 +333,17 @@ var storageListCmd = &cli.Command{ if si.CanStore { fmt.Print(color.CyanString("Store")) } - fmt.Println("") } else { fmt.Print(color.HiYellowString("Use: ReadOnly")) } + fmt.Println() + + if len(si.Groups) > 0 { + fmt.Printf("\tGroups: %s\n", strings.Join(si.Groups, ", ")) + } + if len(si.AllowTo) > 0 { + fmt.Printf("\tAllowTo: %s\n", strings.Join(si.AllowTo, ", ")) + } if localPath, ok := local[s.ID]; ok { fmt.Printf("\tLocal: %s\n", color.GreenString(localPath)) @@ -741,3 +759,43 @@ func cleanupRemovedSectorData(ctx context.Context, api api.StorageMiner, napi v0 return nil } + +var storageLocks = &cli.Command{ + Name: "locks", + Usage: "show active sector locks", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + locks, err := api.StorageGetLocks(ctx) + if err != nil { + return err + } + + for _, lock := range locks.Locks { + st, err := api.SectorsStatus(ctx, lock.Sector.Number, false) + if err != nil { + return xerrors.Errorf("getting sector status(%d): %w", lock.Sector.Number, err) + } + + lockstr := fmt.Sprintf("%d\t%s\t", lock.Sector.Number, color.New(stateOrder[sealing.SectorState(st.State)].col).Sprint(st.State)) + + for i := 0; i < storiface.FileTypes; i++ { + if lock.Write[i] > 0 { + lockstr += fmt.Sprintf("%s(%s) ", storiface.SectorFileType(1< 0 { + lockstr += fmt.Sprintf("%s(%s:%d) ", storiface.SectorFileType(1< 0 { + fmt.Printf("WAITING FOR %d MESSAGES\n", len(msgs)) + } + + for _, msg := range msgs { + ml, err := api.StateWaitMsg(ctx, msg, confidence, lapi.LookbackNoLimit, true) + if err != nil { + return err + } + if 
ml.Receipt.ExitCode != exitcode.Ok { + fmt.Printf("MSG %s NON-ZERO EXITCODE: %s\n", msg, ml.Receipt.ExitCode) + } + } + } + }, +} diff --git a/cmd/lotus-shed/datastore.go b/cmd/lotus-shed/datastore.go index c3a9e572c..ff740a772 100644 --- a/cmd/lotus-shed/datastore.go +++ b/cmd/lotus-shed/datastore.go @@ -83,7 +83,7 @@ var datastoreListCmd = &cli.Command{ genc := cctx.String("get-enc") - q, err := ds.Query(dsq.Query{ + q, err := ds.Query(context.Background(), dsq.Query{ Prefix: datastore.NewKey(cctx.Args().Get(1)).String(), KeysOnly: genc == "", }) @@ -147,7 +147,7 @@ var datastoreGetCmd = &cli.Command{ return err } - val, err := ds.Get(datastore.NewKey(cctx.Args().Get(1))) + val, err := ds.Get(context.Background(), datastore.NewKey(cctx.Args().Get(1))) if err != nil { return xerrors.Errorf("get: %w", err) } diff --git a/cmd/lotus-shed/export.go b/cmd/lotus-shed/export.go index e711ba2bb..3851e4922 100644 --- a/cmd/lotus-shed/export.go +++ b/cmd/lotus-shed/export.go @@ -93,7 +93,7 @@ var exportChainCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, nil, nil) defer cs.Close() //nolint:errcheck - if err := cs.Load(); err != nil { + if err := cs.Load(context.Background()); err != nil { return err } @@ -110,7 +110,7 @@ var exportChainCmd = &cli.Command{ tsk := types.NewTipSetKey(cids...) - selts, err := cs.LoadTipSet(tsk) + selts, err := cs.LoadTipSet(context.Background(), tsk) if err != nil { return xerrors.Errorf("loading tipset: %w", err) } diff --git a/cmd/lotus-shed/genesis-verify.go b/cmd/lotus-shed/genesis-verify.go index 4a692d4c9..cc042064d 100644 --- a/cmd/lotus-shed/genesis-verify.go +++ b/cmd/lotus-shed/genesis-verify.go @@ -64,7 +64,7 @@ var genesisVerifyCmd = &cli.Command{ return xerrors.Errorf("opening the car file: %w", err) } - ts, err := cs.Import(f) + ts, err := cs.Import(cctx.Context, f) if err != nil { return err } diff --git a/cmd/lotus-shed/import-car.go b/cmd/lotus-shed/import-car.go index 4e465029f..973e7b31b 100644 --- a/cmd/lotus-shed/import-car.go +++ b/cmd/lotus-shed/import-car.go @@ -82,7 +82,7 @@ var importCarCmd = &cli.Command{ return err case nil: fmt.Printf("\r%s", blk.Cid()) - if err := bs.Put(blk); err != nil { + if err := bs.Put(context.Background(), blk); err != nil { if err := f.Close(); err != nil { return err } @@ -146,7 +146,7 @@ var importObjectCmd = &cli.Command{ return err } - if err := bs.Put(blk); err != nil { + if err := bs.Put(context.Background(), blk); err != nil { return err } diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index a982fcf23..d18931312 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -64,6 +64,8 @@ func main() { splitstoreCmd, fr32Cmd, chainCmd, + balancerCmd, + terminationsCmd, } app := &cli.App{ diff --git a/cmd/lotus-shed/market.go b/cmd/lotus-shed/market.go index 8221e53eb..aaef4690e 100644 --- a/cmd/lotus-shed/market.go +++ b/cmd/lotus-shed/market.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "os" "path" @@ -198,7 +199,7 @@ var marketExportDatastoreCmd = &cli.Command{ } // Write the backup to the file - if err := bds.Backup(out); err != nil { + if err := bds.Backup(context.Background(), out); err != nil { if cerr := out.Close(); cerr != nil { log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err) } @@ -215,7 +216,7 @@ var marketExportDatastoreCmd = &cli.Command{ } func exportPrefix(prefix string, ds datastore.Batching, backupDs datastore.Batching) error { - q, err := ds.Query(dsq.Query{ + q, err := 
ds.Query(context.Background(), dsq.Query{ Prefix: prefix, }) if err != nil { @@ -225,7 +226,7 @@ func exportPrefix(prefix string, ds datastore.Batching, backupDs datastore.Batch for res := range q.Next() { fmt.Println("Exporting key " + res.Key) - err := backupDs.Put(datastore.NewKey(res.Key), res.Value) + err := backupDs.Put(context.Background(), datastore.NewKey(res.Key), res.Value) if err != nil { return xerrors.Errorf("putting %s to backup datastore: %w", res.Key, err) } diff --git a/cmd/lotus-shed/miner.go b/cmd/lotus-shed/miner.go index ec5a445f9..479e081e9 100644 --- a/cmd/lotus-shed/miner.go +++ b/cmd/lotus-shed/miner.go @@ -2,11 +2,29 @@ package main import ( "bufio" + "bytes" + "fmt" "io" "os" "path/filepath" "strings" + miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + + power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power" + + "github.com/docker/go-units" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -17,6 +35,231 @@ var minerCmd = &cli.Command{ Usage: "miner-related utilities", Subcommands: []*cli.Command{ minerUnpackInfoCmd, + minerCreateCmd, + minerFaultsCmd, + }, +} + +var minerFaultsCmd = &cli.Command{ + Name: "faults", + Usage: "Display a list of faulty sectors for a SP", + ArgsUsage: "[minerAddress]", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "expiring-in", + Usage: "only list sectors that are expiring in the next epochs", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass miner address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + + ctx := lcli.ReqContext(cctx) + + m, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + faultBf, err := api.StateMinerFaults(ctx, m, types.EmptyTSK) + if err != nil { + return err + } + + faults, err := faultBf.All(miner2.SectorsMax) + if err != nil { + return err + } + + if len(faults) == 0 { + fmt.Println("no faults") + return nil + } + + expEpoch := abi.ChainEpoch(cctx.Uint64("expiring-in")) + + if expEpoch == 0 { + fmt.Print("faulty sectors: ") + for _, v := range faults { + fmt.Printf("%d ", v) + } + + return nil + } + + h, err := api.ChainHead(ctx) + if err != nil { + return err + } + + fmt.Printf("faulty sectors expiring in the next %d epochs: ", expEpoch) + for _, v := range faults { + ss, err := api.StateSectorExpiration(ctx, m, abi.SectorNumber(v), types.EmptyTSK) + if err != nil { + return err + } + + if ss.Early < h.Height()+expEpoch { + fmt.Printf("%d ", v) + } + } + + return nil + }, +} + +var minerCreateCmd = &cli.Command{ + Name: "create", + Usage: "sends a create miner msg", + ArgsUsage: "[sender] [owner] [worker] [sector size]", + Action: func(cctx *cli.Context) error { + wapi, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + if cctx.Args().Len() != 4 { + return xerrors.Errorf("expected 4 args (sender owner worker 
sectorSize)") + } + + sender, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + owner, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return err + } + + worker, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + ssize, err := units.RAMInBytes(cctx.Args().Get(3)) + if err != nil { + return fmt.Errorf("failed to parse sector size: %w", err) + } + + // make sure the sender account exists on chain + _, err = wapi.StateLookupID(ctx, sender, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("sender must exist on chain: %w", err) + } + + // make sure the worker account exists on chain + _, err = wapi.StateLookupID(ctx, worker, types.EmptyTSK) + if err != nil { + signed, err := wapi.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: worker, + Value: types.NewInt(0), + }, nil) + if err != nil { + return xerrors.Errorf("push worker init: %w", err) + } + + log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for worker init: %w", err) + } + + if mw.Receipt.ExitCode != 0 { + return xerrors.Errorf("initializing worker account failed: exit code %d", mw.Receipt.ExitCode) + } + } + + // make sure the owner account exists on chain + _, err = wapi.StateLookupID(ctx, owner, types.EmptyTSK) + if err != nil { + signed, err := wapi.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: owner, + Value: types.NewInt(0), + }, nil) + if err != nil { + return xerrors.Errorf("push owner init: %w", err) + } + + log.Infof("Initializing owner account %s, message: %s", owner, signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for owner init: %w", err) + } + + if mw.Receipt.ExitCode != 0 { + return xerrors.Errorf("initializing owner account failed: exit code %d", mw.Receipt.ExitCode) + } + } + + // Note: the correct thing to do would be to call SealProofTypeFromSectorSize if actors version is v3 or later, but this still works + spt, err := miner.WindowPoStProofTypeFromSectorSize(abi.SectorSize(ssize)) + if err != nil { + return xerrors.Errorf("getting post proof type: %w", err) + } + + params, err := actors.SerializeParams(&power6.CreateMinerParams{ + Owner: owner, + Worker: worker, + WindowPoStProofType: spt, + }) + + if err != nil { + return err + } + + createStorageMinerMsg := &types.Message{ + To: power.Address, + From: sender, + Value: big.Zero(), + + Method: power.Methods.CreateMiner, + Params: params, + } + + signed, err := wapi.MpoolPushMessage(ctx, createStorageMinerMsg, nil) + if err != nil { + return xerrors.Errorf("pushing createMiner message: %w", err) + } + + log.Infof("Pushed CreateMiner message: %s", signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for createMiner message: %w", err) + } + + if mw.Receipt.ExitCode != 0 { + return xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode) + } + + var retval power6.CreateMinerReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil { + return err + } + + log.Infof("New miner's address is: %s (%s)", retval.IDAddress, retval.RobustAddress) + + return 
nil }, } diff --git a/cmd/lotus-shed/msg.go b/cmd/lotus-shed/msg.go index b640fb9c9..7853624a6 100644 --- a/cmd/lotus-shed/msg.go +++ b/cmd/lotus-shed/msg.go @@ -148,6 +148,15 @@ func printMessage(cctx *cli.Context, msg *types.Message) error { fmt.Println("Params:", p) + if msg, err := messageFromBytes(cctx, msg.Params); err == nil { + fmt.Println("---") + color.Red("Params message:") + + if err := printMessage(cctx, msg.VMMessage()); err != nil { + return err + } + } + return nil } diff --git a/cmd/lotus-shed/pruning.go b/cmd/lotus-shed/pruning.go index 186a3191a..164ff197a 100644 --- a/cmd/lotus-shed/pruning.go +++ b/cmd/lotus-shed/pruning.go @@ -171,7 +171,7 @@ var stateTreePruneCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck - if err := cs.Load(); err != nil { + if err := cs.Load(context.Background()); err != nil { return fmt.Errorf("loading chainstore: %w", err) } diff --git a/cmd/lotus-shed/sectors.go b/cmd/lotus-shed/sectors.go index 726d992c4..4894a6eea 100644 --- a/cmd/lotus-shed/sectors.go +++ b/cmd/lotus-shed/sectors.go @@ -2,11 +2,14 @@ package main import ( "bytes" + "context" "encoding/base64" + "encoding/binary" "fmt" "image" "image/color" "image/png" + "io" "os" "sort" "strconv" @@ -23,6 +26,7 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" @@ -38,6 +42,7 @@ var sectorsCmd = &cli.Command{ terminateSectorCmd, terminateSectorPenaltyEstimationCmd, visAllocatedSectorsCmd, + dumpRLESectorCmd, }, } @@ -275,6 +280,113 @@ var terminateSectorPenaltyEstimationCmd = &cli.Command{ }, } +func activeMiners(ctx context.Context, api v0api.FullNode) ([]address.Address, error) { + miners, err := api.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return nil, err + } + powCache := make(map[address.Address]types.BigInt) + var lk sync.Mutex + parmap.Par(32, miners, func(a address.Address) { + pow, err := api.StateMinerPower(ctx, a, types.EmptyTSK) + + lk.Lock() + if err == nil { + powCache[a] = pow.MinerPower.QualityAdjPower + } else { + powCache[a] = types.NewInt(0) + } + lk.Unlock() + }) + sort.Slice(miners, func(i, j int) bool { + return powCache[miners[i]].GreaterThan(powCache[miners[j]]) + }) + n := sort.Search(len(miners), func(i int) bool { + pow := powCache[miners[i]] + return pow.IsZero() + }) + return append(miners[0:0:0], miners[:n]...), nil +} + +var dumpRLESectorCmd = &cli.Command{ + Name: "dump-rles", + Usage: "Dump AllocatedSectors RLEs from miners passed as arguments as run lengths in uint64 LE format.\nIf no arguments are passed, dumps all active miners in the state tree.", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + var miners []address.Address + if cctx.NArg() == 0 { + miners, err = activeMiners(ctx, api) + if err != nil { + return xerrors.Errorf("getting active miners: %w", err) + } + } else { + for _, mS := range cctx.Args().Slice() { + mA, err := address.NewFromString(mS) + if err != nil { + return xerrors.Errorf("parsing address '%s': %w", mS, err) + } + miners = append(miners, mA) + } + } + wbuf := make([]byte, 8) + buf := &bytes.Buffer{} + + for i := 0; i < len(miners); i++ { + buf.Reset() + err := func() error { + state, err 
:= api.StateReadState(ctx, miners[i], types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting state: %+v", err) + } + allocSString := state.State.(map[string]interface{})["AllocatedSectors"].(map[string]interface{})["/"].(string) + + allocCid, err := cid.Decode(allocSString) + if err != nil { + return xerrors.Errorf("decoding cid: %+v", err) + } + rle, err := api.ChainReadObj(ctx, allocCid) + if err != nil { + return xerrors.Errorf("reading AllocatedSectors: %+v", err) + } + + var bf bitfield.BitField + err = bf.UnmarshalCBOR(bytes.NewReader(rle)) + if err != nil { + return xerrors.Errorf("decoding bitfield: %w", err) + } + ri, err := bf.RunIterator() + if err != nil { + return xerrors.Errorf("creating iterator: %w", err) + } + + for ri.HasNext() { + run, err := ri.NextRun() + if err != nil { + return xerrors.Errorf("getting run: %w", err) + } + binary.LittleEndian.PutUint64(wbuf, run.Len) + buf.Write(wbuf) + } + _, err = io.Copy(os.Stdout, buf) + if err != nil { + return xerrors.Errorf("copy: %w", err) + } + + return nil + }() + if err != nil { + log.Errorf("miner %d: %s: %+v", i, miners[i], err) + } + } + return nil + }, +} + var visAllocatedSectorsCmd = &cli.Command{ Name: "vis-allocated", Usage: "Produces a html with visualisation of allocated sectors", @@ -287,32 +399,10 @@ var visAllocatedSectorsCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) var miners []address.Address if cctx.NArg() == 0 { - miners, err = api.StateListMiners(ctx, types.EmptyTSK) + miners, err = activeMiners(ctx, api) if err != nil { - return err + return xerrors.Errorf("getting active miners: %w", err) } - powCache := make(map[address.Address]types.BigInt) - var lk sync.Mutex - parmap.Par(32, miners, func(a address.Address) { - pow, err := api.StateMinerPower(ctx, a, types.EmptyTSK) - - lk.Lock() - if err == nil { - powCache[a] = pow.MinerPower.QualityAdjPower - } else { - powCache[a] = types.NewInt(0) - } - lk.Unlock() - }) - sort.Slice(miners, func(i, j int) bool { - return powCache[miners[i]].GreaterThan(powCache[miners[j]]) - }) - n := sort.Search(len(miners), func(i int) bool { - pow := powCache[miners[i]] - log.Infof("pow @%d = %s", i, pow) - return pow.IsZero() - }) - miners = miners[:n] } else { for _, mS := range cctx.Args().Slice() { mA, err := address.NewFromString(mS) diff --git a/cmd/lotus-shed/splitstore.go b/cmd/lotus-shed/splitstore.go index 4f668888e..58563955f 100644 --- a/cmd/lotus-shed/splitstore.go +++ b/cmd/lotus-shed/splitstore.go @@ -331,7 +331,7 @@ func deleteSplitstoreKeys(lr repo.LockedRepo) error { } var keys []datastore.Key - res, err := ds.Query(query.Query{Prefix: "/splitstore"}) + res, err := ds.Query(context.Background(), query.Query{Prefix: "/splitstore"}) if err != nil { return xerrors.Errorf("error querying datastore for splitstore keys: %w", err) } @@ -346,7 +346,7 @@ func deleteSplitstoreKeys(lr repo.LockedRepo) error { for _, k := range keys { fmt.Printf("deleting %s from datastore...\n", k) - err = ds.Delete(k) + err = ds.Delete(context.Background(), k) if err != nil { return xerrors.Errorf("error deleting key %s from datastore: %w", k, err) } diff --git a/cmd/lotus-shed/terminations.go b/cmd/lotus-shed/terminations.go new file mode 100644 index 000000000..0691f35da --- /dev/null +++ b/cmd/lotus-shed/terminations.go @@ -0,0 +1,181 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "strconv" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/lotus/chain/types" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/node/repo" + miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/urfave/cli/v2" +) + +var terminationsCmd = &cli.Command{ + Name: "terminations", + Description: "Lists terminated deals from the past 2 days", + ArgsUsage: "[block to look back from] [lookback period (epochs)]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.TODO() + + if cctx.NArg() != 2 { + return fmt.Errorf("must pass block cid && lookback period") + } + + blkCid, err := cid.Decode(cctx.Args().First()) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + fsrepo, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + lkrepo, err := fsrepo.Lock(repo.FullNode) + if err != nil { + return err + } + + defer lkrepo.Close() //nolint:errcheck + + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return fmt.Errorf("failed to open blockstore: %w", err) + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lkrepo.Datastore(context.Background(), "/metadata") + if err != nil { + return err + } + + cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) + defer cs.Close() //nolint:errcheck + + cst := cbor.NewCborStore(bs) + store := adt.WrapStore(ctx, cst) + + blk, err := cs.GetBlock(ctx, blkCid) + if err != nil { + return err + } + + lbp, err := strconv.Atoi(cctx.Args().Get(1)) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + cutoff := blk.Height - abi.ChainEpoch(lbp) + + for blk.Height > cutoff { + pts, err := cs.LoadTipSet(ctx, types.NewTipSetKey(blk.Parents...)) + if err != nil { + return err + } + + blk = pts.Blocks()[0] + + msgs, err := cs.MessagesForTipset(ctx, pts) + if err != nil { + return err + } + + for _, v := range msgs { + msg := v.VMMessage() + if msg.Method != miner.Methods.TerminateSectors { + continue + } + + tree, err := state.LoadStateTree(cst, blk.ParentStateRoot) + if err != nil { + return err + } + + minerAct, err := tree.GetActor(msg.To) + if err != nil { + return err + } + + if !builtin.IsStorageMinerActor(minerAct.Code) { + continue + } + + minerSt, err := miner.Load(store, minerAct) + if err != nil { + return err + } + + marketAct, err := tree.GetActor(market.Address) + if err != nil { + return err + } + + marketSt, err := market.Load(store, marketAct) + if err != nil { + return err + } + + proposals, err := marketSt.Proposals() + if err != nil { + return err + } + + var termParams miner2.TerminateSectorsParams + err = termParams.UnmarshalCBOR(bytes.NewBuffer(msg.Params)) + if err != nil { + return err + } + + for _, t := range termParams.Terminations { + sectors, err := minerSt.LoadSectors(&t.Sectors) + if err != nil { + return err + } + + for _, sector := range sectors { + for _, deal := range sector.DealIDs { + prop, find, err := proposals.Get(deal) + if 
err != nil { + return err + } + if find { + fmt.Printf("%s, %d, %d, %s, %s, %s\n", msg.To, sector.SectorNumber, deal, prop.Client, prop.PieceCID, prop.Label) + } + } + } + } + } + } + + return nil + }, +} diff --git a/cmd/lotus-sim/create.go b/cmd/lotus-sim/create.go index 4867a5da5..23ea454a3 100644 --- a/cmd/lotus-sim/create.go +++ b/cmd/lotus-sim/create.go @@ -26,7 +26,7 @@ var createSimCommand = &cli.Command{ var ts *types.TipSet switch cctx.NArg() { case 0: - if err := node.Chainstore.Load(); err != nil { + if err := node.Chainstore.Load(cctx.Context); err != nil { return err } ts = node.Chainstore.GetHeaviestTipSet() @@ -36,7 +36,7 @@ var createSimCommand = &cli.Command{ return err } tsk := types.NewTipSetKey(cids...) - ts, err = node.Chainstore.LoadTipSet(tsk) + ts, err = node.Chainstore.LoadTipSet(cctx.Context, tsk) if err != nil { return err } diff --git a/cmd/lotus-sim/info_capacity.go b/cmd/lotus-sim/info_capacity.go index 4372ee34a..a92d2cde4 100644 --- a/cmd/lotus-sim/info_capacity.go +++ b/cmd/lotus-sim/info_capacity.go @@ -39,7 +39,7 @@ var infoCapacityGrowthSimCommand = &cli.Command{ lastHeight := ts.Height() for ts.Height() > firstEpoch && cctx.Err() == nil { - ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + ts, err = sim.Node.Chainstore.LoadTipSet(cctx.Context, ts.Parents()) if err != nil { return err } diff --git a/cmd/lotus-sim/info_state.go b/cmd/lotus-sim/info_state.go index 5c9541513..125dae81d 100644 --- a/cmd/lotus-sim/info_state.go +++ b/cmd/lotus-sim/info_state.go @@ -60,7 +60,7 @@ var infoStateGrowthSimCommand = &cli.Command{ var links []cid.Cid var totalSize uint64 - if err := store.View(c, func(data []byte) error { + if err := store.View(cctx.Context, c, func(data []byte) error { totalSize += uint64(len(data)) return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { if c.Prefix().Codec != cid.DagCBOR { @@ -131,7 +131,7 @@ var infoStateGrowthSimCommand = &cli.Command{ fmt.Fprintf(cctx.App.Writer, "%d: %s\n", ts.Height(), types.SizeStr(types.NewInt(parentStateSize))) } - ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + ts, err = sim.Node.Chainstore.LoadTipSet(cctx.Context, ts.Parents()) if err != nil { return err } diff --git a/cmd/lotus-sim/simulation/block.go b/cmd/lotus-sim/simulation/block.go index 93e6a3191..106bc53f5 100644 --- a/cmd/lotus-sim/simulation/block.go +++ b/cmd/lotus-sim/simulation/block.go @@ -73,7 +73,7 @@ func (sim *Simulation) makeTipSet(ctx context.Context, messages []*types.Message Timestamp: uts, ElectionProof: &types.ElectionProof{WinCount: 1}, }} - err = sim.Node.Chainstore.PersistBlockHeaders(blks...) + err = sim.Node.Chainstore.PersistBlockHeaders(ctx, blks...) if err != nil { return nil, xerrors.Errorf("failed to persist block headers: %w", err) } diff --git a/cmd/lotus-sim/simulation/messages.go b/cmd/lotus-sim/simulation/messages.go index 5bed27436..d6dd98d43 100644 --- a/cmd/lotus-sim/simulation/messages.go +++ b/cmd/lotus-sim/simulation/messages.go @@ -31,7 +31,7 @@ func (sim *Simulation) storeMessages(ctx context.Context, messages []*types.Mess // fail a pre-commit... 
var msgCids []cid.Cid for _, msg := range messages { - c, err := sim.Node.Chainstore.PutMessage(msg) + c, err := sim.Node.Chainstore.PutMessage(ctx, msg) if err != nil { return cid.Undef, err } diff --git a/cmd/lotus-sim/simulation/mock/mock.go b/cmd/lotus-sim/simulation/mock/mock.go index 38648f758..7656aaa28 100644 --- a/cmd/lotus-sim/simulation/mock/mock.go +++ b/cmd/lotus-sim/simulation/mock/mock.go @@ -6,6 +6,8 @@ import ( "encoding/binary" "fmt" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" @@ -70,6 +72,12 @@ func (mockVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyPro ) return false, nil } + +// TODO: do the thing +func (mockVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + return false, nil +} + func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { panic("should not be called") } diff --git a/cmd/lotus-sim/simulation/node.go b/cmd/lotus-sim/simulation/node.go index c18f27a33..da027ff4f 100644 --- a/cmd/lotus-sim/simulation/node.go +++ b/cmd/lotus-sim/simulation/node.go @@ -135,7 +135,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) StateManager: sm, stages: stages, } - if has, err := nd.MetadataDS.Has(sim.key("head")); err != nil { + if has, err := nd.MetadataDS.Has(ctx, sim.key("head")); err != nil { return nil, err } else if has { return nil, xerrors.Errorf("simulation named %s already exists", name) @@ -155,7 +155,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) // ListSims lists all simulations. func (nd *Node) ListSims(ctx context.Context) ([]string, error) { prefix := simulationPrefix.ChildString("head").String() - items, err := nd.MetadataDS.Query(query.Query{ + items, err := nd.MetadataDS.Query(ctx, query.Query{ Prefix: prefix, KeysOnly: true, Orders: []query.Order{query.OrderByKey{}}, @@ -192,7 +192,7 @@ func (nd *Node) DeleteSim(ctx context.Context, name string) error { var err error for _, field := range simFields { key := simulationPrefix.ChildString(field).ChildString(name) - err = multierr.Append(err, nd.MetadataDS.Delete(key)) + err = multierr.Append(err, nd.MetadataDS.Delete(ctx, key)) } return err } @@ -209,7 +209,7 @@ func (nd *Node) CopySim(ctx context.Context, oldName, newName string) error { values := make(map[string][]byte) for _, field := range simFields { key := simulationPrefix.ChildString(field).ChildString(oldName) - value, err := nd.MetadataDS.Get(key) + value, err := nd.MetadataDS.Get(ctx, key) if err == datastore.ErrNotFound { continue } else if err != nil { @@ -226,9 +226,9 @@ func (nd *Node) CopySim(ctx context.Context, oldName, newName string) error { key := simulationPrefix.ChildString(field).ChildString(newName) var err error if value, ok := values[field]; ok { - err = nd.MetadataDS.Put(key, value) + err = nd.MetadataDS.Put(ctx, key, value) } else { - err = nd.MetadataDS.Delete(key) + err = nd.MetadataDS.Delete(ctx, key) } if err != nil { return err diff --git a/cmd/lotus-sim/simulation/simulation.go b/cmd/lotus-sim/simulation/simulation.go index 02792e332..5747afe4d 100644 --- a/cmd/lotus-sim/simulation/simulation.go +++ b/cmd/lotus-sim/simulation/simulation.go @@ -90,7 +90,7 @@ type Simulation struct { // loadConfig loads a simulation's config from the datastore. 
This must be called on startup and may // be called to restore the config from-disk. func (sim *Simulation) loadConfig() error { - configBytes, err := sim.Node.MetadataDS.Get(sim.key("config")) + configBytes, err := sim.Node.MetadataDS.Get(context.TODO(), sim.key("config")) if err == nil { err = json.Unmarshal(configBytes, &sim.config) } @@ -111,7 +111,7 @@ func (sim *Simulation) saveConfig() error { if err != nil { return err } - return sim.Node.MetadataDS.Put(sim.key("config"), buf) + return sim.Node.MetadataDS.Put(context.TODO(), sim.key("config"), buf) } var simulationPrefix = datastore.NewKey("/simulation") @@ -124,7 +124,7 @@ func (sim *Simulation) key(subkey string) datastore.Key { // loadNamedTipSet the tipset with the given name (for this simulation) func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { - tskBytes, err := sim.Node.MetadataDS.Get(sim.key(name)) + tskBytes, err := sim.Node.MetadataDS.Get(context.TODO(), sim.key(name)) if err != nil { return nil, xerrors.Errorf("failed to load tipset %s/%s: %w", sim.name, name, err) } @@ -132,7 +132,7 @@ func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { if err != nil { return nil, xerrors.Errorf("failed to parse tipste %v (%s/%s): %w", tskBytes, sim.name, name, err) } - ts, err := sim.Node.Chainstore.LoadTipSet(tsk) + ts, err := sim.Node.Chainstore.LoadTipSet(context.TODO(), tsk) if err != nil { return nil, xerrors.Errorf("failed to load tipset %s (%s/%s): %w", tsk, sim.name, name, err) } @@ -141,7 +141,7 @@ func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { // storeNamedTipSet stores the tipset at name (relative to the simulation). func (sim *Simulation) storeNamedTipSet(name string, ts *types.TipSet) error { - if err := sim.Node.MetadataDS.Put(sim.key(name), ts.Key().Bytes()); err != nil { + if err := sim.Node.MetadataDS.Put(context.TODO(), sim.key(name), ts.Key().Bytes()); err != nil { return xerrors.Errorf("failed to store tipset (%s/%s): %w", sim.name, name, err) } return nil @@ -308,7 +308,7 @@ func (sim *Simulation) Walk( stCid = ts.MinTicketBlock().ParentStateRoot recCid = ts.MinTicketBlock().ParentMessageReceipts - ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + ts, err = sim.Node.Chainstore.LoadTipSet(ctx, ts.Parents()) if err != nil { return xerrors.Errorf("loading parent: %w", err) } @@ -342,7 +342,7 @@ func (sim *Simulation) Walk( break } - msgs, err := sim.Node.Chainstore.MessagesForTipset(job.ts) + msgs, err := sim.Node.Chainstore.MessagesForTipset(ctx, job.ts) if err != nil { return err } diff --git a/cmd/lotus-stats/docker-compose.yml b/cmd/lotus-stats/docker-compose.yml index b08a2157e..4453f49ec 100644 --- a/cmd/lotus-stats/docker-compose.yml +++ b/cmd/lotus-stats/docker-compose.yml @@ -2,7 +2,7 @@ version: '3' services: influxdb: - image: influxdb:latest + image: influxdb:1.8 container_name: influxdb ports: - "18086:8086" diff --git a/cmd/lotus-stats/main.go b/cmd/lotus-stats/main.go index b4c13ea8c..706a453eb 100644 --- a/cmd/lotus-stats/main.go +++ b/cmd/lotus-stats/main.go @@ -2,18 +2,36 @@ package main import ( "context" + "net/http" + _ "net/http/pprof" "os" + "time" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/tools/stats" + "github.com/filecoin-project/lotus/tools/stats/influx" + "github.com/filecoin-project/lotus/tools/stats/ipldstore" + "github.com/filecoin-project/lotus/tools/stats/metrics" + 
"github.com/filecoin-project/lotus/tools/stats/points" + "github.com/filecoin-project/lotus/tools/stats/sync" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" + + "contrib.go.opencensus.io/exporter/prometheus" + stats "go.opencensus.io/stats" + "go.opencensus.io/stats/view" ) var log = logging.Logger("stats") +func init() { + if err := view.Register(metrics.DefaultViews...); err != nil { + log.Fatal(err) + } +} + func main() { local := []*cli.Command{ runCmd, @@ -37,7 +55,7 @@ func main() { }, }, Before: func(cctx *cli.Context) error { - return logging.SetLogLevel("stats", cctx.String("log-level")) + return logging.SetLogLevelRegex("stats/*", cctx.String("log-level")) }, Commands: local, } @@ -104,6 +122,12 @@ var runCmd = &cli.Command{ Usage: "do not wait for chain sync to complete", Value: false, }, + &cli.IntFlag{ + Name: "ipld-store-cache-size", + Usage: "size of lru cache for ChainReadObj", + EnvVars: []string{"LOTUS_STATS_IPLD_STORE_CACHE_SIZE"}, + Value: 2 << 15, + }, }, Action: func(cctx *cli.Context) error { ctx := context.Background() @@ -118,30 +142,35 @@ var runCmd = &cli.Command{ influxPasswordFlag := cctx.String("influx-password") influxDatabaseFlag := cctx.String("influx-database") + ipldStoreCacheSizeFlag := cctx.Int("ipld-store-cache-size") + log.Infow("opening influx client", "hostname", influxHostnameFlag, "username", influxUsernameFlag, "database", influxDatabaseFlag) - influx, err := stats.InfluxClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag) + influxClient, err := influx.NewClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag) if err != nil { - log.Fatal(err) + return err } + exporter, err := prometheus.NewExporter(prometheus.Options{ + Namespace: "lotus_stats", + }) + if err != nil { + return err + } + + go func() { + http.Handle("/metrics", exporter) + if err := http.ListenAndServe(":6688", nil); err != nil { + log.Errorw("failed to start http server", "err", err) + } + }() + if resetFlag { - if err := stats.ResetDatabase(influx, influxDatabaseFlag); err != nil { - log.Fatal(err) + if err := influx.ResetDatabase(influxClient, influxDatabaseFlag); err != nil { + return err } } - height := int64(heightFlag) - - if !resetFlag && height == 0 { - h, err := stats.GetLastRecordedHeight(influx, influxDatabaseFlag) - if err != nil { - log.Info(err) - } - - height = h - } - api, closer, err := lcli.GetFullNodeAPI(cctx) if err != nil { return err @@ -149,12 +178,89 @@ var runCmd = &cli.Command{ defer closer() if !noSyncFlag { - if err := stats.WaitForSyncComplete(ctx, api); err != nil { - log.Fatal(err) + if err := sync.SyncWait(ctx, api); err != nil { + return err } } - stats.Collect(ctx, api, influx, influxDatabaseFlag, height, headLagFlag) + gtp, err := api.ChainGetGenesis(ctx) + if err != nil { + return err + } + + genesisTime := time.Unix(int64(gtp.MinTimestamp()), 0) + + // When height is set to `0` we will resume from the best height we can. 
+ // The goal is to ensure we have data in the last 60 tipsets + height := int64(heightFlag) + if !resetFlag && height == 0 { + lastHeight, err := influx.GetLastRecordedHeight(influxClient, influxDatabaseFlag) + if err != nil { + return err + } + + sinceGenesis := build.Clock.Now().Sub(genesisTime) + expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs) + + startOfWindowHeight := expectedHeight - 60 + + if lastHeight > startOfWindowHeight { + height = lastHeight + } else { + height = startOfWindowHeight + } + + ts, err := api.ChainHead(ctx) + if err != nil { + return err + } + + headHeight := int64(ts.Height()) + if headHeight < height { + height = headHeight + } + } + + go func() { + t := time.NewTicker(time.Second) + + for { + select { + case <-t.C: + sinceGenesis := build.Clock.Now().Sub(genesisTime) + expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs) + + stats.Record(ctx, metrics.TipsetCollectionHeightExpected.M(expectedHeight)) + } + } + }() + + store, err := ipldstore.NewApiIpldStore(ctx, api, ipldStoreCacheSizeFlag) + if err != nil { + return err + } + + collector, err := points.NewChainPointCollector(ctx, store, api) + if err != nil { + return err + } + + tipsets, err := sync.BufferedTipsetChannel(ctx, api, abi.ChainEpoch(height), headLagFlag) + if err != nil { + return err + } + + wq := influx.NewWriteQueue(ctx, influxClient) + defer wq.Close() + + for tipset := range tipsets { + if nb, err := collector.Collect(ctx, tipset); err != nil { + log.Warnw("failed to collect points", "err", err) + } else { + nb.SetDatabase(influxDatabaseFlag) + wq.AddBatch(nb) + } + } return nil }, diff --git a/cmd/lotus/backup.go b/cmd/lotus/backup.go index d41e0c098..4bdd21322 100644 --- a/cmd/lotus/backup.go +++ b/cmd/lotus/backup.go @@ -1,7 +1,6 @@ package main import ( - "context" "os" dstore "github.com/ipfs/go-datastore" @@ -88,7 +87,7 @@ func restore(cctx *cli.Context, r repo.Repo) error { log.Info("Restoring metadata backup") - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(cctx.Context, "/metadata") if err != nil { return err } @@ -111,10 +110,10 @@ func restore(cctx *cli.Context, r repo.Repo) error { log.Info("Resetting chainstore metadata") chainHead := dstore.NewKey("head") - if err := mds.Delete(chainHead); err != nil { + if err := mds.Delete(cctx.Context, chainHead); err != nil { return xerrors.Errorf("clearing chain head: %w", err) } - if err := store.FlushValidationCache(mds); err != nil { + if err := store.FlushValidationCache(cctx.Context, mds); err != nil { return xerrors.Errorf("clearing chain validation cache: %w", err) } diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 3f5de8138..813a0a9bd 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -474,7 +474,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return xerrors.Errorf("failed to open blockstore: %w", err) } - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(ctx, "/metadata") if err != nil { return err } @@ -499,14 +499,14 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) bar.Units = pb.U_BYTES bar.Start() - ts, err := cst.Import(br) + ts, err := cst.Import(ctx, br) bar.Finish() if err != nil { return xerrors.Errorf("importing chain failed: %w", err) } - if err := cst.FlushValidationCache(); err != nil { + if err := cst.FlushValidationCache(ctx); err != nil { return xerrors.Errorf("flushing validation cache failed: %w", 
err) } @@ -515,7 +515,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return err } - err = cst.SetGenesis(gb.Blocks()[0]) + err = cst.SetGenesis(ctx, gb.Blocks()[0]) if err != nil { return err } diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go index 7aa2e704e..c19b9fce4 100644 --- a/cmd/lotus/main.go +++ b/cmd/lotus/main.go @@ -39,7 +39,7 @@ func main() { jaeger := tracing.SetupJaegerTracing("lotus") defer func() { if jaeger != nil { - jaeger.Flush() + _ = jaeger.ForceFlush(context.Background()) } }() @@ -47,7 +47,9 @@ func main() { cmd := cmd originBefore := cmd.Before cmd.Before = func(cctx *cli.Context) error { - trace.UnregisterExporter(jaeger) + if jaeger != nil { + _ = jaeger.Shutdown(cctx.Context) + } jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) if originBefore != nil { diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go index 040005641..ba54a7f9e 100644 --- a/cmd/tvx/stores.go +++ b/cmd/tvx/stores.go @@ -113,14 +113,14 @@ func (pb *proxyingBlockstore) FinishTracing() map[cid.Cid]struct{} { return ret } -func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { +func (pb *proxyingBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { pb.lk.Lock() if pb.tracing { pb.traced[cid] = struct{}{} } pb.lk.Unlock() - if block, err := pb.Blockstore.Get(cid); err == nil { + if block, err := pb.Blockstore.Get(ctx, cid); err == nil { return block, err } @@ -134,7 +134,7 @@ func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { return nil, err } - err = pb.Blockstore.Put(block) + err = pb.Blockstore.Put(ctx, block) if err != nil { return nil, err } @@ -142,16 +142,16 @@ func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { return block, nil } -func (pb *proxyingBlockstore) Put(block blocks.Block) error { +func (pb *proxyingBlockstore) Put(ctx context.Context, block blocks.Block) error { pb.lk.Lock() if pb.tracing { pb.traced[block.Cid()] = struct{}{} } pb.lk.Unlock() - return pb.Blockstore.Put(block) + return pb.Blockstore.Put(ctx, block) } -func (pb *proxyingBlockstore) PutMany(blocks []blocks.Block) error { +func (pb *proxyingBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { pb.lk.Lock() if pb.tracing { for _, b := range blocks { @@ -159,5 +159,5 @@ func (pb *proxyingBlockstore) PutMany(blocks []blocks.Block) error { } } pb.lk.Unlock() - return pb.Blockstore.PutMany(blocks) + return pb.Blockstore.PutMany(ctx, blocks) } diff --git a/conformance/driver.go b/conformance/driver.go index 8669089da..c6a20e359 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -153,6 +153,14 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params results: []*vm.ApplyRet{}, } + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { + vmopt.CircSupplyCalc = func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { + return big.Zero(), nil + } + + return vm.NewVM(ctx, vmopt) + }) + postcid, receiptsroot, err := tse.ApplyBlocks(context.Background(), sm, params.ParentEpoch, diff --git a/conformance/rand_fixed.go b/conformance/rand_fixed.go index c34980efe..4284f98ff 100644 --- a/conformance/rand_fixed.go +++ b/conformance/rand_fixed.go @@ -3,6 +3,8 @@ package conformance import ( "context" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" @@ -19,22 +21,10 @@ func NewFixedRand() vm.Rand { 
return &fixedRand{} } -func (r *fixedRand) GetChainRandomnessV1(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { +func (r *fixedRand) GetChainRandomness(_ context.Context, _ network.Version, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. } -func (r *fixedRand) GetChainRandomnessV2(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { - return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. -} - -func (r *fixedRand) GetBeaconRandomnessV3(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { - return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. -} - -func (r *fixedRand) GetBeaconRandomnessV1(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { - return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. -} - -func (r *fixedRand) GetBeaconRandomnessV2(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { +func (r *fixedRand) GetBeaconRandomness(_ context.Context, _ network.Version, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. } diff --git a/conformance/rand_record.go b/conformance/rand_record.go index 97bd93eb4..f6eeaa6c9 100644 --- a/conformance/rand_record.go +++ b/conformance/rand_record.go @@ -5,6 +5,8 @@ import ( "fmt" "sync" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" @@ -45,17 +47,9 @@ func (r *RecordingRand) loadHead() { r.head = head.Key() } -func (r *RecordingRand) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *RecordingRand) GetChainRandomness(ctx context.Context, nv network.Version, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { r.once.Do(r.loadHead) - // FullNode's ChainGetRandomnessFromTickets handles whether we should be looking forward or back + // FullNode's v0 ChainGetRandomnessFromTickets handles whether we should be looking forward or back ret, err := r.api.ChainGetRandomnessFromTickets(ctx, r.head, pers, round, entropy) if err != nil { return ret, err @@ -79,19 +73,7 @@ func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.Doma return ret, err } -func (r *RecordingRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) GetBeaconRandomnessV2(ctx context.Context, 
pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, nv network.Version, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { r.once.Do(r.loadHead) ret, err := r.api.StateGetRandomnessFromBeacon(ctx, pers, round, entropy, r.head) if err != nil { diff --git a/conformance/rand_replay.go b/conformance/rand_replay.go index 5d850f7eb..1907ddf24 100644 --- a/conformance/rand_replay.go +++ b/conformance/rand_replay.go @@ -4,6 +4,8 @@ import ( "bytes" "context" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" @@ -43,15 +45,7 @@ func (r *ReplayingRand) match(requested schema.RandomnessRule) ([]byte, bool) { return nil, false } -func (r *ReplayingRand) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy, false) -} - -func (r *ReplayingRand) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy, true) -} - -func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { +func (r *ReplayingRand) GetChainRandomness(ctx context.Context, nv network.Version, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { rule := schema.RandomnessRule{ Kind: schema.RandomnessChain, DomainSeparationTag: int64(pers), @@ -66,26 +60,10 @@ func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.Doma r.reporter.Logf("returning fallback chain randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) - if lookback { - return r.fallback.GetChainRandomnessV1(ctx, pers, round, entropy) - } - - return r.fallback.GetChainRandomnessV2(ctx, pers, round, entropy) + return r.fallback.GetChainRandomness(ctx, nv, pers, round, entropy) } -func (r *ReplayingRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy, false) -} - -func (r *ReplayingRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy, true) -} - -func (r *ReplayingRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy, true) -} - -func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { +func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, nv network.Version, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { rule := schema.RandomnessRule{ Kind: schema.RandomnessBeacon, DomainSeparationTag: int64(pers), @@ -100,9 +78,5 @@ 
func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.Dom r.reporter.Logf("returning fallback beacon randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) - if lookback { - return r.fallback.GetBeaconRandomnessV1(ctx, pers, round, entropy) - } - - return r.fallback.GetBeaconRandomnessV3(ctx, pers, round, entropy) + return r.fallback.GetBeaconRandomness(ctx, nv, pers, round, entropy) } diff --git a/conformance/runner.go b/conformance/runner.go index 1044bb329..e597f0bbe 100644 --- a/conformance/runner.go +++ b/conformance/runner.go @@ -282,7 +282,7 @@ func writeStateToTempCAR(bs blockstore.Blockstore, roots ...cid.Cid) (string, er continue } // ignore things we don't have, the state tree is incomplete. - if has, err := bs.Has(link.Cid); err != nil { + if has, err := bs.Has(context.TODO(), link.Cid); err != nil { return nil, err } else if has { out = append(out, link) @@ -317,7 +317,7 @@ func LoadBlockstore(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, defer r.Close() // nolint // Load the CAR embedded in the test vector into the Blockstore. - _, err = car.LoadCar(bs, r) + _, err = car.LoadCar(context.TODO(), bs, r) if err != nil { return nil, fmt.Errorf("failed to load state tree car from test vector: %s", err) } diff --git a/docker-compose.yaml b/docker-compose.yaml index b962d5cc2..d68eed8db 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -103,6 +103,7 @@ services: # - FULLNODE_API_INFO=/dns/lotus/tcp/1234/http # - LOTUS_JAEGER_AGENT_HOST=jaeger # - LOTUS_JAEGER_AGENT_PORT=6831 + # - DOCKER_LOTUS_MINER_INIT=true # deploy: # restart_policy: # condition: on-failure diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 4d14bcb0e..fbe8b6eb5 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -94,9 +94,13 @@ * [ReturnAddPiece](#ReturnAddPiece) * [ReturnFetch](#ReturnFetch) * [ReturnFinalizeSector](#ReturnFinalizeSector) + * [ReturnGenerateSectorKeyFromData](#ReturnGenerateSectorKeyFromData) * [ReturnMoveStorage](#ReturnMoveStorage) + * [ReturnProveReplicaUpdate1](#ReturnProveReplicaUpdate1) + * [ReturnProveReplicaUpdate2](#ReturnProveReplicaUpdate2) * [ReturnReadPiece](#ReturnReadPiece) * [ReturnReleaseUnsealed](#ReturnReleaseUnsealed) + * [ReturnReplicaUpdate](#ReturnReplicaUpdate) * [ReturnSealCommit1](#ReturnSealCommit1) * [ReturnSealCommit2](#ReturnSealCommit2) * [ReturnSealPreCommit1](#ReturnSealPreCommit1) @@ -138,6 +142,7 @@ * [StorageDeclareSector](#StorageDeclareSector) * [StorageDropSector](#StorageDropSector) * [StorageFindSector](#StorageFindSector) + * [StorageGetLocks](#StorageGetLocks) * [StorageInfo](#StorageInfo) * [StorageList](#StorageList) * [StorageLocal](#StorageLocal) @@ -1440,6 +1445,30 @@ Response: `{}` ### ReturnFinalizeSector +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnGenerateSectorKeyFromData + + Perms: admin Inputs: @@ -1485,6 +1514,56 @@ Inputs: Response: `{}` +### ReturnProveReplicaUpdate1 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + null, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnProveReplicaUpdate2 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 
1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + null, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + ### ReturnReadPiece @@ -1534,6 +1613,38 @@ Inputs: Response: `{}` +### ReturnReplicaUpdate + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "NewSealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "NewUnsealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + ### ReturnSealCommit1 @@ -2148,7 +2259,9 @@ Inputs: "Weight": 42, "MaxStorage": 42, "CanSeal": true, - "CanStore": true + "CanStore": true, + "Groups": null, + "AllowTo": null }, { "Capacity": 9, @@ -2238,6 +2351,41 @@ Inputs: Response: `null` +### StorageGetLocks + + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "Locks": [ + { + "Sector": { + "Miner": 1000, + "Number": 123 + }, + "Write": [ + 0, + 0, + 1, + 0, + 0 + ], + "Read": [ + 2, + 3, + 0, + 0, + 0 + ] + } + ] +} +``` + ### StorageInfo @@ -2258,7 +2406,9 @@ Response: "Weight": 42, "MaxStorage": 42, "CanSeal": true, - "CanStore": true + "CanStore": true, + "Groups": null, + "AllowTo": null } ``` @@ -2449,18 +2599,595 @@ Response: "IgnoreResources": false, "Resources": { "MemPhysical": 274877906944, + "MemUsed": 2147483648, "MemSwap": 128849018880, - "MemReserved": 2147483648, + "MemSwapUsed": 2147483648, "CPUs": 64, "GPUs": [ "aGPU 1337" - ] + ], + "Resources": { + "seal/v0/addpiece": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + 
"MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "3": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "4": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "8": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "9": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + } + }, + "seal/v0/fetch": { + "0": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "1": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "2": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + 
"MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "3": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "4": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "5": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "6": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "7": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "8": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "9": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + } + }, + "seal/v0/precommit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + }, + "seal/v0/precommit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 32212254720, + "MaxMemory": 
32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/unseal": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + } + } } }, "Enabled": true, "MemUsedMin": 0, "MemUsedMax": 0, - "GpuUsed": false, + "GpuUsed": 0, "CpuUse": 0 } } diff --git a/documentation/en/api-v0-methods-worker.md b/documentation/en/api-v0-methods-worker.md index c620113f4..35f337121 100644 --- a/documentation/en/api-v0-methods-worker.md +++ b/documentation/en/api-v0-methods-worker.md @@ -11,12 +11,19 @@ * [AddPiece](#AddPiece) * [Finalize](#Finalize) * [FinalizeSector](#FinalizeSector) +* [Generate](#Generate) + * [GenerateSectorKeyFromData](#GenerateSectorKeyFromData) * [Move](#Move) * [MoveStorage](#MoveStorage) * [Process](#Process) * [ProcessSession](#ProcessSession) +* [Prove](#Prove) + * [ProveReplicaUpdate1](#ProveReplicaUpdate1) + * [ProveReplicaUpdate2](#ProveReplicaUpdate2) * [Release](#Release) * [ReleaseUnsealed](#ReleaseUnsealed) +* [Replica](#Replica) + * [ReplicaUpdate](#ReplicaUpdate) * [Seal](#Seal) * [SealCommit1](#SealCommit1) * 
[SealCommit2](#SealCommit2) @@ -92,10 +99,587 @@ Response: "IgnoreResources": true, "Resources": { "MemPhysical": 42, + "MemUsed": 42, "MemSwap": 42, - "MemReserved": 42, + "MemSwapUsed": 42, "CPUs": 42, - "GPUs": null + "GPUs": null, + "Resources": { + "seal/v0/addpiece": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + 
"MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "3": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "4": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "8": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "9": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + } + }, + "seal/v0/fetch": { + "0": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "1": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "2": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "3": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "4": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "5": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "6": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "7": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "8": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "9": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + } + }, + "seal/v0/precommit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + 
"MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + }, + "seal/v0/precommit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/unseal": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + 
"GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + } + } } } ``` @@ -215,6 +799,41 @@ Response: } ``` +## Generate + + +### GenerateSectorKeyFromData + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + ## Move @@ -262,12 +881,125 @@ Inputs: `null` Response: `"07070707-0707-0707-0707-070707070707"` +## Prove + + +### ProveReplicaUpdate1 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### ProveReplicaUpdate2 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + ## Release ### ReleaseUnsealed +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + null +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Replica + + +### ReplicaUpdate + + Perms: admin Inputs: diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 4d9530821..257da9d13 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -1269,7 +1269,8 @@ Response: "Stages": { "Stages": null } - } + }, + "Event": 5 } ``` @@ -4698,7 +4699,7 @@ Inputs: ] ``` -Response: `14` 
+Response: `15` ### StateReadState StateReadState returns the indicated actor's state. @@ -5016,7 +5017,8 @@ Response: "VerifiedDealWeight": "0", "InitialPledge": "0", "ExpectedDayReward": "0", - "ExpectedStoragePledge": "0" + "ExpectedStoragePledge": "0", + "SectorKeyCID": null } ``` diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index b03f75e9d..9624f0e26 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -41,6 +41,7 @@ * [ClientDataTransferUpdates](#ClientDataTransferUpdates) * [ClientDealPieceCID](#ClientDealPieceCID) * [ClientDealSize](#ClientDealSize) + * [ClientExport](#ClientExport) * [ClientFindData](#ClientFindData) * [ClientGenCar](#ClientGenCar) * [ClientGetDealInfo](#ClientGetDealInfo) @@ -59,7 +60,7 @@ * [ClientRestartDataTransfer](#ClientRestartDataTransfer) * [ClientRetrieve](#ClientRetrieve) * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) - * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents) + * [ClientRetrieveWait](#ClientRetrieveWait) * [ClientStartDeal](#ClientStartDeal) * [ClientStatelessDeal](#ClientStatelessDeal) * [Create](#Create) @@ -108,6 +109,7 @@ * [MsigApprove](#MsigApprove) * [MsigApproveTxnHash](#MsigApproveTxnHash) * [MsigCancel](#MsigCancel) + * [MsigCancelTxnHash](#MsigCancelTxnHash) * [MsigCreate](#MsigCreate) * [MsigGetAvailableBalance](#MsigGetAvailableBalance) * [MsigGetPending](#MsigGetPending) @@ -1054,6 +1056,32 @@ Response: } ``` +### ClientExport +ClientExport exports a file stored in the local filestore to a system file + + +Perms: admin + +Inputs: +```json +[ + { + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DAGs": null, + "FromLocalCAR": "string value", + "DealID": 5 + }, + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: `{}` + ### ClientFindData ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). @@ -1281,7 +1309,8 @@ Response: "Stages": { "Stages": null } - } + }, + "Event": 5 } ``` @@ -1481,9 +1510,8 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "Piece": null, - "DatamodelPathSelector": "Links/21/Hash/Links/42/Hash", + "DataSelector": "Links/21/Hash/Links/42/Hash", "Size": 42, - "FromLocalCAR": "string value", "Total": "0", "UnsealPrice": "0", "PaymentInterval": 42, @@ -1495,15 +1523,16 @@ Inputs: "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "PieceCID": null } - }, - { - "Path": "string value", - "IsCAR": true } ] ``` -Response: `{}` +Response: +```json +{ + "DealID": 5 +} +``` ### ClientRetrieveTryRestartInsufficientFunds ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel @@ -1521,9 +1550,8 @@ Inputs: Response: `{}` -### ClientRetrieveWithEvents -ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel -of status updates. 
+### ClientRetrieveWait +ClientRetrieveWait waits for retrieval to be complete Perms: admin @@ -1531,43 +1559,11 @@ Perms: admin Inputs: ```json [ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": null, - "DatamodelPathSelector": "Links/21/Hash/Links/42/Hash", - "Size": 42, - "FromLocalCAR": "string value", - "Total": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Client": "f01234", - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": null - } - }, - { - "Path": "string value", - "IsCAR": true - } + 5 ] ``` -Response: -```json -{ - "Event": 5, - "Status": 0, - "BytesReceived": 42, - "FundsSpent": "0", - "Err": "string value" -} -``` +Response: `{}` ### ClientStartDeal ClientStartDeal proposes a deal with a miner. @@ -2702,6 +2698,44 @@ Response: ### MsigCancel MsigCancel cancels a previously-proposed multisig message +It takes the following params: , + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigCancelTxnHash +MsigCancel cancels a previously-proposed multisig message It takes the following params: , , , , , , @@ -4948,7 +4982,7 @@ Inputs: ] ``` -Response: `14` +Response: `15` ### StateReadState StateReadState returns the indicated actor's state. @@ -5223,7 +5257,8 @@ Response: "VerifiedDealWeight": "0", "InitialPledge": "0", "ExpectedDayReward": "0", - "ExpectedStoragePledge": "0" + "ExpectedStoragePledge": "0", + "SectorKeyCID": null } ``` diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index b80238871..fa4f08b4b 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] VERSION: - 1.13.2-dev + 1.13.3-dev COMMANDS: init Initialize a lotus miner repo @@ -590,7 +590,8 @@ CATEGORY: DEVELOPER OPTIONS: - --help, -h show help (default: false) + --timeout value duration to wait till fail (default: 30s) + --help, -h show help (default: false) ``` @@ -1459,7 +1460,8 @@ USAGE: lotus-miner pieces list-cids [command options] [arguments...] OPTIONS: - --help, -h show help (default: false) + --verbose, -v (default: false) + --help, -h show help (default: false) ``` @@ -1939,6 +1941,7 @@ COMMANDS: list list local storage paths find find sector in the storage system cleanup trigger cleanup actions + locks show active sector locks help, h Shows a list of commands or help for one command OPTIONS: @@ -1982,6 +1985,8 @@ OPTIONS: --seal (for init) use path for sealing (default: false) --store (for init) use path for long-term storage (default: false) --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) 
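The v1 API changes documented above split retrieval into three calls: ClientRetrieve now only starts the retrieval deal and returns a deal ID, ClientRetrieveWait blocks until that deal completes, and ClientExport writes the fetched data (optionally a sub-DAG chosen with DataSelector) to a file or CAR. Below is a minimal sketch of that sequence; the `retrievalAPI` interface and the order/ref structs are illustrative stand-ins typed from the JSON shapes above, not the real FullNode signatures.

```go
package retrievalsketch

import (
	"context"
	"fmt"
)

type retrievalOrder struct {
	Root         string // payload CID to fetch
	DataSelector string // optional IPLD text-path selector, e.g. "Links/21/Hash"
	Miner        string // provider to retrieve from
}

type exportRef struct {
	Root   string
	DealID uint64
}

type fileRef struct {
	Path  string
	IsCAR bool
}

// retrievalAPI is an assumed subset of the node API used for illustration.
type retrievalAPI interface {
	ClientRetrieve(ctx context.Context, order retrievalOrder) (uint64, error) // returns the deal ID
	ClientRetrieveWait(ctx context.Context, dealID uint64) error
	ClientExport(ctx context.Context, ref exportRef, out fileRef) error
}

func retrieveToFile(ctx context.Context, api retrievalAPI, root, miner, path string) error {
	dealID, err := api.ClientRetrieve(ctx, retrievalOrder{Root: root, Miner: miner})
	if err != nil {
		return fmt.Errorf("starting retrieval: %w", err)
	}
	if err := api.ClientRetrieveWait(ctx, dealID); err != nil {
		return fmt.Errorf("waiting for retrieval %d: %w", dealID, err)
	}
	return api.ClientExport(ctx, exportRef{Root: root, DealID: dealID}, fileRef{Path: path})
}
```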
+ --groups value path group names + --allow-to value path groups allowed to pull data from this path (allow all if not specified) --help, -h show help (default: false) ``` @@ -2046,6 +2051,19 @@ OPTIONS: ``` +### lotus-miner storage locks +``` +NAME: + lotus-miner storage locks - show active sector locks + +USAGE: + lotus-miner storage locks [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + ## lotus-miner sealing ``` NAME: diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index da6cefd18..ac133f908 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] VERSION: - 1.13.2-dev + 1.13.3-dev COMMANDS: run Start lotus worker @@ -15,6 +15,7 @@ COMMANDS: storage manage sector storage set Manage worker settings wait-quiet Block until all running tasks exit + resources Manage resource table overrides tasks Manage task processing help, h Shows a list of commands or help for one command @@ -94,6 +95,8 @@ OPTIONS: --seal (for init) use path for sealing (default: false) --store (for init) use path for long-term storage (default: false) --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) + --groups value path group names + --allow-to value path groups allowed to pull data from this path (allow all if not specified) --help, -h show help (default: false) ``` @@ -125,6 +128,21 @@ OPTIONS: ``` +## lotus-worker resources +``` +NAME: + lotus-worker resources - Manage resource table overrides + +USAGE: + lotus-worker resources [command options] [arguments...] + +OPTIONS: + --all print all resource envvars (default: false) + --default print default resource envvars (default: false) + --help, -h show help (default: false) + +``` + ## lotus-worker tasks ``` NAME: diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index f6887bb6c..eaefe25ce 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.13.2-dev + 1.13.3-dev COMMANDS: daemon Start a lotus daemon process @@ -426,6 +426,8 @@ COMMANDS: RETRIEVAL: find Find data in the network retrieve Retrieve data from network + cat Show data from network + ls List object links cancel-retrieval Cancel a retrieval deal by deal ID; this also cancels the associated transfer list-retrievals List retrieval market deals STORAGE: @@ -544,12 +546,94 @@ USAGE: CATEGORY: RETRIEVAL +DESCRIPTION: + Retrieve data from the Filecoin network. + +The retrieve command will attempt to find a provider make a retrieval deal with +them. In case a provider can't be found, it can be specified with the --provider +flag. + +By default the data will be interpreted as DAG-PB UnixFSv1 File. Alternatively +a CAR file containing the raw IPLD graph can be exported by setting the --car +flag. + +Partial Retrieval: + +The --data-selector flag can be used to specify a sub-graph to fetch. The +selector can be specified as either IPLD datamodel text-path selector, or IPLD +json selector. + +In case of unixfs retrieval, the selector must point at a single root node, and +match the entire graph under that node. + +In case of CAR retrieval, the selector must have one common "sub-root" node. + +Examples: + +- Retrieve a file by CID + $ lotus client retrieve Qm... 
my-file.txt + +- Retrieve a file by CID from f0123 + $ lotus client retrieve --provider f0123 Qm... my-file.txt + +- Retrieve a first file from a specified directory + $ lotus client retrieve --data-selector /Links/0/Hash Qm... my-file.txt + + OPTIONS: + --car Export to a car file instead of a regular file (default: false) + --data-selector value, --datamodel-path-selector value IPLD datamodel text-path selector, or IPLD json selector + --car-export-merkle-proof (requires --data-selector and --car) Export data-selector merkle proof (default: false) + --from value address to send transactions from + --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery + --maxPrice value maximum price the client is willing to consider (default: 0 FIL) + --pieceCid value require data to be retrieved from a specific Piece CID + --allow-local (default: false) + --help, -h show help (default: false) + +``` + +### lotus client cat +``` +NAME: + lotus client cat - Show data from network + +USAGE: + lotus client cat [command options] [dataCid] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --ipld list IPLD datamodel links (default: false) + --data-selector value IPLD datamodel text-path selector, or IPLD json selector --from value address to send transactions from - --car export to a car file instead of a regular file (default: false) - --miner value miner address for retrieval, if not present it'll use local discovery - --datamodel-path-selector value a rudimentary (DM-level-only) text-path selector, allowing for sub-selection within a deal - --maxPrice value maximum price the client is willing to consider (default: 0.01 FIL) + --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery + --maxPrice value maximum price the client is willing to consider (default: 0 FIL) + --pieceCid value require data to be retrieved from a specific Piece CID + --allow-local (default: false) + --help, -h show help (default: false) + +``` + +### lotus client ls +``` +NAME: + lotus client ls - List object links + +USAGE: + lotus client ls [command options] [dataCid] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --ipld list IPLD datamodel links (default: false) + --depth value list links recursively up to the specified depth (default: 1) + --data-selector value IPLD datamodel text-path selector, or IPLD json selector + --from value address to send transactions from + --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery + --maxPrice value maximum price the client is willing to consider (default: 0 FIL) --pieceCid value require data to be retrieved from a specific Piece CID --allow-local (default: false) --help, -h show help (default: false) @@ -857,6 +941,7 @@ COMMANDS: propose Propose a multisig transaction propose-remove Propose to remove a signer approve Approve a multisig message + cancel Cancel a multisig message add-propose Propose to add a signer add-approve Approve a message to add a signer add-cancel Cancel a message to add a signer @@ -952,6 +1037,20 @@ OPTIONS: ``` +### lotus msig cancel +``` +NAME: + lotus msig cancel - Cancel a multisig message + +USAGE: + lotus msig cancel [command options] [destination value [methodId methodParams]] + +OPTIONS: + --from value account to send the cancel message from + --help, -h show help (default: false) + +``` + ### lotus msig add-propose ``` NAME: @@ -1580,8 +1679,18 @@ OPTIONS: --help, -h show help (default: false) ``` -# nage + +### lotus mpool manage ``` 
+NAME: + lotus mpool manage - + +USAGE: + lotus mpool manage [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + ``` ## lotus state @@ -2450,7 +2559,8 @@ CATEGORY: DEVELOPER OPTIONS: - --help, -h show help (default: false) + --timeout value duration to wait till fail (default: 30s) + --help, -h show help (default: false) ``` diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml index d402f65ed..b034698a2 100644 --- a/documentation/en/default-lotus-miner-config.toml +++ b/documentation/en/default-lotus-miner-config.toml @@ -207,6 +207,17 @@ # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGE #SimultaneousTransfersForStorage = 20 + # The maximum number of simultaneous data transfers from any single client + # for storage deals. + # Unset by default (0), and values higher than SimultaneousTransfersForStorage + # will have no effect; i.e. the total number of simultaneous data transfers + # across all storage clients is bound by SimultaneousTransfersForStorage + # regardless of this number. + # + # type: uint64 + # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGEPERCLIENT + #SimultaneousTransfersForStoragePerClient = 0 + # The maximum number of parallel online data transfers for retrieval deals # # type: uint64 diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 791238933..52d80081b 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 7912389334e347bbb2eac0520c836830875c39de +Subproject commit 52d80081bfdd8a30bc44bcfe44cb0f299615b9f3 diff --git a/extern/sector-storage/cgroups.go b/extern/sector-storage/cgroups.go new file mode 100644 index 000000000..e2ec0564e --- /dev/null +++ b/extern/sector-storage/cgroups.go @@ -0,0 +1,12 @@ +//go:build !linux +// +build !linux + +package sectorstorage + +func cgroupV1Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + return 0, 0, 0, 0, nil +} + +func cgroupV2Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + return 0, 0, 0, 0, nil +} diff --git a/extern/sector-storage/cgroups_linux.go b/extern/sector-storage/cgroups_linux.go new file mode 100644 index 000000000..38fe88f19 --- /dev/null +++ b/extern/sector-storage/cgroups_linux.go @@ -0,0 +1,117 @@ +//go:build linux +// +build linux + +package sectorstorage + +import ( + "bufio" + "bytes" + "math" + "os" + "path/filepath" + + "github.com/containerd/cgroups" + cgroupv2 "github.com/containerd/cgroups/v2" +) + +func cgroupV2MountPoint() (string, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", err + } + defer f.Close() //nolint + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := bytes.Fields(scanner.Bytes()) + if len(fields) >= 9 && bytes.Equal(fields[8], []byte("cgroup2")) { + return string(fields[4]), nil + } + } + return "", cgroups.ErrMountPointNotExist +} + +func cgroupV1Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + path := cgroups.NestedPath("") + if pid := os.Getpid(); pid == 1 { + path = cgroups.RootPath + } + c, err := cgroups.Load(cgroups.SingleSubsystem(cgroups.V1, cgroups.Memory), path) + if err != nil { + return 0, 0, 0, 0, err + } + stats, err := c.Stat() + if err != nil { + return 0, 0, 0, 0, err + } + if stats.Memory == nil { + return 0, 0, 0, 0, nil + } + if stats.Memory.Usage != nil { + memoryMax = stats.Memory.Usage.Limit + // Exclude cached files + memoryUsed = stats.Memory.Usage.Usage - stats.Memory.InactiveFile - 
stats.Memory.ActiveFile + } + if stats.Memory.Swap != nil { + swapMax = stats.Memory.Swap.Limit + swapUsed = stats.Memory.Swap.Usage + } + return memoryMax, memoryUsed, swapMax, swapUsed, nil +} + +func cgroupV2MemFromPath(mp, path string) (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + c, err := cgroupv2.LoadManager(mp, path) + if err != nil { + return 0, 0, 0, 0, err + } + + stats, err := c.Stat() + if err != nil { + return 0, 0, 0, 0, err + } + + if stats.Memory != nil { + memoryMax = stats.Memory.UsageLimit + // Exclude memory used caching files + memoryUsed = stats.Memory.Usage - stats.Memory.File + swapMax = stats.Memory.SwapLimit + swapUsed = stats.Memory.SwapUsage + } + + return memoryMax, memoryUsed, swapMax, swapUsed, nil +} + +func cgroupV2Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + memoryMax = math.MaxUint64 + swapMax = math.MaxUint64 + + path, err := cgroupv2.PidGroupPath(os.Getpid()) + if err != nil { + return 0, 0, 0, 0, err + } + + mp, err := cgroupV2MountPoint() + if err != nil { + return 0, 0, 0, 0, err + } + + for path != "/" { + cgMemoryMax, cgMemoryUsed, cgSwapMax, cgSwapUsed, err := cgroupV2MemFromPath(mp, path) + if err != nil { + return 0, 0, 0, 0, err + } + if cgMemoryMax != 0 && cgMemoryMax < memoryMax { + log.Debugf("memory limited by cgroup %s: %v", path, cgMemoryMax) + memoryMax = cgMemoryMax + memoryUsed = cgMemoryUsed + } + if cgSwapMax != 0 && cgSwapMax < swapMax { + log.Debugf("swap limited by cgroup %s: %v", path, cgSwapMax) + swapMax = cgSwapMax + swapUsed = cgSwapUsed + } + path = filepath.Dir(path) + } + + return memoryMax, memoryUsed, swapMax, swapUsed, nil +} diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index 59770ec9a..ec8554f34 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -7,6 +7,9 @@ import ( "bufio" "bytes" "context" + "crypto/rand" + "encoding/base64" + "encoding/json" "io" "math/bits" "os" @@ -21,6 +24,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" + "github.com/detailyang/go-fallocate" commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/lotus/extern/sector-storage/fr32" @@ -250,6 +254,23 @@ func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, err return pieceCID, werr() } +func (sb *Sealer) tryDecodeUpdatedReplica(ctx context.Context, sector storage.SectorRef, commD cid.Cid, unsealedPath string) (bool, error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage) + if xerrors.Is(err, storiface.ErrSectorNotFound) { + return false, nil + } else if err != nil { + return false, xerrors.Errorf("reading updated replica: %w", err) + } + defer done() + + // Sector data stored in replica update + updateProof, err := sector.ProofType.RegisteredUpdateProof() + if err != nil { + return false, err + } + return true, ffi.SectorUpdate.DecodeFrom(updateProof, unsealedPath, paths.Update, paths.Sealed, paths.Cache, commD) +} + func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { ssize, err := sector.ProofType.SectorSize() if err != nil { @@ -300,6 +321,16 @@ 
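The two probes above, cgroupV1Mem and cgroupV2Mem, report the memory and swap caps a cgroup places on the worker process; the v2 variant walks up from the process's own group and keeps the tightest limit it finds. A minimal sketch of how such caps could be folded into the memory the worker advertises is shown below; effectiveMemoryLimit is a hypothetical helper assumed to sit in the same sectorstorage package, and it treats probe errors as "no limit" purely for illustration.

```go
package sectorstorage

// effectiveMemoryLimit starts from the machine's physical total and tightens
// it by whichever cgroup (v1 or v2) reports a smaller memory cap, returning
// the cap together with the usage measured inside that cgroup.
func effectiveMemoryLimit(physicalTotal uint64) (limit, used uint64) {
	limit = physicalTotal

	if cgMax, cgUsed, _, _, err := cgroupV1Mem(); err == nil && cgMax > 0 && cgMax < limit {
		limit, used = cgMax, cgUsed
	}
	if cgMax, cgUsed, _, _, err := cgroupV2Mem(); err == nil && cgMax > 0 && cgMax < limit {
		limit, used = cgMax, cgUsed
	}
	return limit, used
}
```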
func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, off return nil } + // If piece data stored in updated replica decode whole sector + decoded, err := sb.tryDecodeUpdatedReplica(ctx, sector, commd, unsealedPath.Unsealed) + if err != nil { + return xerrors.Errorf("decoding sector from replica: %w", err) + } + if decoded { + return pf.MarkAllocated(0, maxPieceSize) + } + + // Piece data sealed in sector srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathStorage) if err != nil { return xerrors.Errorf("acquire sealed sector paths: %w", err) @@ -530,9 +561,19 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, if err != nil { return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) } - return p1o, nil + + p1odec := map[string]interface{}{} + if err := json.Unmarshal(p1o, &p1odec); err != nil { + return nil, xerrors.Errorf("unmarshaling pc1 output: %w", err) + } + + p1odec["_lotus_SealRandomness"] = ticket + + return json.Marshal(&p1odec) } +var PC2CheckRounds = 3 + func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) if err != nil { @@ -545,6 +586,50 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) } + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("get ssize: %w", err) + } + + p1odec := map[string]interface{}{} + if err := json.Unmarshal(phase1Out, &p1odec); err != nil { + return storage.SectorCids{}, xerrors.Errorf("unmarshaling pc1 output: %w", err) + } + + var ticket abi.SealRandomness + ti, found := p1odec["_lotus_SealRandomness"] + + if found { + ticket, err = base64.StdEncoding.DecodeString(ti.(string)) + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("decoding ticket: %w", err) + } + + for i := 0; i < PC2CheckRounds; i++ { + var sd [32]byte + _, _ = rand.Read(sd[:]) + + _, err := ffi.SealCommitPhase1( + sector.ProofType, + sealedCID, + unsealedCID, + paths.Cache, + paths.Sealed, + sector.ID.Number, + sector.ID.Miner, + ticket, + sd[:], + []abi.PieceInfo{{Size: abi.PaddedPieceSize(ssize), PieceCID: unsealedCID}}, + ) + if err != nil { + log.Warn("checking PreCommit failed: ", err) + log.Warnf("num:%d tkt:%v seed:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, sd[:], sealedCID, unsealedCID) + + return storage.SectorCids{}, xerrors.Errorf("checking PreCommit failed: %w", err) + } + } + } + return storage.SectorCids{ Unsealed: unsealedCID, Sealed: sealedCID, @@ -582,6 +667,109 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, pha return ffi.SealCommitPhase2(phase1Out, sector.ID.Number, sector.ID.Miner) } +func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { + empty := storage.ReplicaUpdateOut{} + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) + if err != nil { + return empty, xerrors.Errorf("failed to acquire sector paths: 
%w", err) + } + defer done() + + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + + s, err := os.Stat(paths.Sealed) + if err != nil { + return empty, err + } + sealedSize := s.Size() + + u, err := os.OpenFile(paths.Update, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec + if err != nil { + return empty, xerrors.Errorf("ensuring updated replica file exists: %w", err) + } + if err := fallocate.Fallocate(u, 0, sealedSize); err != nil { + return empty, xerrors.Errorf("allocating space for replica update file: %w", err) + } + + if err := u.Close(); err != nil { + return empty, err + } + + if err := os.Mkdir(paths.UpdateCache, 0755); err != nil { // nolint + if os.IsExist(err) { + log.Warnf("existing cache in %s; removing", paths.Cache) + + if err := os.RemoveAll(paths.UpdateCache); err != nil { + return empty, xerrors.Errorf("remove existing sector cache from %s (sector %d): %w", paths.Cache, sector, err) + } + + if err := os.Mkdir(paths.UpdateCache, 0755); err != nil { // nolint:gosec + return empty, xerrors.Errorf("mkdir cache path after cleanup: %w", err) + } + } else { + return empty, err + } + } + sealed, unsealed, err := ffi.SectorUpdate.EncodeInto(updateProofType, paths.Update, paths.UpdateCache, paths.Sealed, paths.Cache, paths.Unsealed, pieces) + if err != nil { + return empty, xerrors.Errorf("failed to update replica %d with new deal data: %w", sector.ID.Number, err) + } + + return storage.ReplicaUpdateOut{NewSealed: sealed, NewUnsealed: unsealed}, nil +} + +func (sb *Sealer) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdateCache|storiface.FTUpdate, storiface.FTNone, storiface.PathSealing) + if err != nil { + return nil, xerrors.Errorf("failed to acquire sector paths: %w", err) + } + defer done() + + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + + vanillaProofs, err := ffi.SectorUpdate.GenerateUpdateVanillaProofs(updateProofType, sectorKey, newSealed, newUnsealed, paths.Update, paths.UpdateCache, paths.Sealed, paths.Cache) + if err != nil { + return nil, xerrors.Errorf("failed to generate proof of replica update for sector %d: %w", sector.ID.Number, err) + } + return vanillaProofs, nil +} + +func (sb *Sealer) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) { + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + return ffi.SectorUpdate.GenerateUpdateProofWithVanilla(updateProofType, sectorKey, newSealed, newUnsealed, vanillaProofs) +} + +func (sb *Sealer) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTSealed, storiface.PathSealing) + defer done() + if err != nil { + return xerrors.Errorf("failed to acquire sector paths: %w", err) + } + + s, err := os.Stat(paths.Update) + if err != nil { + return xerrors.Errorf("measuring update file size: %w", err) + } + sealedSize := s.Size() + e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec + if err != nil { + return xerrors.Errorf("ensuring sector key file exists: %w", err) + } + if err := 
fallocate.Fallocate(e, 0, sealedSize); err != nil { + return xerrors.Errorf("allocating space for sector key file: %w", err) + } + if err := e.Close(); err != nil { + return err + } + + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + return ffi.SectorUpdate.RemoveData(updateProofType, paths.Sealed, paths.Cache, paths.Update, paths.UpdateCache, paths.Unsealed, commD) +} + +func (sb *Sealer) ReleaseSealed(ctx context.Context, sector storage.SectorRef) error { + return xerrors.Errorf("not supported at this layer") +} + func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { ssize, err := sector.ProofType.SectorSize() if err != nil { diff --git a/extern/sector-storage/ffiwrapper/types.go b/extern/sector-storage/ffiwrapper/types.go index a5b2fdf1f..1da7ea832 100644 --- a/extern/sector-storage/ffiwrapper/types.go +++ b/extern/sector-storage/ffiwrapper/types.go @@ -4,6 +4,8 @@ import ( "context" "io" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -36,6 +38,7 @@ type Storage interface { type Verifier interface { VerifySeal(proof5.SealVerifyInfo) (bool, error) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) + VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) diff --git a/extern/sector-storage/ffiwrapper/verifier_cgo.go b/extern/sector-storage/ffiwrapper/verifier_cgo.go index ff35ddc1f..66064b1f3 100644 --- a/extern/sector-storage/ffiwrapper/verifier_cgo.go +++ b/extern/sector-storage/ffiwrapper/verifier_cgo.go @@ -12,6 +12,7 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" @@ -120,6 +121,10 @@ func (proofVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyPr return ffi.VerifyAggregateSeals(aggregate) } +func (proofVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + return ffi.SectorUpdate.VerifyUpdateProof(update) +} + func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") diff --git a/extern/sector-storage/fr32/fr32.go b/extern/sector-storage/fr32/fr32.go index 17e6a1142..24175719c 100644 --- a/extern/sector-storage/fr32/fr32.go +++ b/extern/sector-storage/fr32/fr32.go @@ -8,7 +8,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" ) -var MTTresh = uint64(32 << 20) +var MTTresh = uint64(512 << 10) func mtChunkCount(usz abi.PaddedPieceSize) uint64 { threads := (uint64(usz)) / MTTresh diff --git a/extern/sector-storage/fr32/readers.go b/extern/sector-storage/fr32/readers.go index f14d5bf1c..163c520aa 100644 --- a/extern/sector-storage/fr32/readers.go +++ b/extern/sector-storage/fr32/readers.go @@ -16,13 +16,21 @@ type unpadReader struct { work []byte } +func BufSize(sz abi.PaddedPieceSize) int { + return int(MTTresh * mtChunkCount(sz)) +} + func 
NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) { + buf := make([]byte, BufSize(sz)) + + return NewUnpadReaderBuf(src, sz, buf) +} + +func NewUnpadReaderBuf(src io.Reader, sz abi.PaddedPieceSize, buf []byte) (io.Reader, error) { if err := sz.Validate(); err != nil { return nil, xerrors.Errorf("bad piece size: %w", err) } - buf := make([]byte, MTTresh*mtChunkCount(sz)) - return &unpadReader{ src: src, diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index 430313730..748681544 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -51,13 +51,8 @@ type SectorManager interface { FaultTracker } -type WorkerID uuid.UUID // worker session UUID var ClosedWorkerID = uuid.UUID{} -func (w WorkerID) String() string { - return uuid.UUID(w).String() -} - type Manager struct { ls stores.LocalStorage storage *stores.Remote @@ -578,11 +573,72 @@ func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, return nil } +func (m *Manager) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + return m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true, nil) +} + +func (m *Manager) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTRegenSectorKey, sector, commD) + if err != nil { + return xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + _, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + } + + if wait { // already in progress + waitRes() + return waitErr + } + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTSealed|storiface.FTCache); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + // NOTE: We set allowFetch to false in so that we always execute on a worker + // with direct access to the data. 
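The fr32 changes above lower MTTresh to 512 KiB and add BufSize plus NewUnpadReaderBuf, which lets a caller allocate one scratch buffer and reuse it across many unpad readers instead of allocating a fresh buffer on every call. A minimal sketch of that pattern follows, assuming a hypothetical caller package next to fr32; unpadAll and its surrounding layout are illustrative, not code from this patch.

```go
package example

import (
	"io"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/extern/sector-storage/fr32"
)

// unpadAll unpads several padded pieces of the same size, reusing one
// scratch buffer (sized with fr32.BufSize) for every reader it creates.
func unpadAll(srcs []io.Reader, sz abi.PaddedPieceSize, dst io.Writer) error {
	buf := make([]byte, fr32.BufSize(sz)) // shared by every piece below

	for _, src := range srcs {
		r, err := fr32.NewUnpadReaderBuf(src, sz, buf)
		if err != nil {
			return err
		}
		if _, err := io.CopyN(dst, r, int64(sz.Unpadded())); err != nil {
			return err
		}
	}
	return nil
}
```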
We want to do that because this step is + // generally very cheap / fast, and transferring data is not worth the effort + selector := newExistingSelector(m.index, sector.ID, storiface.FTUnsealed|storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTCache, true) + + err = m.sched.Schedule(ctx, sector, sealtasks.TTRegenSectorKey, selector, m.schedFetch(sector, storiface.FTUpdate|storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { + err := m.startWork(ctx, w, wk)(w.GenerateSectorKeyFromData(ctx, sector, commD)) + if err != nil { + return err + } + + waitRes() + return nil + }) + if err != nil { + return err + } + + return waitErr +} + func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache); err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } @@ -597,10 +653,161 @@ func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error { if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true, nil); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) } + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUpdate, true, nil); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) + } + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUpdateCache, true, nil); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) + } return err } +func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.ReplicaUpdateOut, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTReplicaUpdate, sector, pieces) + if err != nil { + return storage.ReplicaUpdateOut{}, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + if p != nil { + out = p.(storage.ReplicaUpdateOut) + } + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache); err != nil { + return storage.ReplicaUpdateOut{}, xerrors.Errorf("acquiring sector lock: %w", err) + } + + selector := newAllocSelector(m.index, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) + + err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { + + err := m.startWork(ctx, w, wk)(w.ReplicaUpdate(ctx, sector, pieces)) + if err != nil { + return err + } + + waitRes() + return nil + }) + if err != nil { + return storage.ReplicaUpdateOut{}, err + } + return out, waitErr +} + +func (m *Manager) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (out storage.ReplicaVanillaProofs, err error) { + ctx, cancel := context.WithCancel(ctx) + 
defer cancel() + + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTProveReplicaUpdate1, sector, sectorKey, newSealed, newUnsealed) + if err != nil { + return nil, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + if p != nil { + out = p.(storage.ReplicaVanillaProofs) + } + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTUpdate|storiface.FTCache|storiface.FTUpdateCache, storiface.FTNone); err != nil { + return nil, xerrors.Errorf("acquiring sector lock: %w", err) + } + + selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTSealed|storiface.FTCache, true) + + err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate1, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { + + err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed)) + if err != nil { + return err + } + + waitRes() + return nil + }) + if err != nil { + return nil, err + } + + return out, waitErr +} + +func (m *Manager) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (out storage.ReplicaUpdateProof, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTProveReplicaUpdate2, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + if err != nil { + return nil, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + if p != nil { + out = p.(storage.ReplicaUpdateProof) + } + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + selector := newTaskSelector() + + err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate2, selector, schedNop, func(ctx context.Context, w Worker) error { + err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs)) + if err != nil { + return err + } + + waitRes() + return nil + }) + + if err != nil { + return nil, err + } + + return out, waitErr +} + func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { return m.returnResult(ctx, callID, pi, err) } @@ -629,6 +836,22 @@ func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.Ca return m.returnResult(ctx, callID, nil, err) } +func (m *Manager) ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error { + return m.returnResult(ctx, callID, out, err) +} + +func (m *Manager) ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, out storage.ReplicaVanillaProofs, err *storiface.CallError) error { + return m.returnResult(ctx, callID, out, err) +} + +func (m *Manager) ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error { + return m.returnResult(ctx, callID, proof, err) +} + +func (m *Manager) ReturnGenerateSectorKeyFromData(ctx context.Context, callID 
storiface.CallID, err *storiface.CallError) error { + return m.returnResult(ctx, callID, nil, err) +} + func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return m.returnResult(ctx, callID, nil, err) } diff --git a/extern/sector-storage/manager_calltracker.go b/extern/sector-storage/manager_calltracker.go index 332a08817..7c8703e89 100644 --- a/extern/sector-storage/manager_calltracker.go +++ b/extern/sector-storage/manager_calltracker.go @@ -385,7 +385,6 @@ func (m *Manager) returnResult(ctx context.Context, callID storiface.CallID, r i if ok { return xerrors.Errorf("result for call %v already reported", wid) } - m.results[wid] = res err := m.work.Get(wid).Mutate(func(ws *WorkState) error { diff --git a/extern/sector-storage/manager_test.go b/extern/sector-storage/manager_test.go index aedd284e3..cc1f02a9a 100644 --- a/extern/sector-storage/manager_test.go +++ b/extern/sector-storage/manager_test.go @@ -6,6 +6,7 @@ import ( "context" "encoding/json" "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -18,10 +19,12 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -66,7 +69,12 @@ func newTestStorage(t *testing.T) *testStorage { } func (t testStorage) cleanup() { + noCleanup := os.Getenv("LOTUS_TEST_NO_CLEANUP") != "" for _, path := range t.StoragePaths { + if noCleanup { + fmt.Printf("Not cleaning up test storage at %s\n", path) + continue + } if err := os.RemoveAll(path.Path); err != nil { fmt.Println("Cleanup error:", err) } @@ -163,6 +171,150 @@ func TestSimple(t *testing.T) { require.NoError(t, err) } +type Reader struct{} + +func (Reader) Read(out []byte) (int, error) { + for i := range out { + out[i] = 0 + } + return len(out), nil +} + +type NullReader struct { + *io.LimitedReader +} + +func NewNullReader(size abi.UnpaddedPieceSize) io.Reader { + return &NullReader{(io.LimitReader(&Reader{}, int64(size))).(*io.LimitedReader)} +} + +func (m NullReader) NullBytes() int64 { + return m.N +} + +func TestSnapDeals(t *testing.T) { + logging.SetAllLoggers(logging.LevelWarn) + ctx := context.Background() + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, datastore.NewMapDatastore()) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit1, sealtasks.TTCommit2, sealtasks.TTFinalize, + sealtasks.TTFetch, sealtasks.TTReplicaUpdate, sealtasks.TTProveReplicaUpdate1, sealtasks.TTProveReplicaUpdate2, sealtasks.TTUnseal, + sealtasks.TTRegenSectorKey, + } + wds := datastore.NewMapDatastore() + + w := NewLocalWorker(WorkerConfig{TaskTypes: localTasks}, stor, lstor, idx, m, statestore.New(wds)) + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + proofType := abi.RegisteredSealProof_StackedDrg2KiBV1 + ptStr := os.Getenv("LOTUS_TEST_SNAP_DEALS_PROOF_TYPE") + switch ptStr { + case "2k": + case "8M": + proofType = abi.RegisteredSealProof_StackedDrg8MiBV1 + case "512M": + proofType = abi.RegisteredSealProof_StackedDrg512MiBV1 + case "32G": + proofType = abi.RegisteredSealProof_StackedDrg32GiBV1 + case "64G": + proofType = 
abi.RegisteredSealProof_StackedDrg64GiBV1 + default: + log.Warn("Unspecified proof type, make sure to set LOTUS_TEST_SNAP_DEALS_PROOF_TYPE to '2k', '8M', '512M', '32G' or '64G'") + log.Warn("Continuing test with 2k sectors") + } + + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: proofType, + } + ss, err := proofType.SectorSize() + require.NoError(t, err) + + unpaddedSectorSize := abi.PaddedPieceSize(ss).Unpadded() + + // Pack sector with no pieces + p0, err := m.AddPiece(ctx, sid, nil, unpaddedSectorSize, NewNullReader(unpaddedSectorSize)) + require.NoError(t, err) + ccPieces := []abi.PieceInfo{p0} + + // Precommit and Seal a CC sector + fmt.Printf("PC1\n") + ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9} + pc1Out, err := m.SealPreCommit1(ctx, sid, ticket, ccPieces) + require.NoError(t, err) + fmt.Printf("PC2\n") + pc2Out, err := m.SealPreCommit2(ctx, sid, pc1Out) + require.NoError(t, err) + + // Now do a snap deals replica update + sectorKey := pc2Out.Sealed + + // Two pieces each half the size of the sector + unpaddedPieceSize := unpaddedSectorSize / 2 + p1, err := m.AddPiece(ctx, sid, nil, unpaddedPieceSize, strings.NewReader(strings.Repeat("k", int(unpaddedPieceSize)))) + require.NoError(t, err) + require.Equal(t, unpaddedPieceSize.Padded(), p1.Size) + + p2, err := m.AddPiece(ctx, sid, []abi.UnpaddedPieceSize{p1.Size.Unpadded()}, unpaddedPieceSize, strings.NewReader(strings.Repeat("j", int(unpaddedPieceSize)))) + require.NoError(t, err) + require.Equal(t, unpaddedPieceSize.Padded(), p1.Size) + + pieces := []abi.PieceInfo{p1, p2} + fmt.Printf("RU\n") + startRU := time.Now() + out, err := m.ReplicaUpdate(ctx, sid, pieces) + require.NoError(t, err) + fmt.Printf("RU duration (%s): %s\n", ss.ShortString(), time.Since(startRU)) + + updateProofType, err := sid.ProofType.RegisteredUpdateProof() + require.NoError(t, err) + require.NotNil(t, out) + fmt.Printf("PR1\n") + startPR1 := time.Now() + vanillaProofs, err := m.ProveReplicaUpdate1(ctx, sid, sectorKey, out.NewSealed, out.NewUnsealed) + require.NoError(t, err) + require.NotNil(t, vanillaProofs) + fmt.Printf("PR1 duration (%s): %s\n", ss.ShortString(), time.Since(startPR1)) + fmt.Printf("PR2\n") + startPR2 := time.Now() + proof, err := m.ProveReplicaUpdate2(ctx, sid, sectorKey, out.NewSealed, out.NewUnsealed, vanillaProofs) + require.NoError(t, err) + require.NotNil(t, proof) + fmt.Printf("PR2 duration (%s): %s\n", ss.ShortString(), time.Since(startPR2)) + + vInfo := proof7.ReplicaUpdateInfo{ + Proof: proof, + UpdateProofType: updateProofType, + OldSealedSectorCID: sectorKey, + NewSealedSectorCID: out.NewSealed, + NewUnsealedSectorCID: out.NewUnsealed, + } + pass, err := ffiwrapper.ProofVerifier.VerifyReplicaUpdate(vInfo) + require.NoError(t, err) + assert.True(t, pass) + + fmt.Printf("Decode\n") + // Remove unsealed data and decode for retrieval + require.NoError(t, m.FinalizeSector(ctx, sid, nil)) + startDecode := time.Now() + require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed)) + fmt.Printf("Decode duration (%s): %s\n", ss.ShortString(), time.Since(startDecode)) + + // Remove just the first piece and decode for retrieval + require.NoError(t, m.FinalizeSector(ctx, sid, []storage.Range{{Offset: p1.Size.Unpadded(), Size: p2.Size.Unpadded()}})) + require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed)) + + fmt.Printf("GSK\n") + require.NoError(t, m.ReleaseSectorKey(ctx, sid)) + startGSK := time.Now() + 
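TestSnapDeals drives the whole snap-deals path in one go, which is hard to follow in the flattened diff. Condensed, the happy path looks roughly like the sketch below; `SnapSealer` is a hypothetical narrowing to just the calls used here (types from specs-storage), offered as a readable summary rather than a replacement for anything in the test:

```go
// Package snapflow condenses the sequence TestSnapDeals drives through the
// Manager. SnapSealer is a hypothetical narrowing, not a Lotus interface.
package snapflow

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-storage/storage"
)

type SnapSealer interface {
	SealPreCommit1(ctx context.Context, s storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error)
	SealPreCommit2(ctx context.Context, s storage.SectorRef, pc1 storage.PreCommit1Out) (storage.SectorCids, error)
	ReplicaUpdate(ctx context.Context, s storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error)
	ProveReplicaUpdate1(ctx context.Context, s storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error)
	ProveReplicaUpdate2(ctx context.Context, s storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vp storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error)
}

// SnapDealFlow seals a CC sector, upgrades it with deal pieces, and returns
// the replica update proof, mirroring the happy path of the test.
func SnapDealFlow(ctx context.Context, s SnapSealer, sec storage.SectorRef,
	ticket abi.SealRandomness, ccPieces, dealPieces []abi.PieceInfo) (storage.ReplicaUpdateProof, error) {

	pc1, err := s.SealPreCommit1(ctx, sec, ticket, ccPieces)
	if err != nil {
		return nil, err
	}
	pc2, err := s.SealPreCommit2(ctx, sec, pc1)
	if err != nil {
		return nil, err
	}
	sectorKey := pc2.Sealed // CommR of the original CC sector

	ru, err := s.ReplicaUpdate(ctx, sec, dealPieces) // encode the deals on top of the sector key
	if err != nil {
		return nil, err
	}
	vanilla, err := s.ProveReplicaUpdate1(ctx, sec, sectorKey, ru.NewSealed, ru.NewUnsealed)
	if err != nil {
		return nil, err
	}
	return s.ProveReplicaUpdate2(ctx, sec, sectorKey, ru.NewSealed, ru.NewUnsealed, vanilla)
}
```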
require.NoError(t, m.GenerateSectorKeyFromData(ctx, sid, out.NewUnsealed)) + fmt.Printf("GSK duration (%s): %s\n", ss.ShortString(), time.Since(startGSK)) + +} + func TestRedoPC1(t *testing.T) { logging.SetAllLoggers(logging.LevelDebug) @@ -324,7 +476,7 @@ func TestRestartWorker(t *testing.T) { defer cleanup() localTasks := []sealtasks.TaskType{ - sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + sealtasks.TTAddPiece, sealtasks.TTFetch, } wds := datastore.NewMapDatastore() @@ -334,7 +486,7 @@ func TestRestartWorker(t *testing.T) { return &testExec{apch: arch}, nil }, WorkerConfig{ TaskTypes: localTasks, - }, stor, lstor, idx, m, statestore.New(wds)) + }, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds)) err := m.AddWorker(ctx, w) require.NoError(t, err) @@ -371,7 +523,7 @@ func TestRestartWorker(t *testing.T) { return &testExec{apch: arch}, nil }, WorkerConfig{ TaskTypes: localTasks, - }, stor, lstor, idx, m, statestore.New(wds)) + }, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds)) err = m.AddWorker(ctx, w) require.NoError(t, err) @@ -407,7 +559,7 @@ func TestReenableWorker(t *testing.T) { return &testExec{apch: arch}, nil }, WorkerConfig{ TaskTypes: localTasks, - }, stor, lstor, idx, m, statestore.New(wds)) + }, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds)) err := m.AddWorker(ctx, w) require.NoError(t, err) @@ -457,3 +609,123 @@ func TestReenableWorker(t *testing.T) { i, _ = m.sched.Info(ctx) require.Len(t, i.(SchedDiagInfo).OpenWindows, 2) } + +func TestResUse(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + ctx, done := context.WithCancel(context.Background()) + defer done() + + ds := datastore.NewMapDatastore() + + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTFetch, + } + + wds := datastore.NewMapDatastore() + + arch := make(chan chan apres) + w := newLocalWorker(func() (ffiwrapper.Storage, error) { + return &testExec{apch: arch}, nil + }, WorkerConfig{ + TaskTypes: localTasks, + }, func(s string) (string, bool) { + return "", false + }, stor, lstor, idx, m, statestore.New(wds)) + + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } + + go func() { + _, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.Error(t, err) + }() + +l: + for { + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + if w.MemUsedMax > 0 { + break l + } + time.Sleep(time.Millisecond) + } + } + + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + require.Equal(t, storiface.ResourceTable[sealtasks.TTAddPiece][abi.RegisteredSealProof_StackedDrg2KiBV1].MaxMemory, w.MemUsedMax) + } +} + +func TestResOverride(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + ctx, done := context.WithCancel(context.Background()) + defer done() + + ds := datastore.NewMapDatastore() + + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTFetch, + } + + wds := datastore.NewMapDatastore() + + arch := make(chan chan apres) + w := newLocalWorker(func() (ffiwrapper.Storage, error) { + return &testExec{apch: arch}, nil + }, WorkerConfig{ + TaskTypes: localTasks, + }, func(s string) (string, bool) { + if s == 
"AP_2K_MAX_MEMORY" { + return "99999", true + } + + return "", false + }, stor, lstor, idx, m, statestore.New(wds)) + + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } + + go func() { + _, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.Error(t, err) + }() + +l: + for { + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + if w.MemUsedMax > 0 { + break l + } + time.Sleep(time.Millisecond) + } + } + + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + require.Equal(t, uint64(99999), w.MemUsedMax) + } +} diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index 273f0928e..ead4ebe26 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -10,8 +10,11 @@ import ( "math/rand" "sync" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/filecoin-project/dagstore/mount" ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper" commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" @@ -261,6 +264,28 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid storage.SectorRef, ph return out[:], nil } +func (mgr *SectorMgr) ReplicaUpdate(ctx context.Context, sid storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { + out := storage.ReplicaUpdateOut{} + return out, nil +} + +func (mgr *SectorMgr) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { + out := make([][]byte, 0) + return out, nil +} + +func (mgr *SectorMgr) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) { + return make([]byte, 0), nil +} + +func (mgr *SectorMgr) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + return nil +} + +func (mgr *SectorMgr) ReleaseSealed(ctx context.Context, sid storage.SectorRef) error { + return nil +} + // Test Instrumentation Methods func (mgr *SectorMgr) MarkFailed(sid storage.SectorRef, failed bool) error { @@ -384,12 +409,22 @@ func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSea } } -func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { - if offset != 0 { +func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) { + if uint64(offset) != 0 { panic("implme") } - return ioutil.NopCloser(bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size])), false, nil + br := bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size]) + + return struct { + io.ReadCloser + io.Seeker + io.ReaderAt + }{ + ReadCloser: ioutil.NopCloser(br), + Seeker: br, + ReaderAt: br, + }, false, nil } func (mgr *SectorMgr) StageFakeData(mid abi.ActorID, spt abi.RegisteredSealProof) 
(storage.SectorRef, []abi.PieceInfo, error) { @@ -456,6 +491,8 @@ func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStPr return bad, nil } +var _ storiface.WorkerReturn = &SectorMgr{} + func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { panic("not supported") } @@ -500,6 +537,22 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, panic("not supported") } +func (mgr *SectorMgr) ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, out storage.ReplicaVanillaProofs, err *storiface.CallError) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateProof, err *storiface.CallError) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + panic("not supported") +} + func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { plen, err := svi.SealProof.ProofSize() if err != nil { @@ -547,6 +600,10 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri return ok, nil } +func (m mockVerifProver) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + return true, nil +} + func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { out := make([]byte, m.aggLen(len(aggregateInfo.Infos))) // todo: figure out more real length for pi, proof := range proofs { diff --git a/extern/sector-storage/piece_provider.go b/extern/sector-storage/piece_provider.go index ad3a2543e..4622289e8 100644 --- a/extern/sector-storage/piece_provider.go +++ b/extern/sector-storage/piece_provider.go @@ -8,6 +8,7 @@ import ( "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" @@ -23,7 +24,11 @@ type Unsealer interface { type PieceProvider interface { // ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector - ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) + // pieceOffset + pieceSize specify piece bounds for unsealing (note: with SDR the entire sector will be unsealed by + // default in most cases, but this might matter with future PoRep) + // startOffset is added to the pieceOffset to get the starting reader offset. + // The number of bytes that can be read is pieceSize-startOffset + ReadPiece(ctx context.Context, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) } @@ -67,50 +72,104 @@ func (p *pieceProvider) IsUnsealed(ctx context.Context, sector storage.SectorRef // It will NOT try to schedule an Unseal of a sealed sector file for the read. 
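ReadPiece now returns a dagstore mount.Reader (ReadCloser + Seeker + ReaderAt) rather than a plain io.ReadCloser; the mock above satisfies it by embedding a bytes.Reader in an anonymous struct. The same idiom in isolation:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// composite is the shape mount.Reader requires: ReadCloser + Seeker + ReaderAt.
type composite interface {
	io.ReadCloser
	io.Seeker
	io.ReaderAt
}

// fromBytes builds such a reader from an in-memory buffer, the way the mock's
// ReadPiece does: bytes.Reader already implements Seek and ReadAt, and
// ioutil.NopCloser supplies the no-op Close.
func fromBytes(b []byte) composite {
	br := bytes.NewReader(b)
	return struct {
		io.ReadCloser
		io.Seeker
		io.ReaderAt
	}{
		ReadCloser: ioutil.NopCloser(br),
		Seeker:     br,
		ReaderAt:   br,
	}
}

func main() {
	r := fromBytes([]byte("piece data"))

	buf := make([]byte, 4)
	_, _ = r.ReadAt(buf, 6)  // random access, independent of the stream position
	fmt.Println(string(buf)) // "data"

	_, _ = r.Seek(0, io.SeekStart)
	all, _ := ioutil.ReadAll(r)
	fmt.Println(string(all)) // "piece data"
	_ = r.Close()
}
```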
// // Returns a nil reader if the piece does NOT exist in any unsealed file or there is no unsealed file for the given sector on any of the workers. -func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.ReadCloser, context.CancelFunc, error) { +func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (mount.Reader, error) { // acquire a lock purely for reading unsealed sectors ctx, cancel := context.WithCancel(ctx) if err := p.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { cancel() - return nil, nil, xerrors.Errorf("acquiring read sector lock: %w", err) + return nil, xerrors.Errorf("acquiring read sector lock: %w", err) } - // Reader returns a reader for an unsealed piece at the given offset in the given sector. + // Reader returns a reader getter for an unsealed piece at the given offset in the given sector. // The returned reader will be nil if none of the workers has an unsealed sector file containing // the unsealed piece. - r, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded()) + rg, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(pieceOffset.Padded()), size.Padded()) if err != nil { + cancel() log.Debugf("did not get storage reader;sector=%+v, err:%s", sector.ID, err) - cancel() - return nil, nil, err + return nil, err } - if r == nil { + if rg == nil { cancel() + return nil, nil } - return r, cancel, nil + buf := make([]byte, fr32.BufSize(size.Padded())) + + pr, err := (&pieceReader{ + ctx: ctx, + getReader: func(ctx context.Context, startOffset uint64) (io.ReadCloser, error) { + startOffsetAligned := storiface.UnpaddedByteIndex(startOffset / 127 * 127) // floor to multiple of 127 + + r, err := rg(startOffsetAligned.Padded()) + if err != nil { + return nil, xerrors.Errorf("getting reader at +%d: %w", startOffsetAligned, err) + } + + upr, err := fr32.NewUnpadReaderBuf(r, size.Padded(), buf) + if err != nil { + r.Close() // nolint + return nil, xerrors.Errorf("creating unpadded reader: %w", err) + } + + bir := bufio.NewReaderSize(upr, 127) + if startOffset > uint64(startOffsetAligned) { + if _, err := bir.Discard(int(startOffset - uint64(startOffsetAligned))); err != nil { + r.Close() // nolint + return nil, xerrors.Errorf("discarding bytes for startOffset: %w", err) + } + } + + return struct { + io.Reader + io.Closer + }{ + Reader: bir, + Closer: funcCloser(func() error { + return r.Close() + }), + }, nil + }, + len: size, + onClose: cancel, + pieceCid: pc, + }).init() + if err != nil || pr == nil { // pr == nil to make sure we don't return typed nil + cancel() + return nil, err + } + + return pr, err } +type funcCloser func() error + +func (f funcCloser) Close() error { + return f() +} + +var _ io.Closer = funcCloser(nil) + // ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector // If an Unsealed sector file exists with the Piece Unsealed in it, we'll use that for the read. // Otherwise, we will Unseal a Sealed sector file for the given sector and read the Unsealed piece from it. // If we do NOT have an existing unsealed file containing the given piece thus causing us to schedule an Unseal, // the returned boolean parameter will be set to true. 
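The getReader closure above can only start the underlying padded stream on an fr32 boundary: every 127 unpadded bytes map to 128 padded bytes on disk, so the requested offset is floored to a multiple of 127 and the remainder is discarded after unpadding. The arithmetic on its own:

```go
package main

import "fmt"

func main() {
	const unpaddedChunk = 127 // payload bytes per fr32 chunk
	const paddedChunk = 128   // on-disk bytes per fr32 chunk

	startOffset := uint64(1000) // unpadded offset the caller asked for

	// Floor to an fr32 boundary, exactly like startOffsetAligned above.
	aligned := startOffset / unpaddedChunk * unpaddedChunk // 889

	// Where the padded (on-disk) stream has to be opened.
	paddedStart := aligned / unpaddedChunk * paddedChunk // 896

	// Unpadded bytes to skip after fr32 unpadding to land on the requested
	// byte; this is what the bufio Discard call above does.
	skip := startOffset - aligned // 111

	fmt.Println(aligned, paddedStart, skip)
}
```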
// If we have an existing unsealed file containing the given piece, the returned boolean will be set to false. -func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { - if err := offset.Valid(); err != nil { - return nil, false, xerrors.Errorf("offset is not valid: %w", err) +func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) { + if err := pieceOffset.Valid(); err != nil { + return nil, false, xerrors.Errorf("pieceOffset is not valid: %w", err) } if err := size.Validate(); err != nil { return nil, false, xerrors.Errorf("size is not a valid piece size: %w", err) } - r, unlock, err := p.tryReadUnsealedPiece(ctx, sector, offset, size) + r, err := p.tryReadUnsealedPiece(ctx, unsealed, sector, pieceOffset, size) log.Debugf("result of first tryReadUnsealedPiece: r=%+v, err=%s", r, err) if xerrors.Is(err, storiface.ErrSectorNotFound) { - log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) err = nil } if err != nil { @@ -129,14 +188,14 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, if unsealed == cid.Undef { commd = nil } - if err := p.uns.SectorsUnsealPiece(ctx, sector, offset, size, ticket, commd); err != nil { + if err := p.uns.SectorsUnsealPiece(ctx, sector, pieceOffset, size, ticket, commd); err != nil { log.Errorf("failed to SectorsUnsealPiece: %s", err) return nil, false, xerrors.Errorf("unsealing piece: %w", err) } - log.Debugf("unsealed a sector file to read the piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("unsealed a sector file to read the piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) - r, unlock, err = p.tryReadUnsealedPiece(ctx, sector, offset, size) + r, err = p.tryReadUnsealedPiece(ctx, unsealed, sector, pieceOffset, size) if err != nil { log.Errorf("failed to tryReadUnsealedPiece after SectorsUnsealPiece: %s", err) return nil, true, xerrors.Errorf("read after unsealing: %w", err) @@ -145,32 +204,12 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, log.Errorf("got no reader after unsealing piece") return nil, true, xerrors.Errorf("got no reader after unsealing piece") } - log.Debugf("got a reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("got a reader to read unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) } else { - log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) } - upr, err := fr32.NewUnpadReader(r, size.Padded()) - if err != nil { - unlock() - return nil, uns, xerrors.Errorf("creating unpadded reader: %w", err) - } + log.Debugf("returning reader to read unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) - log.Debugf("returning reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) - - return &funcCloser{ - 
Reader: bufio.NewReaderSize(upr, 127), - close: func() error { - err = r.Close() - unlock() - return err - }, - }, uns, nil + return r, uns, nil } - -type funcCloser struct { - io.Reader - close func() error -} - -func (fc *funcCloser) Close() error { return fc.close() } diff --git a/extern/sector-storage/piece_provider_test.go b/extern/sector-storage/piece_provider_test.go index eb3ffa7c3..3ace2916e 100644 --- a/extern/sector-storage/piece_provider_test.go +++ b/extern/sector-storage/piece_provider_test.go @@ -7,6 +7,7 @@ import ( "math/rand" "net" "net/http" + "os" "testing" "github.com/filecoin-project/go-state-types/abi" @@ -286,7 +287,7 @@ func (p *pieceProviderTestHarness) addRemoteWorker(t *testing.T, tasks []sealtas worker := newLocalWorker(nil, WorkerConfig{ TaskTypes: tasks, - }, remote, localStore, p.index, p.mgr, csts) + }, os.LookupEnv, remote, localStore, p.index, p.mgr, csts) p.servers = append(p.servers, svc) p.localStores = append(p.localStores, localStore) diff --git a/extern/sector-storage/piece_reader.go b/extern/sector-storage/piece_reader.go new file mode 100644 index 000000000..d7a3f4e98 --- /dev/null +++ b/extern/sector-storage/piece_reader.go @@ -0,0 +1,180 @@ +package sectorstorage + +import ( + "bufio" + "context" + "io" + + "github.com/ipfs/go-cid" + "go.opencensus.io/stats" + "golang.org/x/xerrors" + + "github.com/filecoin-project/dagstore/mount" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/metrics" +) + +// For small read skips, it's faster to "burn" some bytes than to setup new sector reader. +// Assuming 1ms stream seek latency, and 1G/s stream rate, we're willing to discard up to 1 MiB. +var MaxPieceReaderBurnBytes int64 = 1 << 20 // 1M +var ReadBuf = 128 * (127 * 8) // unpadded(128k) + +type pieceGetter func(ctx context.Context, offset uint64) (io.ReadCloser, error) + +type pieceReader struct { + ctx context.Context + getReader pieceGetter + pieceCid cid.Cid + len abi.UnpaddedPieceSize + onClose context.CancelFunc + + closed bool + seqAt int64 // next byte to be read by io.Reader + + r io.ReadCloser + br *bufio.Reader + rAt int64 +} + +func (p *pieceReader) init() (_ *pieceReader, err error) { + stats.Record(p.ctx, metrics.DagStorePRInitCount.M(1)) + + p.rAt = 0 + p.r, err = p.getReader(p.ctx, uint64(p.rAt)) + if err != nil { + return nil, err + } + if p.r == nil { + return nil, nil + } + + p.br = bufio.NewReaderSize(p.r, ReadBuf) + + return p, nil +} + +func (p *pieceReader) check() error { + if p.closed { + return xerrors.Errorf("reader closed") + } + + return nil +} + +func (p *pieceReader) Close() error { + if err := p.check(); err != nil { + return err + } + + if p.r != nil { + if err := p.r.Close(); err != nil { + return err + } + if err := p.r.Close(); err != nil { + return err + } + p.r = nil + } + + p.onClose() + + p.closed = true + + return nil +} + +func (p *pieceReader) Read(b []byte) (int, error) { + if err := p.check(); err != nil { + return 0, err + } + + n, err := p.ReadAt(b, p.seqAt) + p.seqAt += int64(n) + return n, err +} + +func (p *pieceReader) Seek(offset int64, whence int) (int64, error) { + if err := p.check(); err != nil { + return 0, err + } + + switch whence { + case io.SeekStart: + p.seqAt = offset + case io.SeekCurrent: + p.seqAt += offset + case io.SeekEnd: + p.seqAt = int64(p.len) + offset + default: + return 0, xerrors.Errorf("bad whence") + } + + return p.seqAt, nil +} + +func (p *pieceReader) ReadAt(b []byte, off int64) (n int, err error) { + if err := p.check(); err != nil { + 
return 0, err + } + + stats.Record(p.ctx, metrics.DagStorePRBytesRequested.M(int64(len(b)))) + + // 1. Get the backing reader into the correct position + + // if the backing reader is ahead of the offset we want, or more than + // MaxPieceReaderBurnBytes behind, reset the reader + if p.r == nil || p.rAt > off || p.rAt+MaxPieceReaderBurnBytes < off { + if p.r != nil { + if err := p.r.Close(); err != nil { + return 0, xerrors.Errorf("closing backing reader: %w", err) + } + p.r = nil + p.br = nil + } + + log.Debugw("pieceReader new stream", "piece", p.pieceCid, "at", p.rAt, "off", off-p.rAt, "n", len(b)) + + if off > p.rAt { + stats.Record(p.ctx, metrics.DagStorePRSeekForwardBytes.M(off-p.rAt), metrics.DagStorePRSeekForwardCount.M(1)) + } else { + stats.Record(p.ctx, metrics.DagStorePRSeekBackBytes.M(p.rAt-off), metrics.DagStorePRSeekBackCount.M(1)) + } + + p.rAt = off + p.r, err = p.getReader(p.ctx, uint64(p.rAt)) + p.br = bufio.NewReaderSize(p.r, ReadBuf) + if err != nil { + return 0, xerrors.Errorf("getting backing reader: %w", err) + } + } + + // 2. Check if we need to burn some bytes + if off > p.rAt { + stats.Record(p.ctx, metrics.DagStorePRBytesDiscarded.M(off-p.rAt), metrics.DagStorePRDiscardCount.M(1)) + + n, err := io.CopyN(io.Discard, p.br, off-p.rAt) + p.rAt += n + if err != nil { + return 0, xerrors.Errorf("discarding read gap: %w", err) + } + } + + // 3. Sanity check + if off != p.rAt { + return 0, xerrors.Errorf("bad reader offset; requested %d; at %d", off, p.rAt) + } + + // 4. Read! + n, err = io.ReadFull(p.br, b) + if n < len(b) { + log.Debugw("pieceReader short read", "piece", p.pieceCid, "at", p.rAt, "toEnd", int64(p.len)-p.rAt, "n", len(b), "read", n, "err", err) + } + if err == io.ErrUnexpectedEOF { + err = io.EOF + } + + p.rAt += int64(n) + return n, err +} + +var _ mount.Reader = (*pieceReader)(nil) diff --git a/extern/sector-storage/sched.go b/extern/sector-storage/sched.go index 1ffb15e5b..d7d7d3265 100644 --- a/extern/sector-storage/sched.go +++ b/extern/sector-storage/sched.go @@ -53,7 +53,7 @@ type WorkerSelector interface { type scheduler struct { workersLk sync.RWMutex - workers map[WorkerID]*workerHandle + workers map[storiface.WorkerID]*workerHandle schedule chan *workerRequest windowRequests chan *schedWindowRequest @@ -95,7 +95,7 @@ type workerHandle struct { } type schedWindowRequest struct { - worker WorkerID + worker storiface.WorkerID done chan *schedWindow } @@ -107,14 +107,14 @@ type schedWindow struct { type workerDisableReq struct { activeWindows []*schedWindow - wid WorkerID + wid storiface.WorkerID done func() } type activeResources struct { memUsedMin uint64 memUsedMax uint64 - gpuUsed bool + gpuUsed float64 cpuUse uint64 cond *sync.Cond @@ -145,7 +145,7 @@ type workerResponse struct { func newScheduler() *scheduler { return &scheduler{ - workers: map[WorkerID]*workerHandle{}, + workers: map[storiface.WorkerID]*workerHandle{}, schedule: make(chan *workerRequest), windowRequests: make(chan *schedWindowRequest, 20), @@ -378,7 +378,6 @@ func (sh *scheduler) trySched() { }() task := (*sh.schedQueue)[sqi] - needRes := ResourceTable[task.taskType][task.sector.ProofType] task.indexHeap = sqi for wnd, windowRequest := range sh.openWindows { @@ -394,6 +393,8 @@ func (sh *scheduler) trySched() { continue } + needRes := worker.info.Resources.ResourceSpec(task.sector.ProofType, task.taskType) + // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info) { continue @@ -457,7 +458,6 @@ 
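pieceReader.ReadAt above keeps a single backing stream and decides per call whether to keep it, burning at most MaxPieceReaderBurnBytes of forward gap, or to drop it and reopen at the new offset. The decision rule extracted into a tiny helper (a simplification of the code above, not a drop-in):

```go
package main

import "fmt"

// maxBurn mirrors MaxPieceReaderBurnBytes: how far forward we are willing to
// move by reading and discarding instead of reopening the stream.
const maxBurn int64 = 1 << 20

// plan decides what to do with a backing reader currently at offset `at` when
// the caller wants to read at `off`.
func plan(haveReader bool, at, off int64) (reopen bool, discard int64) {
	if !haveReader || at > off || at+maxBurn < off {
		// no reader, reader already past the target, or target too far ahead
		return true, 0
	}
	return false, off - at // close enough: burn the gap
}

func main() {
	fmt.Println(plan(true, 0, 4096))   // false 4096: discard 4 KiB and keep reading
	fmt.Println(plan(true, 8192, 0))   // true 0: reader is past the target
	fmt.Println(plan(true, 0, 10<<20)) // true 0: gap larger than maxBurn
	fmt.Println(plan(false, 0, 123))   // true 0: nothing open yet
}
```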
func (sh *scheduler) trySched() { for sqi := 0; sqi < queueLen; sqi++ { task := (*sh.schedQueue)[sqi] - needRes := ResourceTable[task.taskType][task.sector.ProofType] selectedWindow := -1 for _, wnd := range acceptableWindows[task.indexHeap] { @@ -466,6 +466,8 @@ func (sh *scheduler) trySched() { log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.ID.Number, wnd) + needRes := info.Resources.ResourceSpec(task.sector.ProofType, task.taskType) + // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", info) { continue diff --git a/extern/sector-storage/sched_resources.go b/extern/sector-storage/sched_resources.go index 7c16120c2..5f7f1cfb8 100644 --- a/extern/sector-storage/sched_resources.go +++ b/extern/sector-storage/sched_resources.go @@ -6,7 +6,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerInfo, r Resources, locker sync.Locker, cb func() error) error { +func (a *activeResources) withResources(id storiface.WorkerID, wr storiface.WorkerInfo, r storiface.Resources, locker sync.Locker, cb func() error) error { for !a.canHandleRequest(r, id, "withResources", wr) { if a.cond == nil { a.cond = sync.NewCond(locker) @@ -30,20 +30,20 @@ func (a *activeResources) hasWorkWaiting() bool { return a.waiting > 0 } -func (a *activeResources) add(wr storiface.WorkerResources, r Resources) { - if r.CanGPU { - a.gpuUsed = true +func (a *activeResources) add(wr storiface.WorkerResources, r storiface.Resources) { + if r.GPUUtilization > 0 { + a.gpuUsed += r.GPUUtilization } - a.cpuUse += r.Threads(wr.CPUs) + a.cpuUse += r.Threads(wr.CPUs, len(wr.GPUs)) a.memUsedMin += r.MinMemory a.memUsedMax += r.MaxMemory } -func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { - if r.CanGPU { - a.gpuUsed = false +func (a *activeResources) free(wr storiface.WorkerResources, r storiface.Resources) { + if r.GPUUtilization > 0 { + a.gpuUsed -= r.GPUUtilization } - a.cpuUse -= r.Threads(wr.CPUs) + a.cpuUse -= r.Threads(wr.CPUs, len(wr.GPUs)) a.memUsedMin -= r.MinMemory a.memUsedMax -= r.MaxMemory @@ -54,35 +54,46 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { // canHandleRequest evaluates if the worker has enough available resources to // handle the request. -func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, info storiface.WorkerInfo) bool { +func (a *activeResources) canHandleRequest(needRes storiface.Resources, wid storiface.WorkerID, caller string, info storiface.WorkerInfo) bool { if info.IgnoreResources { // shortcircuit; if this worker is ignoring resources, it can always handle the request. 
return true } res := info.Resources + // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) - minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory - if minNeedMem > res.MemPhysical { - log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib) + memNeeded := needRes.MinMemory + needRes.BaseMinMemory + memUsed := a.memUsedMin + // assume that MemUsed can be swapped, so only check it in the vmem Check + memAvail := res.MemPhysical - memUsed + if memNeeded > memAvail { + log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM available", wid, caller, memNeeded/mib, memAvail/mib) return false } - maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory + vmemNeeded := needRes.MaxMemory + needRes.BaseMinMemory + vmemUsed := a.memUsedMax + workerMemoryReserved := res.MemUsed + res.MemSwapUsed // memory used outside lotus-worker (used by the OS, etc.) - if maxNeedMem > res.MemSwap+res.MemPhysical { - log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) + if vmemUsed < workerMemoryReserved { + vmemUsed = workerMemoryReserved + } + vmemAvail := (res.MemPhysical + res.MemSwap) - vmemUsed + + if vmemNeeded > vmemAvail { + log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM available", wid, caller, vmemNeeded/mib, vmemAvail/mib) return false } - if a.cpuUse+needRes.Threads(res.CPUs) > res.CPUs { - log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs) + if a.cpuUse+needRes.Threads(res.CPUs, len(res.GPUs)) > res.CPUs { + log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs, len(res.GPUs)), a.cpuUse, res.CPUs) return false } - if len(res.GPUs) > 0 && needRes.CanGPU { - if a.gpuUsed { - log.Debugf("sched: not scheduling on worker %s for %s; GPU in use", wid, caller) + if len(res.GPUs) > 0 && needRes.GPUUtilization > 0 { + if a.gpuUsed+needRes.GPUUtilization > float64(len(res.GPUs)) { + log.Debugf("sched: not scheduling on worker %s for %s; GPU(s) in use", wid, caller) return false } } @@ -96,12 +107,21 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { cpu := float64(a.cpuUse) / float64(wr.CPUs) max = cpu - memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical) + memUsed := a.memUsedMin + if memUsed < wr.MemUsed { + memUsed = wr.MemUsed + } + memMin := float64(memUsed) / float64(wr.MemPhysical) if memMin > max { max = memMin } - memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap) + vmemUsed := a.memUsedMax + if a.memUsedMax < wr.MemUsed+wr.MemSwapUsed { + vmemUsed = wr.MemUsed + wr.MemSwapUsed + } + memMax := float64(vmemUsed) / float64(wr.MemPhysical+wr.MemSwap) + if memMax > max { max = memMax } diff --git a/extern/sector-storage/sched_test.go b/extern/sector-storage/sched_test.go index 902253cbd..4042f3905 100644 --- a/extern/sector-storage/sched_test.go +++ b/extern/sector-storage/sched_test.go @@ -42,14 +42,16 @@ func TestWithPriority(t *testing.T) { var decentWorkerResources = storiface.WorkerResources{ 
MemPhysical: 128 << 30, MemSwap: 200 << 30, - MemReserved: 2 << 30, + MemUsed: 1 << 30, + MemSwapUsed: 1 << 30, CPUs: 32, - GPUs: []string{"a GPU"}, + GPUs: []string{}, } var constrainedWorkerResources = storiface.WorkerResources{ MemPhysical: 1 << 30, - MemReserved: 2 << 30, + MemUsed: 1 << 30, + MemSwapUsed: 1 << 30, CPUs: 1, } @@ -101,6 +103,22 @@ func (s *schedTestWorker) AddPiece(ctx context.Context, sector storage.SectorRef panic("implement me") } +func (s *schedTestWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, peices []abi.PieceInfo) (storiface.CallID, error) { + panic("implement me") +} + +func (s *schedTestWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + panic("implement me") +} + +func (s *schedTestWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + panic("implement me") +} + +func (s *schedTestWorker) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) { + panic("implement me") +} + func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { panic("implement me") } @@ -190,6 +208,9 @@ func TestSchedStartStop(t *testing.T) { func TestSched(t *testing.T) { //stm: @WORKER_JOBS_001 + storiface.ParallelNum = 1 + storiface.ParallelDenom = 1 + ctx, done := context.WithTimeout(context.Background(), 30*time.Second) defer done() @@ -256,7 +277,9 @@ func TestSched(t *testing.T) { return nil }, noopAction) - require.NoError(t, err, fmt.Sprint(l, l2)) + if err != context.Canceled { + require.NoError(t, err, fmt.Sprint(l, l2)) + } }() <-sched.testSync @@ -301,9 +324,6 @@ func TestSched(t *testing.T) { } testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) { - ParallelNum = 1 - ParallelDenom = 1 - return func(t *testing.T) { index := stores.NewIndex() @@ -560,7 +580,7 @@ func BenchmarkTrySched(b *testing.B) { b.StopTimer() sched := newScheduler() - sched.workers[WorkerID{}] = &workerHandle{ + sched.workers[storiface.WorkerID{}] = &workerHandle{ workerRpc: nil, info: storiface.WorkerInfo{ Hostname: "t", @@ -572,7 +592,7 @@ func BenchmarkTrySched(b *testing.B) { for i := 0; i < windows; i++ { sched.openWindows = append(sched.openWindows, &schedWindowRequest{ - worker: WorkerID{}, + worker: storiface.WorkerID{}, done: make(chan *schedWindow, 1000), }) } @@ -618,7 +638,7 @@ func TestWindowCompact(t *testing.T) { taskType: task, sector: storage.SectorRef{ProofType: spt}, }) - window.allocated.add(wh.info.Resources, ResourceTable[task][spt]) + window.allocated.add(wh.info.Resources, storiface.ResourceTable[task][spt]) } wh.activeWindows = append(wh.activeWindows, window) @@ -637,7 +657,7 @@ func TestWindowCompact(t *testing.T) { for ti, task := range tasks { require.Equal(t, task, wh.activeWindows[wi].todo[ti].taskType, "%d, %d", wi, ti) - expectRes.add(wh.info.Resources, ResourceTable[task][spt]) + expectRes.add(wh.info.Resources, storiface.ResourceTable[task][spt]) } require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].allocated.cpuUse, "%d", wi) diff --git a/extern/sector-storage/sched_worker.go b/extern/sector-storage/sched_worker.go index e717e58e2..762c3fc3a 100644 --- a/extern/sector-storage/sched_worker.go +++ b/extern/sector-storage/sched_worker.go @@ -4,17 +4,18 @@ 
import ( "context" "time" - "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type schedWorker struct { sched *scheduler worker *workerHandle - wid WorkerID + wid storiface.WorkerID heartbeatTimer *time.Ticker scheduledWindows chan *schedWindow @@ -50,7 +51,7 @@ func (sh *scheduler) runWorker(ctx context.Context, w Worker) error { closedMgr: make(chan struct{}), } - wid := WorkerID(sessID) + wid := storiface.WorkerID(sessID) sh.workersLk.Lock() _, exist := sh.workers[wid] @@ -237,7 +238,7 @@ func (sw *schedWorker) checkSession(ctx context.Context) bool { continue } - if WorkerID(curSes) != sw.wid { + if storiface.WorkerID(curSes) != sw.wid { if curSes != ClosedWorkerID { // worker restarted log.Warnw("worker session changed (worker restarted?)", "initial", sw.wid, "current", curSes) @@ -296,7 +297,7 @@ func (sw *schedWorker) workerCompactWindows() { var moved []int for ti, todo := range window.todo { - needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + needRes := worker.info.Resources.ResourceSpec(todo.sector.ProofType, todo.taskType) if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info) { continue } @@ -357,7 +358,7 @@ assignLoop: worker.lk.Lock() for t, todo := range firstWindow.todo { - needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + needRes := worker.info.Resources.ResourceSpec(todo.sector.ProofType, todo.taskType) if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) { tidx = t break @@ -418,7 +419,7 @@ assignLoop: continue } - needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + needRes := storiface.ResourceTable[todo.taskType][todo.sector.ProofType] if worker.active.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) { tidx = t break @@ -456,7 +457,7 @@ assignLoop: func (sw *schedWorker) startProcessingTask(req *workerRequest) error { w, sh := sw.worker, sw.sched - needRes := ResourceTable[req.taskType][req.sector.ProofType] + needRes := w.info.Resources.ResourceSpec(req.sector.ProofType, req.taskType) w.lk.Lock() w.preparing.add(w.info.Resources, needRes) @@ -539,7 +540,7 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error { func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error { w, sh := sw.worker, sw.sched - needRes := ResourceTable[req.taskType][req.sector.ProofType] + needRes := w.info.Resources.ResourceSpec(req.sector.ProofType, req.taskType) w.active.add(w.info.Resources, needRes) @@ -579,7 +580,7 @@ func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error { return nil } -func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) { +func (sh *scheduler) workerCleanup(wid storiface.WorkerID, w *workerHandle) { select { case <-w.closingMgr: default: diff --git a/extern/sector-storage/sealtasks/task.go b/extern/sector-storage/sealtasks/task.go index 6d341a4b3..f6104878b 100644 --- a/extern/sector-storage/sealtasks/task.go +++ b/extern/sector-storage/sealtasks/task.go @@ -13,17 +13,26 @@ const ( TTFetch TaskType = "seal/v0/fetch" TTUnseal TaskType = "seal/v0/unseal" + + TTReplicaUpdate TaskType = "seal/v0/replicaupdate" + TTProveReplicaUpdate1 TaskType = "seal/v0/provereplicaupdate/1" + TTProveReplicaUpdate2 TaskType = "seal/v0/provereplicaupdate/2" + 
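The scheduler now asks each worker for its own requirements via info.Resources.ResourceSpec(proofType, taskType) instead of reading the global ResourceTable directly, which is what lets per-worker env overrides take effect. The sketch below only illustrates the shape of such a lookup (shared defaults, worker-local values win); it is a guess at the behaviour, not the storiface method:

```go
package main

import "fmt"

// taskRes is a cut-down stand-in for storiface.Resources.
type taskRes struct {
	MaxMemory      uint64
	MaxParallelism int
}

// workerRes stands in for the per-worker resource info; in Lotus the
// overrides ultimately come from the worker's environment.
type workerRes struct {
	overrides map[string]taskRes
}

// resourceSpec sketches what a per-worker lookup has to do: start from shared
// defaults and prefer anything the worker itself declares.
func (w workerRes) resourceSpec(task string, defaults map[string]taskRes) taskRes {
	if r, ok := w.overrides[task]; ok {
		return r
	}
	return defaults[task]
}

func main() {
	defaults := map[string]taskRes{
		"seal/v0/addpiece": {MaxMemory: 8 << 30, MaxParallelism: 1},
	}
	w := workerRes{overrides: map[string]taskRes{
		"seal/v0/addpiece": {MaxMemory: 99999, MaxParallelism: 1},
	}}

	fmt.Println(w.resourceSpec("seal/v0/addpiece", defaults).MaxMemory)           // 99999
	fmt.Println(workerRes{}.resourceSpec("seal/v0/addpiece", defaults).MaxMemory) // default value
}
```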
TTRegenSectorKey TaskType = "seal/v0/regensectorkey" ) var order = map[TaskType]int{ - TTAddPiece: 6, // least priority - TTPreCommit1: 5, - TTPreCommit2: 4, - TTCommit2: 3, - TTCommit1: 2, - TTUnseal: 1, - TTFetch: -1, - TTFinalize: -2, // most priority + TTRegenSectorKey: 10, // least priority + TTAddPiece: 9, + TTReplicaUpdate: 8, + TTProveReplicaUpdate2: 7, + TTProveReplicaUpdate1: 6, + TTPreCommit1: 5, + TTPreCommit2: 4, + TTCommit2: 3, + TTCommit1: 2, + TTUnseal: 1, + TTFetch: -1, + TTFinalize: -2, // most priority } var shortNames = map[TaskType]string{ @@ -38,6 +47,11 @@ var shortNames = map[TaskType]string{ TTFetch: "GET", TTUnseal: "UNS", + + TTReplicaUpdate: "RU", + TTProveReplicaUpdate1: "PR1", + TTProveReplicaUpdate2: "PR2", + TTRegenSectorKey: "GSK", } func (a TaskType) MuchLess(b TaskType) (bool, bool) { diff --git a/extern/sector-storage/stores/http_handler.go b/extern/sector-storage/stores/http_handler.go index 845bfdd7b..771a9a3a1 100644 --- a/extern/sector-storage/stores/http_handler.go +++ b/extern/sector-storage/stores/http_handler.go @@ -2,7 +2,6 @@ package stores import ( "encoding/json" - "io" "net/http" "os" "strconv" @@ -85,7 +84,6 @@ func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request // remoteGetSector returns the sector file/tared directory byte stream for the sectorID and sector file type sent in the request. // returns an error if it does NOT have the required sector file/dir. func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) { - log.Infof("SERVE GET %s", r.URL) vars := mux.Vars(r) id, err := storiface.ParseSectorID(vars["id"]) @@ -139,17 +137,12 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ return } - rd, err := tarutil.TarDirectory(path) - if err != nil { - log.Errorf("%+v", err) - w.WriteHeader(500) - return - } - w.Header().Set("Content-Type", "application/x-tar") w.WriteHeader(200) - if _, err := io.CopyBuffer(w, rd, make([]byte, CopyBuf)); err != nil { - log.Errorf("%+v", err) + + err := tarutil.TarDirectory(path, w, make([]byte, CopyBuf)) + if err != nil { + log.Errorf("send tar: %+v", err) return } } else { diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index 2a37e653a..a90cdf0b9 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -29,6 +29,8 @@ var SkippedHeartbeatThresh = HeartbeatInterval * 5 // filesystem, local or networked / shared by multiple machines type ID string +type Group = string + type StorageInfo struct { ID ID URLs []string // TODO: Support non-http transports @@ -37,6 +39,9 @@ type StorageInfo struct { CanSeal bool CanStore bool + + Groups []Group + AllowTo []Group } type HealthReport struct { @@ -71,6 +76,7 @@ type SectorIndex interface { // part of storage-miner api // atomically acquire locks on all sector file types. 
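remoteGetSector above now streams the tarball straight into the http.ResponseWriter, with tarutil.TarDirectory taking the destination writer and a copy buffer, instead of building an intermediate reader and copying it. A minimal standard-library version of the same streaming pattern (the helper, path and buffer size here are illustrative, not tarutil's API):

```go
package main

import (
	"archive/tar"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"
)

// tarDirectory writes every regular file under dir into w as a tar stream,
// copying through buf; this mirrors the shape of TarDirectory(path, w, buf).
func tarDirectory(dir string, w io.Writer, buf []byte) error {
	tw := tar.NewWriter(w)
	defer tw.Close()

	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil || !info.Mode().IsRegular() {
			return err
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		hdr.Name, _ = filepath.Rel(dir, path)
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close() // nolint
		_, err = io.CopyBuffer(tw, f, buf)
		return err
	})
}

func main() {
	http.HandleFunc("/sector", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/x-tar")
		w.WriteHeader(200)
		// Stream directly into the response; only buf is held in memory.
		if err := tarDirectory("/tmp/sector-cache", w, make([]byte, 1<<20)); err != nil {
			log.Printf("send tar: %+v", err) // headers already sent, can only log
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```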
close ctx to unlock StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) + StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) StorageList(ctx context.Context) (map[ID][]Decl, error) } @@ -168,6 +174,8 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsS i.stores[si.ID].info.MaxStorage = si.MaxStorage i.stores[si.ID].info.CanSeal = si.CanSeal i.stores[si.ID].info.CanStore = si.CanStore + i.stores[si.ID].info.Groups = si.Groups + i.stores[si.ID].info.AllowTo = si.AllowTo return nil } @@ -292,6 +300,8 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif storageIDs := map[ID]uint64{} isprimary := map[ID]bool{} + allowTo := map[Group]struct{}{} + for _, pathType := range storiface.PathTypes { if ft&pathType == 0 { continue @@ -323,6 +333,14 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif urls[k] = rl.String() } + if allowTo != nil && len(st.info.AllowTo) > 0 { + for _, group := range st.info.AllowTo { + allowTo[group] = struct{}{} + } + } else { + allowTo = nil // allow to any + } + out = append(out, SectorStorageInfo{ ID: id, URLs: urls, @@ -365,6 +383,22 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif continue } + if allowTo != nil { + allow := false + for _, group := range st.info.Groups { + if _, found := allowTo[group]; found { + log.Debugf("path %s in allowed group %s", st.info.ID, group) + allow = true + break + } + } + + if !allow { + log.Debugf("not selecting on %s, not in allowed group, allow %+v; path has %+v", st.info.ID, allowTo, st.info.Groups) + continue + } + } + urls := make([]string, len(st.info.URLs)) for k, u := range st.info.URLs { rl, err := url.Parse(u) diff --git a/extern/sector-storage/stores/index_locks.go b/extern/sector-storage/stores/index_locks.go index 3a5ff940e..ade437c6b 100644 --- a/extern/sector-storage/stores/index_locks.go +++ b/extern/sector-storage/stores/index_locks.go @@ -2,6 +2,7 @@ package stores import ( "context" + "sort" "sync" "golang.org/x/xerrors" @@ -154,3 +155,32 @@ func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read func (i *indexLocks) StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) { return i.lockWith(ctx, (*sectorLock).tryLockSafe, sector, read, write) } + +func (i *indexLocks) StorageGetLocks(context.Context) (storiface.SectorLocks, error) { + i.lk.Lock() + defer i.lk.Unlock() + + out := storiface.SectorLocks{ + Locks: []storiface.SectorLock{}, + } + + for id, lock := range i.locks { + l := storiface.SectorLock{Sector: id} + + for t, b := range lock.w.All() { + if b { + l.Write[t]++ + } + } + + copy(l.Read[:], lock.r[:]) + + out.Locks = append(out.Locks, l) + } + + sort.Slice(out.Locks, func(i, j int) bool { + return out.Locks[i].Sector.Number < out.Locks[j].Sector.Number + }) + + return out, nil +} diff --git a/extern/sector-storage/stores/index_test.go b/extern/sector-storage/stores/index_test.go new file mode 100644 index 000000000..bb4239035 --- /dev/null +++ b/extern/sector-storage/stores/index_test.go @@ -0,0 +1,154 @@ +package stores + +import ( + "context" + "testing" + + "github.com/google/uuid" + logging "github.com/ipfs/go-log/v2" + 
"github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +func init() { + logging.SetLogLevel("stores", "DEBUG") +} + +func newTestStorage() StorageInfo { + return StorageInfo{ + ID: ID(uuid.New().String()), + CanSeal: true, + CanStore: true, + Groups: nil, + AllowTo: nil, + } +} + +var bigFsStat = fsutil.FsStat{ + Capacity: 1 << 40, + Available: 1 << 40, + FSAvailable: 1 << 40, + Reserved: 0, + Max: 0, + Used: 0, +} + +const s32g = 32 << 30 + +func TestFindSimple(t *testing.T) { + ctx := context.Background() + + i := NewIndex() + stor1 := newTestStorage() + stor2 := newTestStorage() + + require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat)) + + s1 := abi.SectorID{ + Miner: 12, + Number: 34, + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 0) + } + + require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true)) + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 2) + } +} + +func TestFindNoAllow(t *testing.T) { + ctx := context.Background() + + i := NewIndex() + stor1 := newTestStorage() + stor1.AllowTo = []Group{"grp1"} + stor2 := newTestStorage() + + require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat)) + + s1 := abi.SectorID{ + Miner: 12, + Number: 34, + } + require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true)) + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } +} + +func TestFindAllow(t *testing.T) { + ctx := context.Background() + + i := NewIndex() + + stor1 := newTestStorage() + stor1.AllowTo = []Group{"grp1"} + + stor2 := newTestStorage() + stor2.Groups = []Group{"grp1"} + + stor3 := newTestStorage() + stor3.Groups = []Group{"grp2"} + + require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor3, bigFsStat)) + + s1 := abi.SectorID{ + Miner: 12, + Number: 34, + } + require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true)) + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 2) + if si[0].ID == stor1.ID { + require.Equal(t, stor1.ID, si[0].ID) + require.Equal(t, stor2.ID, si[1].ID) + } else { + require.Equal(t, stor1.ID, si[1].ID) + require.Equal(t, stor2.ID, si[0].ID) + } + } +} diff --git a/extern/sector-storage/stores/local.go b/extern/sector-storage/stores/local.go index c2e8e3df6..8121c418d 100644 --- 
a/extern/sector-storage/stores/local.go +++ b/extern/sector-storage/stores/local.go @@ -46,6 +46,13 @@ type LocalStorageMeta struct { // MaxStorage specifies the maximum number of bytes to use for sector storage // (0 = unlimited) MaxStorage uint64 + + // List of storage groups this path belongs to + Groups []string + + // List of storage groups to which data from this path can be moved. If none + // are specified, allow to all + AllowTo []string } // StorageConfig .lotusstorage/storage.json @@ -212,6 +219,8 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { MaxStorage: meta.MaxStorage, CanSeal: meta.CanSeal, CanStore: meta.CanStore, + Groups: meta.Groups, + AllowTo: meta.AllowTo, }, fst) if err != nil { return xerrors.Errorf("declaring storage in index: %w", err) @@ -276,6 +285,8 @@ func (st *Local) Redeclare(ctx context.Context) error { MaxStorage: meta.MaxStorage, CanSeal: meta.CanSeal, CanStore: meta.CanStore, + Groups: meta.Groups, + AllowTo: meta.AllowTo, }, fst) if err != nil { return xerrors.Errorf("redeclaring storage in index: %w", err) diff --git a/extern/sector-storage/stores/mocks/index.go b/extern/sector-storage/stores/mocks/index.go index be0eaf967..268148536 100644 --- a/extern/sector-storage/stores/mocks/index.go +++ b/extern/sector-storage/stores/mocks/index.go @@ -110,6 +110,21 @@ func (mr *MockSectorIndexMockRecorder) StorageFindSector(arg0, arg1, arg2, arg3, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageFindSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageFindSector), arg0, arg1, arg2, arg3, arg4) } +// StorageGetLocks mocks base method. +func (m *MockSectorIndex) StorageGetLocks(arg0 context.Context) (storiface.SectorLocks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageGetLocks", arg0) + ret0, _ := ret[0].(storiface.SectorLocks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageGetLocks indicates an expected call of StorageGetLocks. +func (mr *MockSectorIndexMockRecorder) StorageGetLocks(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageGetLocks", reflect.TypeOf((*MockSectorIndex)(nil).StorageGetLocks), arg0) +} + // StorageInfo mocks base method. func (m *MockSectorIndex) StorageInfo(arg0 context.Context, arg1 stores.ID) (stores.StorageInfo, error) { m.ctrl.T.Helper() diff --git a/extern/sector-storage/stores/remote.go b/extern/sector-storage/stores/remote.go index aa6075e62..bd6b34be3 100644 --- a/extern/sector-storage/stores/remote.go +++ b/extern/sector-storage/stores/remote.go @@ -281,7 +281,7 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { switch mediatype { case "application/x-tar": - return tarutil.ExtractTar(resp.Body, outname) + return tarutil.ExtractTar(resp.Body, outname, make([]byte, CopyBuf)) case "application/octet-stream": f, err := os.Create(outname) if err != nil { @@ -305,7 +305,6 @@ func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.Registe return false, xerrors.Errorf("request: %w", err) } req.Header = r.auth.Clone() - fmt.Printf("req using header: %#v \n", r.auth) req = req.WithContext(ctx) resp, err := http.DefaultClient.Do(req) @@ -586,7 +585,7 @@ func (r *Remote) CheckIsUnsealed(ctx context.Context, s storage.SectorRef, offse // 1. no worker(local worker included) has an unsealed file for the given sector OR // 2. no worker(local worker included) has the unsealed piece in their unsealed sector file. 
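With Groups and AllowTo on LocalStorageMeta, a path can declare which groups it belongs to and which groups its sectors may be moved to; StorageFindSector then filters destination paths accordingly. A sketch of writing such per-path metadata; the struct mirrors only the fields shown above, and the sectorstore.json file name is an assumption about where path metadata lives:

```go
package main

import (
	"encoding/json"
	"os"
	"path/filepath"
)

// pathMeta mirrors the LocalStorageMeta fields relevant to storage groups.
type pathMeta struct {
	ID       string
	CanSeal  bool
	CanStore bool
	Groups   []string // groups this path belongs to
	AllowTo  []string // groups data from this path may be moved to (empty = all)
}

func main() {
	// A sealing path in group "rack1" whose sectors may only be fetched into
	// paths that are members of group "long-term-a".
	m := pathMeta{
		ID:      "example-storage-id",
		CanSeal: true,
		Groups:  []string{"rack1"},
		AllowTo: []string{"long-term-a"},
	}

	b, err := json.MarshalIndent(m, "", "  ")
	if err != nil {
		panic(err)
	}
	// Assumed location of the per-path metadata file.
	if err := os.WriteFile(filepath.Join("/srv/seal-storage", "sectorstore.json"), b, 0644); err != nil {
		panic(err)
	}
}
```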
// Will return a nil reader and a nil error in such a case. -func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { +func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error), error) { ft := storiface.FTUnsealed // check if we have the unsealed sector file locally @@ -624,7 +623,52 @@ func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size a if has { log.Infof("returning piece reader for local unsealed piece sector=%+v, (offset=%d, size=%d)", s.ID, offset, size) - return r.pfHandler.Reader(pf, storiface.PaddedByteIndex(offset), size) + + return func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error) { + // don't reuse between readers unless closed + f := pf + pf = nil + + if f == nil { + f, err = r.pfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + return nil, xerrors.Errorf("opening partial file: %w", err) + } + log.Debugf("local partial file (re)opened %s (+%d,%d)", path, offset, size) + } + + r, err := r.pfHandler.Reader(f, storiface.PaddedByteIndex(offset)+startOffsetAligned, size-abi.PaddedPieceSize(startOffsetAligned)) + if err != nil { + return nil, err + } + + return struct { + io.Reader + io.Closer + }{ + Reader: r, + Closer: funcCloser(func() error { + // if we already have a reader cached, close this one + if pf != nil { + if f == nil { + return nil + } + if pf == f { + pf = nil + } + + tmp := f + f = nil + return tmp.Close() + } + + // otherwise stash it away for reuse + pf = f + return nil + }), + }, nil + }, nil + } log.Debugf("miner has unsealed file but not unseal piece, %s (+%d,%d)", path, offset, size) @@ -667,16 +711,18 @@ func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size a continue } - // readRemote fetches a reader that we can use to read the unsealed piece from the remote worker. - // It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file. - rd, err := r.readRemote(ctx, url, offset, size) - if err != nil { - log.Warnw("reading from remote", "url", url, "error", err) - lastErr = err - continue - } - log.Infof("Read remote %s (+%d,%d)", url, offset, size) - return rd, nil + return func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error) { + // readRemote fetches a reader that we can use to read the unsealed piece from the remote worker. + // It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file. 
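Remote.Reader now returns a getter, func(startOffsetAligned) (io.ReadCloser, error), so the piece reader can reopen the stream at a new aligned offset on demand; in the local case it also stashes the already-open partial file and reuses it on the next getter call until it is really closed. The reuse trick in a simplified, single-threaded form (`handle` and `openHandle` are stand-ins, not Lotus types):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// handle stands in for an open partial-file handle that can serve reads at
// arbitrary offsets (the role of the cached partial file pf above).
type handle struct{ data string }

func openHandle() *handle {
	fmt.Println("file opened")
	return &handle{data: strings.Repeat("x", 2048)}
}

func (h *handle) readerAt(off int64) io.Reader { return strings.NewReader(h.data[int(off):]) }
func (h *handle) Close() error                 { fmt.Println("file closed"); return nil }

// readCloser glues a reader to a custom Close, like funcCloser in the diff.
type readCloser struct {
	io.Reader
	close func() error
}

func (rc readCloser) Close() error { return rc.close() }

// getter caches one open handle between calls: each call takes the cached
// handle (opening a fresh one if the cache is empty) and returns a reader at
// the requested offset; Close stashes the handle back for reuse unless some
// other handle was stashed in the meantime, in which case it really closes.
func getter() func(off int64) io.ReadCloser {
	var cached *handle

	return func(off int64) io.ReadCloser {
		f := cached
		cached = nil
		if f == nil {
			f = openHandle()
		}
		return readCloser{f.readerAt(off), func() error {
			if cached == nil {
				cached = f
				return nil
			}
			return f.Close()
		}}
	}
}

func main() {
	get := getter()
	r := get(0)
	_ = r.Close()
	r = get(1016) // no second "file opened": the handle was reused
	_ = r.Close()
}
```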
+ rd, err := r.readRemote(ctx, url, offset+abi.PaddedPieceSize(startOffsetAligned), size) + if err != nil { + log.Warnw("reading from remote", "url", url, "error", err) + return nil, err + } + + return rd, err + }, nil + } } @@ -693,3 +739,11 @@ func (r *Remote) Reserve(ctx context.Context, sid storage.SectorRef, ft storifac } var _ Store = &Remote{} + +type funcCloser func() error + +func (f funcCloser) Close() error { + return f() +} + +var _ io.Closer = funcCloser(nil) diff --git a/extern/sector-storage/stores/remote_test.go b/extern/sector-storage/stores/remote_test.go index 754f6d1ce..a7a82a728 100644 --- a/extern/sector-storage/stores/remote_test.go +++ b/extern/sector-storage/stores/remote_test.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "net/http/httptest" @@ -472,12 +473,20 @@ func TestReader(t *testing.T) { remoteStore := stores.NewRemote(lstore, index, nil, 6000, pfhandler) - rd, err := remoteStore.Reader(ctx, sectorRef, offset, size) + rdg, err := remoteStore.Reader(ctx, sectorRef, offset, size) + var rd io.ReadCloser if tc.errStr != "" { - require.Error(t, err) - require.Nil(t, rd) - require.Contains(t, err.Error(), tc.errStr) + if rdg == nil { + require.Error(t, err) + require.Nil(t, rdg) + require.Contains(t, err.Error(), tc.errStr) + } else { + rd, err = rdg(0) + require.Error(t, err) + require.Nil(t, rd) + require.Contains(t, err.Error(), tc.errStr) + } } else { require.NoError(t, err) } @@ -485,7 +494,10 @@ func TestReader(t *testing.T) { if !tc.expectedNonNilReader { require.Nil(t, rd) } else { - require.NotNil(t, rd) + require.NotNil(t, rdg) + rd, err := rdg(0) + require.NoError(t, err) + defer func() { require.NoError(t, rd.Close()) }() diff --git a/extern/sector-storage/storiface/filetype.go b/extern/sector-storage/storiface/filetype.go index 2e0999022..83fcadc90 100644 --- a/extern/sector-storage/storiface/filetype.go +++ b/extern/sector-storage/storiface/filetype.go @@ -12,11 +12,13 @@ const ( FTUnsealed SectorFileType = 1 << iota FTSealed FTCache + FTUpdate + FTUpdateCache FileTypes = iota ) -var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache} +var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTUpdate, FTUpdateCache} const ( FTNone SectorFileType = 0 @@ -25,15 +27,21 @@ const ( const FSOverheadDen = 10 var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads - FTUnsealed: FSOverheadDen, - FTSealed: FSOverheadDen, - FTCache: 141, // 11 layers + D(2x ssize) + C + R + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, + FTUpdate: FSOverheadDen, + FTUpdateCache: FSOverheadDen * 2, + FTCache: 141, // 11 layers + D(2x ssize) + C + R' } +// sector size * disk / fs overhead. FSOverheadDen is like the unit of sector size + var FsOverheadFinalized = map[SectorFileType]int{ - FTUnsealed: FSOverheadDen, - FTSealed: FSOverheadDen, - FTCache: 2, + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, + FTUpdate: FSOverheadDen * 2, // XXX: we should clear the update cache on Finalize??? 
+ FTUpdateCache: FSOverheadDen, + FTCache: 2, } type SectorFileType int @@ -46,6 +54,10 @@ func (t SectorFileType) String() string { return "sealed" case FTCache: return "cache" + case FTUpdate: + return "update" + case FTUpdateCache: + return "update-cache" default: return fmt.Sprintf("", t) } @@ -104,9 +116,11 @@ func (t SectorFileType) All() [FileTypes]bool { type SectorPaths struct { ID abi.SectorID - Unsealed string - Sealed string - Cache string + Unsealed string + Sealed string + Cache string + Update string + UpdateCache string } func ParseSectorID(baseName string) (abi.SectorID, error) { @@ -139,6 +153,10 @@ func PathByType(sps SectorPaths, fileType SectorFileType) string { return sps.Sealed case FTCache: return sps.Cache + case FTUpdate: + return sps.Update + case FTUpdateCache: + return sps.UpdateCache } panic("requested unknown path type") @@ -152,5 +170,9 @@ func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) { sps.Sealed = p case FTCache: sps.Cache = p + case FTUpdate: + sps.Update = p + case FTUpdateCache: + sps.UpdateCache = p } } diff --git a/extern/sector-storage/resources.go b/extern/sector-storage/storiface/resources.go similarity index 61% rename from extern/sector-storage/resources.go rename to extern/sector-storage/storiface/resources.go index 2e989fdf4..b5f45d722 100644 --- a/extern/sector-storage/resources.go +++ b/extern/sector-storage/storiface/resources.go @@ -1,19 +1,31 @@ -package sectorstorage +package storiface import ( - "github.com/filecoin-project/go-state-types/abi" + "fmt" + "reflect" + "strconv" + "strings" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" ) type Resources struct { - MinMemory uint64 // What Must be in RAM for decent perf - MaxMemory uint64 // Memory required (swap + ram) + MinMemory uint64 `envname:"MIN_MEMORY"` // What Must be in RAM for decent perf + MaxMemory uint64 `envname:"MAX_MEMORY"` // Memory required (swap + ram; peak memory usage during task execution) - MaxParallelism int // -1 = multithread - CanGPU bool + // GPUUtilization specifes the number of GPUs a task can use + GPUUtilization float64 `envname:"GPU_UTILIZATION"` - BaseMinMemory uint64 // What Must be in RAM for decent perf (shared between threads) + // MaxParallelism specifies the number of CPU cores when GPU is NOT in use + MaxParallelism int `envname:"MAX_PARALLELISM"` // -1 = multithread + + // MaxParallelismGPU specifies the number of CPU cores when GPU is in use + MaxParallelismGPU int `envname:"MAX_PARALLELISM_GPU"` // when 0, inherits MaxParallelism + + BaseMinMemory uint64 `envname:"BASE_MIN_MEMORY"` // What Must be in RAM for decent perf (shared between threads) } /* @@ -32,8 +44,14 @@ var ParallelNum uint64 = 92 var ParallelDenom uint64 = 100 // TODO: Take NUMA into account -func (r Resources) Threads(wcpus uint64) uint64 { - if r.MaxParallelism == -1 { +func (r Resources) Threads(wcpus uint64, gpus int) uint64 { + mp := r.MaxParallelism + + if r.GPUUtilization > 0 && gpus > 0 && r.MaxParallelismGPU != 0 { // task can use GPUs and worker has some + mp = r.MaxParallelismGPU + } + + if mp == -1 { n := (wcpus * ParallelNum) / ParallelDenom if n == 0 { return wcpus @@ -41,7 +59,7 @@ func (r Resources) Threads(wcpus uint64) uint64 { return n } - return uint64(r.MaxParallelism) + return uint64(mp) } var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{ @@ -134,8 +152,9 @@ var ResourceTable = 
map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MaxMemory: 30 << 30, MinMemory: 30 << 30, - MaxParallelism: -1, - CanGPU: true, + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, BaseMinMemory: 1 << 30, }, @@ -143,8 +162,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MaxMemory: 15 << 30, MinMemory: 15 << 30, - MaxParallelism: -1, - CanGPU: true, + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, BaseMinMemory: 1 << 30, }, @@ -220,8 +240,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MaxMemory: 190 << 30, // TODO: Confirm MinMemory: 60 << 30, - MaxParallelism: -1, - CanGPU: true, + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, BaseMinMemory: 64 << 30, // params }, @@ -229,8 +250,9 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory MinMemory: 30 << 30, - MaxParallelism: -1, - CanGPU: true, + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, BaseMinMemory: 32 << 30, // params }, @@ -239,7 +261,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 30, MaxParallelism: 1, // This is fine - CanGPU: true, + GPUUtilization: 1.0, BaseMinMemory: 10 << 30, }, @@ -248,7 +270,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 2 << 10, MaxParallelism: 1, - CanGPU: true, + GPUUtilization: 1.0, BaseMinMemory: 2 << 10, }, @@ -257,7 +279,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 8 << 20, MaxParallelism: 1, - CanGPU: true, + GPUUtilization: 1.0, BaseMinMemory: 8 << 20, }, @@ -268,7 +290,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 20, MaxParallelism: 0, - CanGPU: false, + GPUUtilization: 0, BaseMinMemory: 0, }, @@ -277,7 +299,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 20, MaxParallelism: 0, - CanGPU: false, + GPUUtilization: 0, BaseMinMemory: 0, }, @@ -286,7 +308,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 20, MaxParallelism: 0, - CanGPU: false, + GPUUtilization: 0, BaseMinMemory: 0, }, @@ -295,7 +317,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 20, MaxParallelism: 0, - CanGPU: false, + GPUUtilization: 0, BaseMinMemory: 0, }, @@ -304,7 +326,7 @@ var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources MinMemory: 1 << 20, MaxParallelism: 0, - CanGPU: false, + GPUUtilization: 0, BaseMinMemory: 0, }, @@ -323,3 +345,83 @@ func init() { m[abi.RegisteredSealProof_StackedDrg64GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg64GiBV1] } } + +func ParseResourceEnv(lookup func(key, def string) (string, bool)) (map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources, error) { + out := map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{} + + for taskType, defTT := range ResourceTable { + out[taskType] = map[abi.RegisteredSealProof]Resources{} + + for spt, defRes := range defTT { + r := defRes // copy + + spsz, err := spt.SectorSize() + if err != nil { + return nil, xerrors.Errorf("getting sector size: %w", err) + } + shortSize := strings.TrimSuffix(spsz.ShortString(), "iB") + + rr := reflect.ValueOf(&r) + for i := 0; i < 
rr.Elem().Type().NumField(); i++ { + f := rr.Elem().Type().Field(i) + + envname := f.Tag.Get("envname") + if envname == "" { + return nil, xerrors.Errorf("no envname for field '%s'", f.Name) + } + + envval, found := lookup(taskType.Short()+"_"+shortSize+"_"+envname, fmt.Sprint(rr.Elem().Field(i).Interface())) + if !found { + // special multicore SDR handling + if (taskType == sealtasks.TTPreCommit1 || taskType == sealtasks.TTUnseal) && envname == "MAX_PARALLELISM" { + v, ok := rr.Elem().Field(i).Addr().Interface().(*int) + if !ok { + // can't happen, but let's not panic + return nil, xerrors.Errorf("res.MAX_PARALLELISM is not int (!?): %w", err) + } + *v, err = getSDRThreads(lookup) + if err != nil { + return nil, err + } + } + + continue + } + + v := rr.Elem().Field(i).Addr().Interface() + switch fv := v.(type) { + case *uint64: + *fv, err = strconv.ParseUint(envval, 10, 64) + case *int: + *fv, err = strconv.Atoi(envval) + case *float64: + *fv, err = strconv.ParseFloat(envval, 64) + default: + return nil, xerrors.Errorf("unknown resource field type") + } + } + + out[taskType][spt] = r + } + } + + return out, nil +} + +func getSDRThreads(lookup func(key, def string) (string, bool)) (_ int, err error) { + producers := 0 + + if v, _ := lookup("FIL_PROOFS_USE_MULTICORE_SDR", ""); v == "1" { + producers = 3 + + if penv, found := lookup("FIL_PROOFS_MULTICORE_SDR_PRODUCERS", ""); found { + producers, err = strconv.Atoi(penv) + if err != nil { + return 0, xerrors.Errorf("parsing (atoi) FIL_PROOFS_MULTICORE_SDR_PRODUCERS: %w", err) + } + } + } + + // producers + the one core actually doing the work + return producers + 1, nil +} diff --git a/extern/sector-storage/storiface/resources_test.go b/extern/sector-storage/storiface/resources_test.go new file mode 100644 index 000000000..bf7425d24 --- /dev/null +++ b/extern/sector-storage/storiface/resources_test.go @@ -0,0 +1,75 @@ +package storiface + +import ( + "fmt" + "testing" + + stabi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/stretchr/testify/require" +) + +func TestListResourceVars(t *testing.T) { + _, err := ParseResourceEnv(func(key, def string) (string, bool) { + if def != "" { + fmt.Printf("%s=%s\n", key, def) + } + + return "", false + }) + + require.NoError(t, err) +} + +func TestListResourceOverride(t *testing.T) { + rt, err := ParseResourceEnv(func(key, def string) (string, bool) { + if key == "UNS_2K_MAX_PARALLELISM" { + return "2", true + } + if key == "PC2_2K_GPU_UTILIZATION" { + return "0.4", true + } + if key == "PC2_2K_MAX_MEMORY" { + return "2222", true + } + + return "", false + }) + + require.NoError(t, err) + require.Equal(t, 2, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + require.Equal(t, 0.4, rt[sealtasks.TTPreCommit2][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].GPUUtilization) + require.Equal(t, uint64(2222), rt[sealtasks.TTPreCommit2][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxMemory) + + // check that defaults don't get mutated + require.Equal(t, 1, ResourceTable[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) +} + +func TestListResourceSDRMulticoreOverride(t *testing.T) { + rt, err := ParseResourceEnv(func(key, def string) (string, bool) { + if key == "FIL_PROOFS_USE_MULTICORE_SDR" { + return "1", true + } + + return "", false + }) + + require.NoError(t, err) + require.Equal(t, 4, 
rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + require.Equal(t, 4, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + + rt, err = ParseResourceEnv(func(key, def string) (string, bool) { + if key == "FIL_PROOFS_USE_MULTICORE_SDR" { + return "1", true + } + if key == "FIL_PROOFS_MULTICORE_SDR_PRODUCERS" { + return "9000", true + } + + return "", false + }) + + require.NoError(t, err) + require.Equal(t, 9001, rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + require.Equal(t, 9001, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) +} diff --git a/extern/sector-storage/storiface/storage.go b/extern/sector-storage/storiface/storage.go index e836002d5..624b71d77 100644 --- a/extern/sector-storage/storiface/storage.go +++ b/extern/sector-storage/storiface/storage.go @@ -1,5 +1,7 @@ package storiface +import "github.com/filecoin-project/go-state-types/abi" + type PathType string const ( @@ -13,3 +15,17 @@ const ( AcquireMove AcquireMode = "move" AcquireCopy AcquireMode = "copy" ) + +type Refs struct { + RefCount [FileTypes]uint +} + +type SectorLock struct { + Sector abi.SectorID + Write [FileTypes]uint + Read [FileTypes]uint +} + +type SectorLocks struct { + Locks []SectorLock +} diff --git a/extern/sector-storage/storiface/worker.go b/extern/sector-storage/storiface/worker.go index e3374d6cf..8bb6a256a 100644 --- a/extern/sector-storage/storiface/worker.go +++ b/extern/sector-storage/storiface/worker.go @@ -15,6 +15,12 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" ) +type WorkerID uuid.UUID // worker session UUID + +func (w WorkerID) String() string { + return uuid.UUID(w).String() +} + type WorkerInfo struct { Hostname string @@ -28,12 +34,35 @@ type WorkerInfo struct { type WorkerResources struct { MemPhysical uint64 + MemUsed uint64 MemSwap uint64 - - MemReserved uint64 // Used by system / other processes + MemSwapUsed uint64 CPUs uint64 // Logical cores GPUs []string + + // if nil use the default resource table + Resources map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources +} + +func (wr WorkerResources) ResourceSpec(spt abi.RegisteredSealProof, tt sealtasks.TaskType) Resources { + res := ResourceTable[tt][spt] + + // if the worker specifies custom resource table, prefer that + if wr.Resources != nil { + tr, ok := wr.Resources[tt] + if !ok { + return res + } + + r, ok := tr[spt] + if ok { + return r + } + } + + // otherwise, use the default resource table + return res } type WorkerStats struct { @@ -42,8 +71,8 @@ type WorkerStats struct { MemUsedMin uint64 MemUsedMax uint64 - GpuUsed bool // nolint - CpuUse uint64 // nolint + GpuUsed float64 // nolint + CpuUse uint64 // nolint } const ( @@ -92,6 +121,10 @@ type WorkerCalls interface { SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error) + ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (CallID, error) + ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (CallID, error) + ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs 
storage.ReplicaVanillaProofs) (CallID, error) + GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (CallID, error) MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (CallID, error) UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error) Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (CallID, error) @@ -145,6 +178,10 @@ type WorkerReturn interface { ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err *CallError) error ReturnFinalizeSector(ctx context.Context, callID CallID, err *CallError) error ReturnReleaseUnsealed(ctx context.Context, callID CallID, err *CallError) error + ReturnReplicaUpdate(ctx context.Context, callID CallID, out storage.ReplicaUpdateOut, err *CallError) error + ReturnProveReplicaUpdate1(ctx context.Context, callID CallID, proofs storage.ReplicaVanillaProofs, err *CallError) error + ReturnProveReplicaUpdate2(ctx context.Context, callID CallID, proof storage.ReplicaUpdateProof, err *CallError) error + ReturnGenerateSectorKeyFromData(ctx context.Context, callID CallID, err *CallError) error ReturnMoveStorage(ctx context.Context, callID CallID, err *CallError) error ReturnUnsealPiece(ctx context.Context, callID CallID, err *CallError) error ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err *CallError) error diff --git a/extern/sector-storage/tarutil/systar.go b/extern/sector-storage/tarutil/systar.go index 2329aafc7..eb958fa02 100644 --- a/extern/sector-storage/tarutil/systar.go +++ b/extern/sector-storage/tarutil/systar.go @@ -14,7 +14,7 @@ import ( var log = logging.Logger("tarutil") // nolint -func ExtractTar(body io.Reader, dir string) error { +func ExtractTar(body io.Reader, dir string, buf []byte) error { if err := os.MkdirAll(dir, 0755); err != nil { // nolint return xerrors.Errorf("mkdir: %w", err) } @@ -38,7 +38,7 @@ func ExtractTar(body io.Reader, dir string) error { // This data is coming from a trusted source, no need to check the size. 
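+		// buf is supplied by the caller and reused for every entry via io.CopyBuffer below,
+		// so a single CopyBuf-sized allocation (made in Remote.fetch) serves the whole extraction.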
//nolint:gosec - if _, err := io.Copy(f, tr); err != nil { + if _, err := io.CopyBuffer(f, tr, buf); err != nil { return err } @@ -48,17 +48,7 @@ func ExtractTar(body io.Reader, dir string) error { } } -func TarDirectory(dir string) (io.ReadCloser, error) { - r, w := io.Pipe() - - go func() { - _ = w.CloseWithError(writeTarDirectory(dir, w)) - }() - - return r, nil -} - -func writeTarDirectory(dir string, w io.Writer) error { +func TarDirectory(dir string, w io.Writer, buf []byte) error { tw := tar.NewWriter(w) files, err := ioutil.ReadDir(dir) @@ -81,7 +71,7 @@ func writeTarDirectory(dir string, w io.Writer) error { return xerrors.Errorf("opening %s for reading: %w", file.Name(), err) } - if _, err := io.Copy(tw, f); err != nil { + if _, err := io.CopyBuffer(tw, f, buf); err != nil { return xerrors.Errorf("copy data for file %s: %w", file.Name(), err) } diff --git a/extern/sector-storage/teststorage_test.go b/extern/sector-storage/teststorage_test.go index 72b27b154..9fdb3a913 100644 --- a/extern/sector-storage/teststorage_test.go +++ b/extern/sector-storage/teststorage_test.go @@ -55,10 +55,30 @@ func (t *testExec) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef panic("implement me") } +func (t *testExec) ReleaseSealed(ctx context.Context, sector storage.SectorRef) error { + panic("implement me") +} + func (t *testExec) Remove(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } +func (t *testExec) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { + panic("implement me") +} + +func (t *testExec) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { + panic("implement me") +} + +func (t *testExec) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) { + panic("implement me") +} + +func (t *testExec) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + panic("implement me") +} + func (t *testExec) NewSector(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } diff --git a/extern/sector-storage/testworker_test.go b/extern/sector-storage/testworker_test.go index 2fe99f3d4..dd23278ae 100644 --- a/extern/sector-storage/testworker_test.go +++ b/extern/sector-storage/testworker_test.go @@ -7,6 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" "github.com/google/uuid" + cid "github.com/ipfs/go-cid" "github.com/filecoin-project/lotus/extern/sector-storage/mock" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" @@ -67,6 +68,33 @@ func (t *testWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pie }) } +func (t *testWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + out, err := t.mockSeal.ReplicaUpdate(ctx, sector, pieces) + if err := t.ret.ReturnReplicaUpdate(ctx, ci, out, toCallError(err)); err != nil { + log.Error(err) + } + }) +} + +func (t *testWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + vanillaProofs, err 
:= t.mockSeal.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed) + if err := t.ret.ReturnProveReplicaUpdate1(ctx, ci, vanillaProofs, toCallError(err)); err != nil { + log.Error(err) + } + }) +} + +func (t *testWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + proof, err := t.mockSeal.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + if err := t.ret.ReturnProveReplicaUpdate2(ctx, ci, proof, toCallError(err)); err != nil { + log.Error(err) + } + }) +} + func (t *testWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { return t.asyncCall(sector, func(ci storiface.CallID) { t.pc1s++ @@ -102,14 +130,15 @@ func (t *testWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { } func (t *testWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { - res := ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredSealProof_StackedDrg2KiBV1] + res := storiface.ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredSealProof_StackedDrg2KiBV1] return storiface.WorkerInfo{ Hostname: "testworkerer", Resources: storiface.WorkerResources{ MemPhysical: res.MinMemory * 3, + MemUsed: res.MinMemory, + MemSwapUsed: 0, MemSwap: 0, - MemReserved: res.MinMemory, CPUs: 32, GPUs: nil, }, diff --git a/extern/sector-storage/worker_local.go b/extern/sector-storage/worker_local.go index d45d140f8..a5f5a0b9d 100644 --- a/extern/sector-storage/worker_local.go +++ b/extern/sector-storage/worker_local.go @@ -28,7 +28,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache} +var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache} type WorkerConfig struct { TaskTypes []sealtasks.TaskType @@ -42,6 +42,7 @@ type WorkerConfig struct { // used do provide custom proofs impl (mostly used in testing) type ExecutorFunc func() (ffiwrapper.Storage, error) +type EnvFunc func(string) (string, bool) type LocalWorker struct { storage stores.Store @@ -50,6 +51,7 @@ type LocalWorker struct { ret storiface.WorkerReturn executor ExecutorFunc noSwap bool + envLookup EnvFunc // see equivalent field on WorkerConfig. 
ignoreResources bool @@ -64,7 +66,7 @@ type LocalWorker struct { closing chan struct{} } -func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { +func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, envLookup EnvFunc, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { acceptTasks := map[sealtasks.TaskType]struct{}{} for _, taskType := range wcfg.TaskTypes { acceptTasks[taskType] = struct{}{} @@ -82,6 +84,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store acceptTasks: acceptTasks, executor: executor, noSwap: wcfg.NoSwap, + envLookup: envLookup, ignoreResources: wcfg.IgnoreResourceFiltering, session: uuid.New(), closing: make(chan struct{}), @@ -115,7 +118,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store } func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { - return newLocalWorker(nil, wcfg, store, local, sindex, ret, cst) + return newLocalWorker(nil, wcfg, os.LookupEnv, store, local, sindex, ret, cst) } type localWorkerPathProvider struct { @@ -145,7 +148,6 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor } sid := storiface.PathByType(storageIDs, fileType) - if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector.ID, fileType, l.op == storiface.AcquireMove); err != nil { log.Errorf("declare sector error: %+v", err) } @@ -160,16 +162,20 @@ func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) { type ReturnType string const ( - AddPiece ReturnType = "AddPiece" - SealPreCommit1 ReturnType = "SealPreCommit1" - SealPreCommit2 ReturnType = "SealPreCommit2" - SealCommit1 ReturnType = "SealCommit1" - SealCommit2 ReturnType = "SealCommit2" - FinalizeSector ReturnType = "FinalizeSector" - ReleaseUnsealed ReturnType = "ReleaseUnsealed" - MoveStorage ReturnType = "MoveStorage" - UnsealPiece ReturnType = "UnsealPiece" - Fetch ReturnType = "Fetch" + AddPiece ReturnType = "AddPiece" + SealPreCommit1 ReturnType = "SealPreCommit1" + SealPreCommit2 ReturnType = "SealPreCommit2" + SealCommit1 ReturnType = "SealCommit1" + SealCommit2 ReturnType = "SealCommit2" + FinalizeSector ReturnType = "FinalizeSector" + ReplicaUpdate ReturnType = "ReplicaUpdate" + ProveReplicaUpdate1 ReturnType = "ProveReplicaUpdate1" + ProveReplicaUpdate2 ReturnType = "ProveReplicaUpdate2" + GenerateSectorKey ReturnType = "GenerateSectorKey" + ReleaseUnsealed ReturnType = "ReleaseUnsealed" + MoveStorage ReturnType = "MoveStorage" + UnsealPiece ReturnType = "UnsealPiece" + Fetch ReturnType = "Fetch" ) // in: func(WorkerReturn, context.Context, CallID, err string) @@ -207,16 +213,20 @@ func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.Wor } var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error{ - AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece), - SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), - SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), - SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), - SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2), - FinalizeSector: 
rfunc(storiface.WorkerReturn.ReturnFinalizeSector), - ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), - MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), - UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), - Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), + AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece), + SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), + SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), + SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), + SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2), + FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector), + ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), + ReplicaUpdate: rfunc(storiface.WorkerReturn.ReturnReplicaUpdate), + ProveReplicaUpdate1: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate1), + ProveReplicaUpdate2: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate2), + GenerateSectorKey: rfunc(storiface.WorkerReturn.ReturnGenerateSectorKeyFromData), + MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), + UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), + Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), } func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) { @@ -240,7 +250,6 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, r } res, err := work(ctx, ci) - if err != nil { rb, err := json.Marshal(res) if err != nil { @@ -258,7 +267,6 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, r } } }() - return ci, nil } @@ -382,6 +390,51 @@ func (l *LocalWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, }) } +func (l *LocalWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, ReplicaUpdate, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + sealerOut, err := sb.ReplicaUpdate(ctx, sector, pieces) + return sealerOut, err + }) +} + +func (l *LocalWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, ProveReplicaUpdate1, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed) + }) +} + +func (l *LocalWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, ProveReplicaUpdate2, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + }) +} + +func (l *LocalWorker) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, 
sector, GenerateSectorKey, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return nil, sb.GenerateSectorKeyFromData(ctx, sector, commD) + }) +} + func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { sb, err := l.executor() if err != nil { @@ -482,6 +535,52 @@ func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { return l.localStore.Local(ctx) } +func (l *LocalWorker) memInfo() (memPhysical, memUsed, memSwap, memSwapUsed uint64, err error) { + h, err := sysinfo.Host() + if err != nil { + return 0, 0, 0, 0, err + } + + mem, err := h.Memory() + if err != nil { + return 0, 0, 0, 0, err + } + memPhysical = mem.Total + // mem.Available is memory available without swapping, it is more relevant for this calculation + memUsed = mem.Total - mem.Available + memSwap = mem.VirtualTotal + memSwapUsed = mem.VirtualUsed + + if cgMemMax, cgMemUsed, cgSwapMax, cgSwapUsed, err := cgroupV1Mem(); err == nil { + if cgMemMax > 0 && cgMemMax < memPhysical { + memPhysical = cgMemMax + memUsed = cgMemUsed + } + if cgSwapMax > 0 && cgSwapMax < memSwap { + memSwap = cgSwapMax + memSwapUsed = cgSwapUsed + } + } + + if cgMemMax, cgMemUsed, cgSwapMax, cgSwapUsed, err := cgroupV2Mem(); err == nil { + if cgMemMax > 0 && cgMemMax < memPhysical { + memPhysical = cgMemMax + memUsed = cgMemUsed + } + if cgSwapMax > 0 && cgSwapMax < memSwap { + memSwap = cgSwapMax + memSwapUsed = cgSwapUsed + } + } + + if l.noSwap { + memSwap = 0 + memSwapUsed = 0 + } + + return memPhysical, memUsed, memSwap, memSwapUsed, nil +} + func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { hostname, err := os.Hostname() // TODO: allow overriding from config if err != nil { @@ -493,30 +592,29 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { log.Errorf("getting gpu devices failed: %+v", err) } - h, err := sysinfo.Host() - if err != nil { - return storiface.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err) - } - - mem, err := h.Memory() + memPhysical, memUsed, memSwap, memSwapUsed, err := l.memInfo() if err != nil { return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) } - memSwap := mem.VirtualTotal - if l.noSwap { - memSwap = 0 + resEnv, err := storiface.ParseResourceEnv(func(key, def string) (string, bool) { + return l.envLookup(key) + }) + if err != nil { + return storiface.WorkerInfo{}, xerrors.Errorf("interpreting resource env vars: %w", err) } return storiface.WorkerInfo{ Hostname: hostname, IgnoreResources: l.ignoreResources, Resources: storiface.WorkerResources{ - MemPhysical: mem.Total, + MemPhysical: memPhysical, + MemUsed: memUsed, MemSwap: memSwap, - MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process + MemSwapUsed: memSwapUsed, CPUs: uint64(runtime.NumCPU()), GPUs: gpus, + Resources: resEnv, }, }, nil } diff --git a/extern/sector-storage/worker_tracked.go b/extern/sector-storage/worker_tracked.go index 5702426c3..a1c647422 100644 --- a/extern/sector-storage/worker_tracked.go +++ b/extern/sector-storage/worker_tracked.go @@ -20,7 +20,7 @@ import ( type trackedWork struct { job storiface.WorkerJob - worker WorkerID + worker storiface.WorkerID workerHostname string } @@ -58,7 +58,7 @@ func (wt *workTracker) onDone(ctx context.Context, callID storiface.CallID) { delete(wt.running, callID) } -func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid WorkerID, wi 
storiface.WorkerInfo, sid storage.SectorRef, task sealtasks.TaskType, cb func() (storiface.CallID, error)) (storiface.CallID, error) { +func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid storiface.WorkerID, wi storiface.WorkerInfo, sid storage.SectorRef, task sealtasks.TaskType, cb func() (storiface.CallID, error)) (storiface.CallID, error) { tracked := func(rw int, callID storiface.CallID) trackedWork { return trackedWork{ job: storiface.WorkerJob{ @@ -98,7 +98,6 @@ func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid Worke wt.lk.Lock() delete(wt.prepared, prepID) } - callID, err := cb() if err != nil { return callID, err @@ -122,7 +121,7 @@ func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid Worke return callID, err } -func (wt *workTracker) worker(wid WorkerID, wi storiface.WorkerInfo, w Worker) *trackedWorker { +func (wt *workTracker) worker(wid storiface.WorkerID, wi storiface.WorkerInfo, w Worker) *trackedWorker { return &trackedWorker{ Worker: w, wid: wid, @@ -152,7 +151,7 @@ func (wt *workTracker) Running() ([]trackedWork, []trackedWork) { type trackedWorker struct { Worker - wid WorkerID + wid storiface.WorkerID workerInfo storiface.WorkerInfo execute chan struct{} // channel blocking execution in case we're waiting for resources but the task is ready to execute @@ -198,4 +197,22 @@ func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, i return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, id, sealtasks.TTUnseal, func() (storiface.CallID, error) { return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid) }) } +func (t *trackedWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTReplicaUpdate, func() (storiface.CallID, error) { + return t.Worker.ReplicaUpdate(ctx, sector, pieces) + }) +} + +func (t *trackedWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTProveReplicaUpdate1, func() (storiface.CallID, error) { + return t.Worker.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed) + }) +} + +func (t *trackedWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTProveReplicaUpdate2, func() (storiface.CallID, error) { + return t.Worker.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + }) +} + var _ Worker = &trackedWorker{} diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index 9dcb779a7..c6cd0bb49 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -595,7 +595,7 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) } if err := m.checkCommit(ctx.Context(), sector, proof, tok); err != nil { - return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("commit check error: %w", err)}) + return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)}) } } diff --git a/gateway/node.go b/gateway/node.go index 56f95a31b..a0c120d39 100644 --- a/gateway/node.go 
+++ b/gateway/node.go @@ -33,6 +33,8 @@ const ( // (to make it easy to mock for tests) type TargetAPI interface { Version(context.Context) (api.APIVersion, error) + ChainGetParentMessages(context.Context, cid.Cid) ([]api.Message, error) + ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error) ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) @@ -44,6 +46,7 @@ type TargetAPI interface { ChainNotify(context.Context) (<-chan []*api.HeadChange, error) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error) ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainGetGenesis(context.Context) (*types.TipSet, error) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) @@ -144,6 +147,14 @@ func (gw *Node) Version(ctx context.Context) (api.APIVersion, error) { return gw.target.Version(ctx) } +func (gw *Node) ChainGetParentMessages(ctx context.Context, c cid.Cid) ([]api.Message, error) { + return gw.target.ChainGetParentMessages(ctx, c) +} + +func (gw *Node) ChainGetParentReceipts(ctx context.Context, c cid.Cid) ([]*types.MessageReceipt, error) { + return gw.target.ChainGetParentReceipts(ctx, c) +} + func (gw *Node) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) { return gw.target.ChainGetBlockMessages(ctx, c) } @@ -231,6 +242,10 @@ func (gw *Node) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]* return gw.target.ChainGetPath(ctx, from, to) } +func (gw *Node) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { + return gw.target.ChainGetGenesis(ctx) +} + func (gw *Node) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { return gw.target.ChainReadObj(ctx, c) } diff --git a/gen/api/proxygen.go b/gen/api/proxygen.go index 3e0766c31..df39132ff 100644 --- a/gen/api/proxygen.go +++ b/gen/api/proxygen.go @@ -10,7 +10,6 @@ import ( "path/filepath" "strings" "text/template" - "unicode" "golang.org/x/xerrors" ) @@ -71,9 +70,6 @@ func typeName(e ast.Expr, pkg string) (string, error) { return t.X.(*ast.Ident).Name + "." + t.Sel.Name, nil case *ast.Ident: pstr := t.Name - if !unicode.IsLower(rune(pstr[0])) && pkg != "api" { - pstr = "api." 
+ pstr // todo src pkg name - } return pstr, nil case *ast.ArrayType: subt, err := typeName(t.Elt, pkg) diff --git a/gen/inlinegen-data.json b/gen/inlinegen-data.json index e26b1b28f..ef97db651 100644 --- a/gen/inlinegen-data.json +++ b/gen/inlinegen-data.json @@ -1,7 +1,7 @@ { - "actorVersions": [0, 2, 3, 4, 5, 6], - "latestActorsVersion": 6, + "actorVersions": [0, 2, 3, 4, 5, 6, 7], + "latestActorsVersion": 7, - "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], - "latestNetworkVersion": 14 + "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + "latestNetworkVersion": 15 } diff --git a/go.mod b/go.mod index 5f0f954a1..c86511c38 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/filecoin-project/lotus go 1.16 require ( - contrib.go.opencensus.io/exporter/jaeger v0.2.1 contrib.go.opencensus.io/exporter/prometheus v0.4.0 github.com/BurntSushi/toml v0.4.1 github.com/GeertJohan/go.rice v1.0.2 @@ -14,47 +13,50 @@ require ( github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 github.com/buger/goterm v1.0.3 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e - github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 + github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 github.com/coreos/go-systemd/v22 v22.3.2 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e - github.com/dgraph-io/badger/v2 v2.2007.2 + github.com/dgraph-io/badger/v2 v2.2007.3 + github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/docker/go-units v0.4.0 - github.com/drand/drand v1.2.1 - github.com/drand/kyber v1.1.4 + github.com/drand/drand v1.3.0 + github.com/drand/kyber v1.1.7 github.com/dustin/go-humanize v1.0.0 github.com/elastic/go-sysinfo v1.7.0 github.com/elastic/gosigar v0.14.1 github.com/etclabscore/go-openrpc-reflect v0.0.36 github.com/fatih/color v1.13.0 - github.com/filecoin-project/dagstore v0.4.3 + github.com/filecoin-project/dagstore v0.4.4 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f github.com/filecoin-project/go-address v0.0.6 github.com/filecoin-project/go-bitfield v0.2.4 github.com/filecoin-project/go-cbor-util v0.0.1 - github.com/filecoin-project/go-commp-utils v0.1.2 + github.com/filecoin-project/go-commp-utils v0.1.3 github.com/filecoin-project/go-crypto v0.0.1 - github.com/filecoin-project/go-data-transfer v1.11.4 + github.com/filecoin-project/go-data-transfer v1.12.0 github.com/filecoin-project/go-fil-commcid v0.1.0 github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 - github.com/filecoin-project/go-fil-markets v1.13.3 + github.com/filecoin-project/go-fil-markets v1.13.5 github.com/filecoin-project/go-jsonrpc v0.1.5 github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-paramfetch v0.0.2 - github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 + github.com/filecoin-project/go-state-types v0.1.1 github.com/filecoin-project/go-statemachine v1.0.1 - github.com/filecoin-project/go-statestore v0.1.1 - github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b + github.com/filecoin-project/go-statestore v0.2.0 + github.com/filecoin-project/go-storedcounter v0.1.0 github.com/filecoin-project/specs-actors v0.9.14 - github.com/filecoin-project/specs-actors/v2 v2.3.5 + github.com/filecoin-project/specs-actors/v2 v2.3.6 github.com/filecoin-project/specs-actors/v3 v3.1.1 github.com/filecoin-project/specs-actors/v4 v4.0.1 github.com/filecoin-project/specs-actors/v5 v5.0.4 
github.com/filecoin-project/specs-actors/v6 v6.0.1 - github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 + github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec + github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.1 github.com/gdamore/tcell/v2 v2.2.0 github.com/go-kit/kit v0.12.0 + github.com/golang/glog v1.0.0 // indirect github.com/golang/mock v1.6.0 github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.7.4 @@ -66,60 +68,60 @@ require ( github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab github.com/ipfs/bbloom v0.0.4 - github.com/ipfs/go-bitswap v0.3.4 + github.com/ipfs/go-bitswap v0.5.1 github.com/ipfs/go-block-format v0.0.3 - github.com/ipfs/go-blockservice v0.1.7 + github.com/ipfs/go-blockservice v0.2.1 github.com/ipfs/go-cid v0.1.0 github.com/ipfs/go-cidutil v0.0.2 - github.com/ipfs/go-datastore v0.4.6 - github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e - github.com/ipfs/go-ds-leveldb v0.4.2 - github.com/ipfs/go-ds-measure v0.1.0 - github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 + github.com/ipfs/go-datastore v0.5.1 + github.com/ipfs/go-ds-badger2 v0.1.2 + github.com/ipfs/go-ds-leveldb v0.5.0 + github.com/ipfs/go-ds-measure v0.2.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.10.4 - github.com/ipfs/go-ipfs-blockstore v1.0.4 + github.com/ipfs/go-graphsync v0.11.0 + github.com/ipfs/go-ipfs-blockstore v1.1.2 github.com/ipfs/go-ipfs-blocksutil v0.0.1 github.com/ipfs/go-ipfs-chunker v0.0.5 - github.com/ipfs/go-ipfs-ds-help v1.0.0 - github.com/ipfs/go-ipfs-exchange-interface v0.0.1 - github.com/ipfs/go-ipfs-exchange-offline v0.0.1 + github.com/ipfs/go-ipfs-ds-help v1.1.0 + github.com/ipfs/go-ipfs-exchange-interface v0.1.0 + github.com/ipfs/go-ipfs-exchange-offline v0.1.1 github.com/ipfs/go-ipfs-files v0.0.9 github.com/ipfs/go-ipfs-http-client v0.0.6 - github.com/ipfs/go-ipfs-routing v0.1.0 + github.com/ipfs/go-ipfs-routing v0.2.1 github.com/ipfs/go-ipfs-util v0.0.2 - github.com/ipfs/go-ipld-cbor v0.0.5 + github.com/ipfs/go-ipld-cbor v0.0.6 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.3.0 - github.com/ipfs/go-merkledag v0.4.1 + github.com/ipfs/go-ipld-legacy v0.1.1 // indirect + github.com/ipfs/go-log/v2 v2.4.0 + github.com/ipfs/go-merkledag v0.5.1 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-metrics-prometheus v0.0.2 github.com/ipfs/go-path v0.0.7 github.com/ipfs/go-unixfs v0.2.6 github.com/ipfs/interface-go-ipfs-core v0.4.0 - github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 - github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 + github.com/ipld/go-car v0.3.3 + github.com/ipld/go-car/v2 v2.1.1 github.com/ipld/go-codec-dagpb v1.3.0 - github.com/ipld/go-ipld-prime v0.12.3 - github.com/ipld/go-ipld-selector-text-lite v0.0.0 + github.com/ipld/go-ipld-prime v0.14.3 + github.com/ipld/go-ipld-selector-text-lite v0.0.1 + github.com/jonboulle/clockwork v0.2.2 // indirect github.com/kelseyhightower/envconfig v1.4.0 github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 - github.com/libp2p/go-libp2p v0.15.0 - github.com/libp2p/go-libp2p-connmgr v0.2.4 - github.com/libp2p/go-libp2p-core v0.9.0 - github.com/libp2p/go-libp2p-discovery v0.5.1 - github.com/libp2p/go-libp2p-kad-dht v0.13.0 - 
github.com/libp2p/go-libp2p-mplex v0.4.1 - github.com/libp2p/go-libp2p-noise v0.2.2 - github.com/libp2p/go-libp2p-peerstore v0.3.0 - github.com/libp2p/go-libp2p-pubsub v0.5.4 - github.com/libp2p/go-libp2p-quic-transport v0.11.2 + github.com/libp2p/go-libp2p v0.17.0 + github.com/libp2p/go-libp2p-connmgr v0.3.0 + github.com/libp2p/go-libp2p-core v0.13.0 + github.com/libp2p/go-libp2p-discovery v0.6.0 + github.com/libp2p/go-libp2p-kad-dht v0.15.0 + github.com/libp2p/go-libp2p-noise v0.3.0 + github.com/libp2p/go-libp2p-peerstore v0.6.0 + github.com/libp2p/go-libp2p-pubsub v0.6.0 + github.com/libp2p/go-libp2p-quic-transport v0.15.2 github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 - github.com/libp2p/go-libp2p-swarm v0.5.3 - github.com/libp2p/go-libp2p-tls v0.2.0 - github.com/libp2p/go-libp2p-yamux v0.5.4 + github.com/libp2p/go-libp2p-swarm v0.9.0 + github.com/libp2p/go-libp2p-tls v0.3.1 + github.com/libp2p/go-libp2p-yamux v0.7.0 github.com/libp2p/go-maddr-filter v0.1.0 github.com/mattn/go-isatty v0.0.14 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 @@ -128,17 +130,18 @@ require ( github.com/multiformats/go-multiaddr v0.4.1 github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multibase v0.0.3 - github.com/multiformats/go-multihash v0.0.16 + github.com/multiformats/go-multihash v0.1.0 github.com/multiformats/go-varint v0.0.6 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/opentracing/opentracing-go v1.2.0 github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e github.com/prometheus/client_golang v1.11.0 github.com/raulk/clock v1.1.0 - github.com/raulk/go-watchdog v1.0.1 + github.com/raulk/go-watchdog v1.2.0 github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 github.com/stretchr/testify v1.7.0 github.com/syndtr/goleveldb v1.0.0 + github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect github.com/urfave/cli/v2 v2.2.0 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8 @@ -147,18 +150,24 @@ require ( github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 go.opencensus.io v0.23.0 + go.opentelemetry.io/otel v1.2.0 + go.opentelemetry.io/otel/bridge/opencensus v0.25.0 + go.opentelemetry.io/otel/exporters/jaeger v1.2.0 + go.opentelemetry.io/otel/sdk v1.2.0 go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 go.uber.org/multierr v1.7.0 go.uber.org/zap v1.19.1 - golang.org/x/net v0.0.0-20210917221730-978cfadd31cf + golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b // indirect + golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 + golang.org/x/sys v0.0.0-20211209171907-798191bca915 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac - golang.org/x/tools v0.1.5 + golang.org/x/tools v0.1.7 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible + lukechampine.com/blake3 v1.1.7 // indirect ) replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi diff --git a/go.sum b/go.sum index 95320beba..3b1f06587 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod 
h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= -contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= @@ -117,11 +115,11 @@ github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVj github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.2.0 h1:9Re3G2TWxkE06LdMWMpcY6KV81GLXMGiYpPYUPkFAws= +github.com/benbjohnson/clock v1.2.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -159,8 +157,6 @@ github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRt github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 h1:JLaf/iINcLyjwbtTsCJjc6rtlASgHeIJPrB6QmwURnA= -github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -174,6 +170,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0 h1:Fv93L3KKckEcEHR3oApXVzyBTDA8WAm6VXhPE00N3f8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= 
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= @@ -184,15 +181,6 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 h1:Cb2pZUCFXlLA8i7My+wrN51D41GeuhYOKa1dJeZt6NY= -github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3 h1:2+dpIJzYMSbLi0587YXpi8tOJT52qCOI/1I0UNThc/I= -github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw= @@ -231,7 +219,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= -github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= @@ -243,13 +230,12 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= -github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= -github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= -github.com/dgraph-io/ristretto 
v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/badger/v2 v2.2007.3 h1:Sl9tQWz92WCbVSe8pj04Tkqlm2boW+KAxd+XSs58SQI= +github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= @@ -257,12 +243,13 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand v1.2.1 h1:KB7z+69YbnQ5z22AH/LMi0ObDR8DzYmrkS6vZXTR9jI= -github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g= +github.com/drand/drand v1.3.0 h1:k/w/PtHzmlU6OmfoAqgirWyrJ4FZH8ESlJrsKF20UkM= +github.com/drand/drand v1.3.0/go.mod h1:D6kAVlxufq1gi71YCGfzN455JrXF4Q272ZJEG975fzo= github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber v1.1.4 h1:YvKM03QWGvLrdTnYmxxP5iURAX+Gdb6qRDUOgg8i60Q= github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= +github.com/drand/kyber v1.1.7 h1:YnOshFoGYSOdhf4K8BiDw4XL/l6caL92vsodAsVQbJI= +github.com/drand/kyber v1.1.7/go.mod h1:UkHLsI4W6+jT5PvNxmc0cvQAgppjTUpX+XCsN9TXmRo= github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= github.com/drand/kyber-bls12381 v0.2.1 h1:/d5/YAdaCmHpYjF1NZevOEcKGaq6LBbyvkCTIdGqDjs= github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= @@ -301,9 +288,9 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/filecoin-project/dagstore v0.4.2/go.mod h1:WY5OoLfnwISCk6eASSF927KKPqLPIlTwmG1qHpA08KY= -github.com/filecoin-project/dagstore v0.4.3 h1:yeFl6+2BRY1gOVp/hrZuFa24s7LY0Qqkqx/Gh8lidZs= -github.com/filecoin-project/dagstore v0.4.3/go.mod h1:dm/91AO5UaDd3bABFjg/5fmRH99vvpS7g1mykqvz6KQ= +github.com/filecoin-project/dagstore v0.4.3-0.20211211192320-72b849e131d2/go.mod h1:tlV8C11UljvFq3WWlMh2oMViEaVaPb6uT8eL/YQgDfk= +github.com/filecoin-project/dagstore v0.4.4 h1:luolWahhzp3ulRsapGKE7raoLE3n2cFkQUJjPyqUmF4= +github.com/filecoin-project/dagstore v0.4.4/go.mod 
h1:7BlOvaTJrFJ1Qemt5jHlLJ4VhDIuSIzGS0IwO/0AXPA= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-address v0.0.6 h1:DWQtj38ax+ogHwyH3VULRIoT8E6loyXqsk/p81xoY7M= @@ -320,26 +307,23 @@ github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQj github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= -github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= -github.com/filecoin-project/go-commp-utils v0.1.2 h1:SKLRuGdx/6WlolaWKaUzzUYWGGePuARyO4guxOPxvt4= -github.com/filecoin-project/go-commp-utils v0.1.2/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= +github.com/filecoin-project/go-commp-utils v0.1.3 h1:rTxbkNXZU7FLgdkBk8RsQIEOuPONHykEoX3xGk41Fkw= +github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9ANQrY3fDFoXdqyX04J+dWpK30= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.11.4 h1:jKvlx0/C8HSyLRn/G1P9TjtfBtFU9jbCvCVFmWbyYVQ= -github.com/filecoin-project/go-data-transfer v1.11.4/go.mod h1:2MitLI0ebCkLlPKM7NRggP/t9d+gCcREUKkCKqWRCwU= -github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= -github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-data-transfer v1.12.0 h1:y44x35JvB93kezahMURKizIa/aizGTPSHqi5cbAfTEo= +github.com/filecoin-project/go-data-transfer v1.12.0/go.mod h1:tDrD2jLU2TpVhd+5B8iqBp0fQRV4lP80WZccKXugjYc= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff h1:2bG2ggVZ/rInd/YqUfRj4A5siGuYOPxxuD4I8nYLJF0= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= -github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.13.3 
h1:iMCpG7I4fb+YLcgDnMaqZiZiyFZWNvrwHqiFPHB0/tQ= -github.com/filecoin-project/go-fil-markets v1.13.3/go.mod h1:38zuj8AgDvOfdakFLpC/syYIYgXTzkq7xqBJ6T1AuG4= +github.com/filecoin-project/go-fil-markets v1.13.5 h1:NLeF8rI5ZPOJNYEgA6NHrvnuh5QE/2dwuU/7wPW7zP0= +github.com/filecoin-project/go-fil-markets v1.13.5/go.mod h1:vXOHH3q2+zLk929W+lIq3etuDFTyJJ8nG2DwGHG2R1E= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -349,7 +333,6 @@ github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AG github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= github.com/filecoin-project/go-jsonrpc v0.1.5 h1:ckxqZ09ivBAVf5CSmxxrqqNHC7PJm3GYGtYKiNQ+vGk= github.com/filecoin-project/go-jsonrpc v0.1.5/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= -github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= @@ -363,42 +346,42 @@ github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 h1:UmKkt13NrtulubqfNXhG7SQ7Pjza8BeKdNBxngqAo64= -github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1 h1:LR260vya4p++atgf256W6yV3Lxl5mKrBFcEZePWQrdg= +github.com/filecoin-project/go-state-types v0.1.1/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw= github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/c3OROw/kXVNSTZk= -github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= +github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNdofHZoGPjfNaAo5Q= +github.com/filecoin-project/go-statestore v0.2.0/go.mod 
h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= +github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus= +github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= -github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY= github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= -github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= -github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= -github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v2 v2.3.6 h1:UxnWTfQd7JsOae39/aHCK0m1IBjdcyymCJfqxuSkn+g= +github.com/filecoin-project/specs-actors/v2 v2.3.6/go.mod h1:DJMpxVRXvev9t8P0XWA26RmTzN+MHiL9IlItVLT0zUc= github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= -github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= github.com/filecoin-project/specs-actors/v5 v5.0.4 h1:OY7BdxJWlUfUFXWV/kpNBYGXNPasDIedf42T3sGx08s= github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4UvT/lTLInCJ3JwOWZbX8Ipwq4= +github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew= github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec h1:KV9vE+Sl2Y3qKsrpba4HcE7wHwK7v6O5U/S0xHbje6A= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff h1:JO62nquOGhjoDf9+JkAcV+wsD5yhoyIKOMj70ZNdD3Q= +github.com/filecoin-project/specs-storage 
v0.1.1-0.20211213202648-f14267c929ff/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -407,8 +390,9 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -418,9 +402,6 @@ github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdk github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= -github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= @@ -487,6 +468,8 @@ github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -524,7 +507,6 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -592,7 +574,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -672,20 +653,16 @@ github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyq github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.3.4 h1:AhJhRrG8xkxh6x87b4wWs+4U4y3DVB3doI8yFNqgQME= -github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= +github.com/ipfs/go-bitswap v0.5.1 h1:721YAEDBnLIrvcIMkCHCdqp34hA8jwL9yKMkyJpSpco= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.5/go.mod h1:yLk8lBJCBRWRqerqCSVi3cE/Dncdt3vGC/PJMVKhLTY= 
-github.com/ipfs/go-blockservice v0.1.7 h1:yVe9te0M7ow8i+PPkx03YFSpxqzXx594d6h+34D6qMg= -github.com/ipfs/go-blockservice v0.1.7/go.mod h1:GmS+BAt4hrwBKkzE11AFDQUrnvqjwFatGS2MY7wOjEM= +github.com/ipfs/go-blockservice v0.2.1 h1:NJ4j/cwEfIg60rzAWcCIxRtOwbf6ZPK49MewNxObCPQ= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -695,7 +672,6 @@ github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67Fexh github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.0.8-0.20210716091050-de6c03deae1c/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cid v0.1.0 h1:YN33LQulcRHjfom/i25yoOZR4Telp1Hr/2RU3d0PnC0= github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo= @@ -704,15 +680,15 @@ github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAK github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.4.6 h1:zU2cmweykxJ+ziXnA2cPtsLe8rdR/vrthOipLPuf6kc= -github.com/ipfs/go-datastore v0.4.6/go.mod h1:XSipLSc64rFKSFRFGo1ecQl+WhYce3K7frtpHkyPFUc= +github.com/ipfs/go-datastore v0.4.7-0.20211013204805-28a3721c2e66/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -720,41 +696,33 @@ github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaH github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod 
h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger v0.2.6/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger v0.2.7 h1:ju5REfIm+v+wgVnQ19xGLYPHYHbYLR6qJfmMbCDSK1I= github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger2 v0.1.0/go.mod h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e h1:Xi1nil8K2lBOorBS6Ys7+hmUCzH8fr3U9ipdL/IrcEI= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= +github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-badger2 v0.1.2 h1:sQc2q1gaXrv8YFNeUtxil0neuyDf9hnVHfLsi7lpXfE= +github.com/ipfs/go-ds-badger2 v0.1.2/go.mod h1:3FtQmDv6fMubygEfU43bsFelYpIiXX/XEYA54l9eCwg= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= -github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 h1:W3YMLEvOXqdW+sYMiguhWP6txJwQvIQqhvpU8yAMGQs= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= -github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= -github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ds-measure v0.2.0 h1:sG4goQe0KDTccHMyT45CY1XyUbxe5VwTKpg2LjApYyQ= +github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9UvVh8V3JxE= +github.com/ipfs/go-filestore v1.1.0 h1:Pu4tLBi1bucu6/HU9llaOmb9yLFk/sgP+pW764zNDoE= +github.com/ipfs/go-filestore v1.1.0/go.mod h1:6e1/5Y6NvLuCRdmda/KA4GUhXJQ3Uat6vcWm2DJfxc8= github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= -github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= -github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.10.0/go.mod h1:cKIshzTaa5rCZjryH5xmSKZVGX9uk1wvwGvz2WEha5Y= -github.com/ipfs/go-graphsync v0.10.4 h1:1WZhyOPxgxLvHTIC2GoLltaBrjZ+JuXC2oKAEiX8f3Y= -github.com/ipfs/go-graphsync v0.10.4/go.mod h1:oei4tnWAKnZ6LPnapZGPYVVbyiKV1UP3f8BeLU7Z4JQ= +github.com/ipfs/go-graphsync v0.11.0 h1:PiiD5CnoC3xEHMW8d6uBGqGcoTwiMB5d9CORIEyF6iA= +github.com/ipfs/go-graphsync v0.11.0/go.mod h1:wC+c8vGVjAHthsVIl8LKr37cUra2GOaMYcQNNmMxDqE= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod 
h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v0.1.6/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= -github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.4 h1:DZdeya9Vu4ttvlGheQPGrj6kWehXnYZRFCp9EsZQ1hI= -github.com/ipfs/go-ipfs-blockstore v1.0.4/go.mod h1:uL7/gTJ8QIZ3MtA3dWf+s1a0U3fJy2fcEZAsovpRp+w= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.1.0/go.mod h1:5QDUApRqpgPcfGstCxYeMnjt/DYQtXXdJVCvxHHuWVk= +github.com/ipfs/go-ipfs-blockstore v1.1.1/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= +github.com/ipfs/go-ipfs-blockstore v1.1.2 h1:WCXoZcMYnvOTmlpX+RSSnhVN0uCmbWTeepTGX5lgiXw= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= @@ -769,12 +737,15 @@ github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1I github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1 h1:mEiXWdbMN6C7vtDG21Fphx8TGCbZPpQnz/496w/PL4g= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= @@ -788,23 +759,26 @@ 
github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7 github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= -github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= +github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= -github.com/ipfs/go-ipld-legacy v0.1.0 h1:wxkkc4k8cnvIGIjPO0waJCe7SHEyFgl+yQdafdjGrpA= github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= +github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= +github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI= github.com/ipfs/go-ipns v0.1.2/go.mod h1:ioQ0j02o6jdIVW+bmi18f4k2gRf0AV3kZ9KeHYHICnQ= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= @@ -824,15 +798,15 @@ github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHn github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0 h1:31Re/cPqFHpsRHgyVwjWADPoF0otB1WrjTy8ZFYwEZU= github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= +github.com/ipfs/go-log/v2 v2.4.0 h1:iR/2o9PGWanVJrBgIH5Ff8mPGOwpqLaPIAFqSnsdlzk= +github.com/ipfs/go-log/v2 v2.4.0/go.mod 
h1:nPZnh7Cj7lwS3LpRU5Mwr2ol1c2gXIEXuF6aywqrtmo= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.4.1 h1:CEEQZnwRkszN06oezuasHwDD823Xcr4p4zluUN9vXqs= -github.com/ipfs/go-merkledag v0.4.1/go.mod h1:56biPaS6e+IS0eXkEt6A8tG+BUQaEIFqDqJuFfQDBoE= +github.com/ipfs/go-merkledag v0.5.1 h1:tr17GPP5XtPhvPPiWtu20tSGZiZDuTaJRXBLcr79Umk= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-prometheus v0.0.2 h1:9i2iljLg12S78OhC6UAiXi176xvQGiZaGVF1CUVdE+s= @@ -841,10 +815,8 @@ github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho= github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= -github.com/ipfs/go-peertaskqueue v0.6.0 h1:BT1/PuNViVomiz1PnnP5+WmKsTNHrxIDvkZrkj4JhOg= -github.com/ipfs/go-peertaskqueue v0.6.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.0 h1:VyO6G4sbzX80K58N60cCaHsSsypbUNs1GjO5seGNsQ0= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= @@ -859,34 +831,30 @@ github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdm github.com/ipfs/iptb-plugins v0.3.0 h1:C1rpq1o5lUZtaAOkLIox5akh6ba4uk/3RwWc6ttVxw0= github.com/ipfs/iptb-plugins v0.3.0/go.mod h1:5QtOvckeIw4bY86gSH4fgh3p3gCSMn3FmIKr4gaBncA= github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= -github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= -github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= -github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 h1:8JMSJ0k71fU9lIUrpVwEdoX4KoxiTEX8cZG97v/hTDw= -github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823/go.mod h1:jSlTph+i/q1jLFoiKKeN69KGG0fXpwrcD0izu5C1Tpo= -github.com/ipld/go-car/v2 v2.0.0-beta1.0.20210721090610-5a9d1b217d25/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.0.2/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 h1:6Z0beJSZNsRY+7udoqUl4gQ/tqtrPuRvDySrlsvbqZA= -github.com/ipld/go-car/v2 
v2.0.3-0.20210811121346-c514a30114d7/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= +github.com/ipld/go-car v0.3.3-0.20211210032800-e6f244225a16/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car v0.3.3 h1:D6y+jvg9h2ZSv7GLUMWUwg5VTLy1E7Ak+uQw5orOg3I= +github.com/ipld/go-car v0.3.3/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car/v2 v2.1.1-0.20211211000942-be2525f6bf2d/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.1.1 h1:saaKz4nC0AdfCGHLYKeXLGn8ivoPC54fyS55uyOLKwA= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= github.com/ipld/go-codec-dagpb v1.3.0 h1:czTcaoAuNNyIYWs6Qe01DJ+sEX7B+1Z0LcXjSatMGe8= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= -github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= -github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.12.3-0.20210930132912-0b3aef3ca569/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= -github.com/ipld/go-ipld-prime v0.12.3 h1:furVobw7UBLQZwlEwfE26tYORy3PAK8VYSgZOSr3JMQ= github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= +github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= +github.com/ipld/go-ipld-prime v0.14.3-0.20211207234443-319145880958/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.14.3 h1:cGUmxSws2IHurn00/iLMDapeXsnf9+FyAtYVy8G/JsQ= +github.com/ipld/go-ipld-prime v0.14.3/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= -github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= -github.com/ipld/go-ipld-selector-text-lite v0.0.0 h1:MLU1YUAgd3Z+RfVCXUbvxH1RQjEe+larJ9jmlW1aMgA= -github.com/ipld/go-ipld-selector-text-lite v0.0.0/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= +github.com/ipld/go-ipld-selector-text-lite v0.0.1 
h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= +github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= @@ -894,7 +862,6 @@ github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= @@ -916,8 +883,9 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1 h1:qBCV/RLV02TSfQa7tFmxTihnG+u+7JXByOkhlkR5rmQ= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -960,7 +928,6 @@ github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.8/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -972,8 +939,9 @@ github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 
h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -992,8 +960,9 @@ github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40J github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-conn-security-multistream v0.3.0 h1:9UCIKlBL1hC9u7nkMXpD1nkc/T53PKMAn3/k9ivBAVc= +github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= @@ -1007,53 +976,48 @@ github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68 github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= github.com/libp2p/go-libp2p v0.3.1/go.mod h1:e6bwxbdYH1HqWTz8faTChKGR0BjPc8p+6SyP8GTTR7Y= github.com/libp2p/go-libp2p v0.4.0/go.mod h1:9EsEIf9p2UDuwtPd0DwJsAl0qXVxgAnuDGRvHbfATfI= -github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= -github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= -github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= -github.com/libp2p/go-libp2p v0.14.0/go.mod h1:dsQrWLAoIn+GkHPN/U+yypizkHiB9tnv79Os+kSgQ4Q= +github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM= -github.com/libp2p/go-libp2p v0.15.0 h1:jbMbdmtizfpvl1+oQuGJzfGhttAtuxUCavF3enwFncg= -github.com/libp2p/go-libp2p v0.15.0/go.mod h1:8Ljmwon0cZZYKrOCjFeLwQEK8bqR42dOheUZ1kSKhP0= -github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= +github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= +github.com/libp2p/go-libp2p v0.17.0 
h1:8l4GV401OSd4dFRyHDtIT/mEzdh/aQGoFC8xshYgm5M= +github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= +github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E= +github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o= +github.com/libp2p/go-libp2p-autonat v0.7.0 h1:rCP5s+A2dlhM1Xd66wurE0k7S7pPmM0D+FlqqSBXxks= +github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= -github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-blankhost v0.3.0 h1:kTnLArltMabZlzY63pgGDA4kkUcLkBFSM98zBssn/IY= +github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.3/go.mod h1:Xqh2TjSy8DD5iV2cCOMzdynd6h8OTBGoV1AWbWor3qM= github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= -github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= -github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= github.com/libp2p/go-libp2p-circuit v0.4.0 
h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= -github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= -github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= +github.com/libp2p/go-libp2p-connmgr v0.3.0 h1:yerFXrYa0oxpuVsLlndwm/bLulouHYDcvFrY/4H4fx8= +github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= @@ -1083,8 +1047,12 @@ github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJB github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= -github.com/libp2p/go-libp2p-core v0.9.0 h1:t97Mv0LIBZlP2FXVRNKKVzHJCIjbIWGxYptGId4+htU= github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= +github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.13.0 h1:IFG/s8dN6JN2OTrXX9eq2wNU/Zlz2KLdwZUp5FplgXI= +github.com/libp2p/go-libp2p-core v0.13.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= @@ -1093,10 +1061,9 @@ github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2T github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-discovery v0.5.1 h1:CJylx+h2+4+s68GvrM4pGNyfNhOYviWBPtVv5PA7sfo= -github.com/libp2p/go-libp2p-discovery v0.5.1/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-discovery v0.6.0 h1:1XdPmhMJr8Tmj/yUfkJMIi8mgwWrLUsCB3bMxdT+DSo= +github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= 
github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= @@ -1104,8 +1071,8 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= -github.com/libp2p/go-libp2p-kad-dht v0.13.0 h1:qBNYzee8BVS6RkD8ukIAGRG6LmVz8+kkeponyI7W+yA= -github.com/libp2p/go-libp2p-kad-dht v0.13.0/go.mod h1:NkGf28RNhPrcsGYWJHm6EH8ULkiJ2qxsWmpE7VTL3LI= +github.com/libp2p/go-libp2p-kad-dht v0.15.0 h1:Ke+Oj78gX5UDXnA6HBdrgvi+fStJxgYTDa51U0TsCLo= +github.com/libp2p/go-libp2p-kad-dht v0.15.0/go.mod h1:rZtPxYu1TnHHz6n1RggdGrxUX/tA1C2/Wiw3ZMUDrU0= github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= @@ -1124,17 +1091,17 @@ github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aD github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-nat v0.1.0 h1:vigUi2MEN+fwghe5ijpScxtbbDz+L/6y8XwlzYOJgSY= +github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-noise v0.2.2 h1:MRt5XGfYziDXIUy2udtMWfPmzZqUDYoC1FZoKnqPzwk= -github.com/libp2p/go-libp2p-noise v0.2.2/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= +github.com/libp2p/go-libp2p-noise v0.3.0 h1:NCVH7evhVt9njbTQshzT7N1S3Q6fjj9M11FCgfH5+cA= +github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= @@ -1146,26 +1113,26 @@ github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVd github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod 
h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= -github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= -github.com/libp2p/go-libp2p-peerstore v0.3.0 h1:wp/G0+37+GLr7tu+wE+4GWNrA3uxKg6IPRigIMSS5oQ= -github.com/libp2p/go-libp2p-peerstore v0.3.0/go.mod h1:fNX9WlOENMvdx/YD7YO/5Hkrn8+lQIk5A39BHa1HIrM= +github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= +github.com/libp2p/go-libp2p-peerstore v0.6.0 h1:HJminhQSGISBIRb93N6WK3t6Fa8OOTnHd/VBjL4mY5A= +github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= -github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.5.4 h1:rHl9/Xok4zX3zgi0pg0XnUj9Xj2OeXO8oTu85q2+YA8= -github.com/libp2p/go-libp2p-pubsub v0.5.4/go.mod h1:gVOzwebXVdSMDQBTfH8ACO5EJ4SQrvsHqCmYsCZpD0E= +github.com/libp2p/go-libp2p-pubsub v0.6.0 h1:98+RXuEWW17U6cAijK1yaTf6mw/B+n5yPA421z+dlo0= +github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= -github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-quic-transport v0.11.2 h1:p1YQDZRHH4Cv2LPtHubqlQ9ggz4CKng/REZuXZbZMhM= github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ= +github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= +github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-quic-transport v0.15.2 h1:wHBEceRy+1/8Ec8dAIyr+/P7L2YefIGprPVy5LrMM+k= +github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -1186,14 +1153,13 @@ github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evl github.com/libp2p/go-libp2p-swarm v0.2.1/go.mod 
h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+74Jjio7xGvsTgU= github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y= -github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= -github.com/libp2p/go-libp2p-swarm v0.5.3 h1:hsYaD/y6+kZff1o1Mc56NcuwSg80lIphTS/zDk3mO4M= github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= +github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= +github.com/libp2p/go-libp2p-swarm v0.9.0 h1:LdWjHDVjPMYt3NCG2EHcQiIP8XzA8BHhHz8ZLAYol2Y= +github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1203,22 +1169,26 @@ github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= -github.com/libp2p/go-libp2p-testing v0.4.2 h1:IOiA5mMigi+eEjf4J+B7fepDhsjtsoWA9QbsCqbNp5U= github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.6.0 h1:tV/wz6mS1VoAYA/5DGTiyzw9TJ+eXMCMvzU5VPLJSgg= +github.com/libp2p/go-libp2p-testing v0.6.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-tls v0.2.0 h1:N8i5wPiHudA+02sfW85R2nUbybPm7agjAywZc6pd3xA= -github.com/libp2p/go-libp2p-tls v0.2.0/go.mod h1:twrp2Ci4lE2GYspA1AnlYm+boYjqVruxDKJJj7s6xrc= +github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.3.1 h1:lsE2zYte+rZCEOHF72J1Fg3XK3dGQyKvI6i5ehJfEp0= +github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod 
h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 h1:SHt3g0FslnqIkEWF25YOB8UCOCTpGAVvHRWQYJ+veiI= github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= +github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0 h1:GfMCU+2aGGEm1zW3UcOz6wYSn8tXQalFfVfcww99i5A= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo= github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= @@ -1230,10 +1200,10 @@ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhL github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= -github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= -github.com/libp2p/go-libp2p-yamux v0.5.3/go.mod h1:Vy3TMonBAfTMXHWopsMc8iX/XGRYrRlpUaMzaeuHV/s= -github.com/libp2p/go-libp2p-yamux v0.5.4 h1:/UOPtT/6DHPtr3TtKXBHa6g0Le0szYuI33Xc/Xpd7fQ= github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= +github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= +github.com/libp2p/go-libp2p-yamux v0.7.0 h1:bVXHbTj/XH4uBBsPrg26BlDABk5WYRlssY73P0SjhPc= +github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -1250,12 +1220,14 @@ github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-msgio v0.1.0 h1:8Q7g/528ivAlfXTFWvWhVjTE8XG8sDTkRUKPYh9+5Q8= +github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod 
h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= @@ -1268,13 +1240,15 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport v0.1.0 h1:0ooKOx2iwyIkf339WCZ2HN3ujTDbkK0PjC7JVoP1AiM= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-reuseport-transport v0.0.5 h1:lJzi+vSYbyJj2faPKLxNGWEIBcaV/uJmyvsUxXy2mLw= github.com/libp2p/go-reuseport-transport v0.0.5/go.mod h1:TC62hhPc8qs5c/RoXDZG6YmjK+/YWUPC0yYmeUecbjc= +github.com/libp2p/go-reuseport-transport v0.1.0 h1:C3PHeHjmnz8m6f0uydObj02tMEoi7CyD1zuN7xQT8gc= +github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= @@ -1289,11 +1263,11 @@ github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19 github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= +github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= -github.com/libp2p/go-tcp-transport v0.2.8 h1:aLjX+Nkz+kIz3uA56WtlGKRSAnKDvnqKmv1qF4EyyE4= -github.com/libp2p/go-tcp-transport v0.2.8/go.mod h1:64rSfVidkYPLqbzpcN2IwHY4pmgirp67h++hZ/rcndQ= +github.com/libp2p/go-tcp-transport v0.4.0 h1:VDyg4j6en3OuXf90gfDQh5Sy9KowO9udnd0OU8PP6zg= +github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= 
github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= @@ -1301,7 +1275,6 @@ github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= github.com/libp2p/go-ws-transport v0.5.0 h1:cO6x4P0v6PfxbKnxmf5cY2Ny4OPDGYkUqNvZzp/zdlo= github.com/libp2p/go-ws-transport v0.5.0/go.mod h1:I2juo1dNTbl8BKSBYo98XY85kU2xds1iamArLvl8kNg= @@ -1311,23 +1284,22 @@ github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/go-yamux/v2 v2.1.1/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/go-yamux/v2 v2.2.0 h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU= github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/zeroconf/v2 v2.0.0/go.mod h1:J85R/d9joD8u8F9aHM8pBXygtG9W02enEwS+wWeL6yo= +github.com/libp2p/go-yamux/v2 v2.3.0 h1:luRV68GS1vqqr6EFUjtu1kr51d+IbW0gSowu8emYWAI= +github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs= +github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= -github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.21.2 h1:8LqqL7nBQFDUINadW0fHV/xSaCQJgmJC0Gv+qUnjd78= github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q= +github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.24.0 h1:ToR7SIIEdrgOhgVTHvPgdVRJfgVy+N0wQAagH7L4d5g= +github.com/lucas-clemente/quic-go 
v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= @@ -1343,19 +1315,17 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-15 v0.1.5 h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk= github.com/marten-seemann/qtls-go1-15 v0.1.5/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-16 v0.1.4 h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco= github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= -github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1 h1:/rpmWuGvceLwwWuaKPdjpR4JJEUH0tq64/I3hvzaNLM= github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-17 v0.1.0 h1:P9ggrs5xtwiqXv/FHNwntmuLMNq3KaSIG93AtAZ48xk= +github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -1395,7 +1365,6 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= @@ -1476,14 +1445,12 @@ github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysj github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod 
h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= -github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4= -github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.2.1-0.20210714093213-b2b5bd6fe68b/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.3.0 h1:tstDwfIjiHbnIjeM5Lp+pMrSeN+LCMsEwOrkPmWm03A= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61 h1:ZrUuMKNgJ52qHPoQ+bx0h0uBfcWmN7Px+4uKSZeesiI= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1492,13 +1459,12 @@ github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpK github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= -github.com/multiformats/go-multihash v0.0.16 h1:D2qsyy1WVculJbGv69pWmQ36ehxFoA5NiIUr1OEs6qI= -github.com/multiformats/go-multihash v0.0.16/go.mod h1:zhfEIgVnB/rPMfxgFw15ZmGoNaKyNUIE4IWHG/kC+Ag= +github.com/multiformats/go-multihash v0.1.0 h1:CgAgwqk3//SVEw3T+6DqI4mWMyRuDwZtOWcJT0q9+EA= +github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= @@ -1539,7 +1505,6 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod 
h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= @@ -1550,7 +1515,6 @@ github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= @@ -1613,7 +1577,6 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= @@ -1650,7 +1613,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -1660,8 +1622,8 @@ github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= -github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= -github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= +github.com/raulk/go-watchdog v1.2.0 h1:konN75pw2BMmZ+AfuAm5rtFsWcJpKF3m02rKituuXNo= +github.com/raulk/go-watchdog v1.2.0/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1670,6 +1632,8 @@ 
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -1757,13 +1721,13 @@ github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5J github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6VSaahyKadf4WtSsJIgne6A1WLOAGM8A= github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -1820,7 +1784,6 @@ github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIf github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= @@ -1869,6 +1832,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= @@ -1898,13 +1862,32 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.2.0 h1:YOQDvxO1FayUcT9MIhJhgMyNO1WqoduiyvQHzGN0kUQ= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel/bridge/opencensus v0.25.0 h1:18Ww8TpCEGes12HZJzB2nEbUglvMLzPxqgZypsrKiNc= +go.opentelemetry.io/otel/bridge/opencensus v0.25.0/go.mod h1:dkZDdaNwLlIutxK2Kc2m3jwW2M1ISaNf8/rOYVwuVHs= +go.opentelemetry.io/otel/exporters/jaeger v1.2.0 h1:C/5Egj3MJBXRJi22cSl07suqPqtZLnLFmH//OxETUEc= +go.opentelemetry.io/otel/exporters/jaeger v1.2.0/go.mod h1:KJLFbEMKTNPIfOxcg/WikIozEoKcPgJRz3Ce1vLlM8E= +go.opentelemetry.io/otel/internal/metric v0.25.0 h1:w/7RXe16WdPylaIXDgcYM6t/q0K5lXgSdZOEbIEyliE= +go.opentelemetry.io/otel/internal/metric v0.25.0/go.mod h1:Nhuw26QSX7d6n4duoqAFi5KOQR4AuzyMcl5eXOgwxtc= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v0.25.0 h1:7cXOnCADUsR3+EOqxPaSKwhEuNu0gz/56dRN1hpIdKw= +go.opentelemetry.io/otel/metric v0.25.0/go.mod h1:E884FSpQfnJOMMUaq+05IWlJ4rjZpk2s/F1Ju+TEEm8= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk/export/metric v0.25.0 h1:6UjAFmVB5Fza3K5qUJpYWGrk8QMPIqlSnya5FI46VBY= +go.opentelemetry.io/otel/sdk/export/metric v0.25.0/go.mod h1:Ej7NOa+WpN49EIcr1HMUYRvxXXCCnQCg2+ovdt2z8Pk= +go.opentelemetry.io/otel/sdk/metric v0.25.0 h1:J+Ta+4IAA5W9AdWhGQLfciEpavBqqSkBzTDeYvJLFNU= +go.opentelemetry.io/otel/sdk/metric v0.25.0/go.mod h1:G4xzj4LvC6xDDSsVXpvRVclQCbofGGg4ZU2VKKtDRfg= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.2.0 h1:Ys3iqbqZhcf28hHzrm5WAquMkDHNZTUkw7KHbuNjej0= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1968,11 +1951,9 @@ golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8U 
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1987,8 +1968,9 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5 golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 h1:3erb+vDS8lU1sxfDHF4/hhWyaXnhIaO+7RgL4fDZORA= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b h1:QAqMVf3pSa6eeTsuklijukjXBlj7Es2QQplab+/RbQ4= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2004,7 +1986,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013 h1:Jp57DBw4K7mimZNA3F9f7CndVcUt4kJjmyJf2rzJHoI= @@ -2035,7 +2016,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2053,7 +2033,6 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -2070,7 +2049,6 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2082,10 +2060,8 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -2101,9 +2077,11 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2190,9 +2168,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2217,6 +2193,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2224,9 +2201,11 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211209171907-798191bca915 h1:P+8mCzuEpyszAT6T42q0sxU+eveBAF/cJ2Kp0x6/8+0= +golang.org/x/sys v0.0.0-20211209171907-798191bca915/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= @@ -2307,16 +2286,15 @@ golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210225150353-54dc8c5edb56/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2345,7 +2323,6 @@ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2387,7 +2364,6 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod 
h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2484,6 +2460,9 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= diff --git a/itests/ccupgrade_test.go b/itests/ccupgrade_test.go index 8967b0f93..fe0a22e68 100644 --- a/itests/ccupgrade_test.go +++ b/itests/ccupgrade_test.go @@ -14,6 +14,7 @@ import ( "github.com/stretchr/testify/require" ) +// TODO: This needs to be repurposed into a SnapDeals test suite func TestCCUpgrade(t *testing.T) { //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 @@ -42,7 +43,7 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) { ctx := context.Background() blockTime := 5 * time.Millisecond - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.LatestActorsAt(upgradeHeight)) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.TurboUpgradeAt(upgradeHeight)) ens.InterconnectAll().BeginMining(blockTime) maddr, err := miner.ActorAddress(ctx) diff --git a/itests/deals_concurrent_test.go b/itests/deals_concurrent_test.go index 13da758d5..18d8da02a 100644 --- a/itests/deals_concurrent_test.go +++ b/itests/deals_concurrent_test.go @@ -146,7 +146,7 @@ func TestSimultanenousTransferLimit(t *testing.T) { ) runTest := func(t *testing.T) { client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts( - node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle, graphsyncThrottle))), + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle, 0, graphsyncThrottle))), node.Override(new(dtypes.Graphsync), modules.Graphsync(graphsyncThrottle, graphsyncThrottle)), )) ens.InterconnectAll().BeginMining(250 * time.Millisecond) diff --git a/itests/deals_partial_retrieval_dm-level_test.go 
b/itests/deals_partial_retrieval_dm-level_test.go new file mode 100644 index 000000000..fd289a0ac --- /dev/null +++ b/itests/deals_partial_retrieval_dm-level_test.go @@ -0,0 +1,252 @@ +package itests + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "testing" + "time" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + api0 "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/itests/kit" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipld/go-car" + textselector "github.com/ipld/go-ipld-selector-text-lite" + "github.com/stretchr/testify/require" +) + +// please talk to @ribasushi or @mikeal before modifying these test: there are +// downstream dependencies on ADL-less operation +var ( + adlFixtureCar = "fixtures/adl_test.car" + adlFixtureRoot, _ = cid.Parse("bafybeiaigxwanoxyeuzyiknhrg6io6kobfbm37ozcips6qdwumub2gaomy") + adlFixtureCommp, _ = cid.Parse("baga6ea4seaqjnmnrv4qsfz2rnda54mvo5al22dwpguhn2pmep63gl7bbqqqraai") + adlFixturePieceSize = abi.PaddedPieceSize(1024) + dmSelector = api.Selector("Links/0/Hash") + dmTextSelector = textselector.Expression(dmSelector) + dmExpectedResult = "NO ADL" + dmExpectedCarBlockCount = 4 + dmDagSpec = []api.DagSpec{{DataSelector: &dmSelector, ExportMerkleProof: true}} +) + +func TestDMLevelPartialRetrieval(t *testing.T) { + + ctx := context.Background() + + policy.SetPreCommitChallengeDelay(2) + kit.QuietMiningLogs() + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MockProofs()) + dh := kit.NewDealHarness(t, client, miner, miner) + ens.InterconnectAll().BeginMining(50 * time.Millisecond) + + _, err := client.ClientImport(ctx, api.FileRef{Path: adlFixtureCar, IsCAR: true}) + require.NoError(t, err) + + caddr, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + // + // test retrieval from local car 1st + require.NoError(t, testDMExportAsCar( + ctx, client, api.ExportRef{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + require.NoError(t, testDMExportAsFile( + ctx, client, api.ExportRef{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + + // + // ensure V0 continues functioning as expected + require.NoError(t, tesV0RetrievalAsCar( + ctx, client, api0.RetrievalOrder{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DatamodelPathSelector: &dmTextSelector, + }, t.TempDir(), + )) + require.NoError(t, testV0RetrievalAsFile( + ctx, client, api0.RetrievalOrder{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DatamodelPathSelector: &dmTextSelector, + }, t.TempDir(), + )) + + // + // now perform a storage/retrieval deal as well, and retest + dp := dh.DefaultStartDealParams() + dp.Data = &storagemarket.DataRef{ + Root: adlFixtureRoot, + PieceCid: &adlFixtureCommp, + PieceSize: adlFixturePieceSize.Unpadded(), + } + proposalCid := dh.StartDeal(ctx, dp) + + // Wait for the deal to reach StorageDealCheckForAcceptance on the client + cd, err := client.ClientGetDealInfo(ctx, *proposalCid) + require.NoError(t, err) + require.Eventually(t, func() bool { + cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) + return cd.State == storagemarket.StorageDealCheckForAcceptance + }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) + + 
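+	// Wait for the deal to be sealed, then repeat the export checks below against data fetched via an actual retrieval deal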
dh.WaitDealSealed(ctx, proposalCid, false, false, nil) + + offers, err := client.ClientFindData(ctx, adlFixtureRoot, nil) + require.NoError(t, err) + require.NotEmpty(t, offers, "no offers") + + retOrder := offers[0].Order(caddr) + retOrder.DataSelector = &dmSelector + + rr, err := client.ClientRetrieve(ctx, retOrder) + require.NoError(t, err) + + err = client.ClientRetrieveWait(ctx, rr.DealID) + require.NoError(t, err) + + require.NoError(t, testDMExportAsCar( + ctx, client, api.ExportRef{ + DealID: rr.DealID, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + require.NoError(t, testDMExportAsFile( + ctx, client, api.ExportRef{ + DealID: rr.DealID, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + +} + +func testDMExportAsFile(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + fileDest := api.FileRef{ + Path: out.Name(), + } + err = client.ClientExport(ctx, expDirective, fileDest) + if err != nil { + return err + } + return validateDMUnixFile(out) +} +func testV0RetrievalAsFile(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + cv0 := &api0.WrapperV1Full{client.FullNode} //nolint:govet + err = cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{ + Path: out.Name(), + }) + if err != nil { + return err + } + return validateDMUnixFile(out) +} +func validateDMUnixFile(r io.Reader) error { + data, err := io.ReadAll(r) + if err != nil { + return err + } + if string(data) != dmExpectedResult { + return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data) + } + + return nil +} + +func testDMExportAsCar(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + carDest := api.FileRef{ + IsCAR: true, + Path: out.Name(), + } + err = client.ClientExport(ctx, expDirective, carDest) + if err != nil { + return err + } + + return validateDMCar(out) +} +func tesV0RetrievalAsCar(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + cv0 := &api0.WrapperV1Full{client.FullNode} //nolint:govet + err = cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{ + Path: out.Name(), + IsCAR: true, + }) + if err != nil { + return err + } + + return validateDMCar(out) +} +func validateDMCar(r io.Reader) error { + cr, err := car.NewCarReader(r) + if err != nil { + return err + } + + if len(cr.Header.Roots) != 1 { + return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots)) + } else if cr.Header.Roots[0].String() != adlFixtureRoot.String() { + return fmt.Errorf("expected root cid '%s', got '%s'", adlFixtureRoot.String(), cr.Header.Roots[0].String()) + } + + blks := make([]blocks.Block, 0) + for { + b, err := cr.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + blks = append(blks, b) + } + + if len(blks) != dmExpectedCarBlockCount { + return fmt.Errorf("expected a car file with %d blocks, got one with %d instead", 
dmExpectedCarBlockCount, len(blks)) + } + + data := fmt.Sprintf("%s%s", blks[2].RawData(), blks[3].RawData()) + if data != dmExpectedResult { + return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data) + } + + return nil +} diff --git a/itests/deals_partial_retrieval_test.go b/itests/deals_partial_retrieval_test.go index 4a92db034..abc5cf411 100644 --- a/itests/deals_partial_retrieval_test.go +++ b/itests/deals_partial_retrieval_test.go @@ -10,6 +10,8 @@ import ( "testing" "time" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -19,7 +21,6 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipld/go-car" - textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/stretchr/testify/require" ) @@ -29,10 +30,11 @@ var ( sourceCar = "../build/genesis/mainnet.car" carRoot, _ = cid.Parse("bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2") carCommp, _ = cid.Parse("baga6ea4seaqmrivgzei3fmx5qxtppwankmtou6zvigyjaveu3z2zzwhysgzuina") + selectedCid, _ = cid.Parse("bafkqaetgnfwc6mjpon2g64tbm5sxa33xmvza") carPieceSize = abi.PaddedPieceSize(2097152) - textSelector = textselector.Expression("8/1/8/1/0/1/0") - textSelectorNonLink = textselector.Expression("8/1/8/1/0/1") - textSelectorNonexistent = textselector.Expression("42") + textSelector = api.Selector("8/1/8/1/0/1/0") + textSelectorNonLink = api.Selector("8/1/8/1/0/1") + textSelectorNonexistent = api.Selector("42") expectedResult = "fil/1/storagepower" ) @@ -60,77 +62,79 @@ func TestPartialRetrieval(t *testing.T) { require.NoError(t, err) // first test retrieval from local car, then do an actual deal - for _, fullCycle := range []bool{false, true} { + for _, exportMerkleProof := range []bool{false, true} { + for _, fullCycle := range []bool{false, true} { - var retOrder api.RetrievalOrder + var retOrder api.RetrievalOrder + var eref api.ExportRef - if !fullCycle { + if !fullCycle { + eref.FromLocalCAR = sourceCar + } else { + dp := dh.DefaultStartDealParams() + dp.Data = &storagemarket.DataRef{ + // FIXME: figure out how to do this with an online partial transfer + TransferType: storagemarket.TTManual, + Root: carRoot, + PieceCid: &carCommp, + PieceSize: carPieceSize.Unpadded(), + } + proposalCid := dh.StartDeal(ctx, dp) - retOrder.FromLocalCAR = sourceCar - retOrder.Root = carRoot + // Wait for the deal to reach StorageDealCheckForAcceptance on the client + cd, err := client.ClientGetDealInfo(ctx, *proposalCid) + require.NoError(t, err) + require.Eventually(t, func() bool { + cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) + return cd.State == storagemarket.StorageDealCheckForAcceptance + }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) - } else { + err = miner.DealsImportData(ctx, *proposalCid, sourceCar) + require.NoError(t, err) - dp := dh.DefaultStartDealParams() - dp.Data = &storagemarket.DataRef{ - // FIXME: figure out how to do this with an online partial transfer - TransferType: storagemarket.TTManual, - Root: carRoot, - PieceCid: &carCommp, - PieceSize: carPieceSize.Unpadded(), + // Wait for the deal to be published, we should be able to start retrieval right away + dh.WaitDealPublished(ctx, proposalCid) + + offers, err := client.ClientFindData(ctx, carRoot, nil) + require.NoError(t, err) + require.NotEmpty(t, offers, "no offers") + + retOrder = 
offers[0].Order(caddr) } - proposalCid := dh.StartDeal(ctx, dp) - //stm: @CLIENT_STORAGE_DEALS_GET_001 - // Wait for the deal to reach StorageDealCheckForAcceptance on the client - cd, err := client.ClientGetDealInfo(ctx, *proposalCid) - require.NoError(t, err) - require.Eventually(t, func() bool { - cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) - return cd.State == storagemarket.StorageDealCheckForAcceptance - }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) + retOrder.DataSelector = &textSelector + eref.DAGs = append(eref.DAGs, api.DagSpec{ + DataSelector: &textSelector, + ExportMerkleProof: exportMerkleProof, + }) + eref.Root = carRoot - //stm: @MINER_IMPORT_DEAL_DATA_001 - err = miner.DealsImportData(ctx, *proposalCid, sourceCar) - require.NoError(t, err) + // test retrieval of either data or constructing a partial selective-car + for _, retrieveAsCar := range []bool{false, true} { + outFile, err := ioutil.TempFile(t.TempDir(), "ret-file") + require.NoError(t, err) + defer outFile.Close() //nolint:errcheck - // Wait for the deal to be published, we should be able to start retrieval right away - dh.WaitDealPublished(ctx, proposalCid) + require.NoError(t, testGenesisRetrieval( + ctx, + client, + retOrder, + eref, + &api.FileRef{ + Path: outFile.Name(), + IsCAR: retrieveAsCar, + }, + outFile, + )) - //stm: @CLIENT_RETRIEVAL_FIND_001 - offers, err := client.ClientFindData(ctx, carRoot, nil) - require.NoError(t, err) - require.NotEmpty(t, offers, "no offers") - - retOrder = offers[0].Order(caddr) - } - - retOrder.DatamodelPathSelector = &textSelector - - // test retrieval of either data or constructing a partial selective-car - for _, retrieveAsCar := range []bool{false, true} { - outFile, err := ioutil.TempFile(t.TempDir(), "ret-file") - require.NoError(t, err) - defer outFile.Close() //nolint:errcheck - - require.NoError(t, testGenesisRetrieval( - ctx, - client, - retOrder, - &api.FileRef{ - Path: outFile.Name(), - IsCAR: retrieveAsCar, - }, - outFile, - )) - - // UGH if I do not sleep here, I get things like: - /* - retrieval failed: Retrieve failed: there is an active retrieval deal with peer 12D3KooWK9fB9a3HZ4PQLVmEQ6pweMMn5CAyKtumB71CPTnuBDi6 for payload CID bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2 (retrieval deal ID 1631259332180384709, state DealStatusFinalizingBlockstore) - existing deal must be cancelled before starting a new retrieval deal: - github.com/filecoin-project/lotus/node/impl/client.(*API).ClientRetrieve - /home/circleci/project/node/impl/client/client.go:774 - */ - time.Sleep(time.Second) + // UGH if I do not sleep here, I get things like: + /* + retrieval failed: Retrieve failed: there is an active retrieval deal with peer 12D3KooWK9fB9a3HZ4PQLVmEQ6pweMMn5CAyKtumB71CPTnuBDi6 for payload CID bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2 (retrieval deal ID 1631259332180384709, state DealStatusFinalizingBlockstore) - existing deal must be cancelled before starting a new retrieval deal: + github.com/filecoin-project/lotus/node/impl/client.(*API).ClientRetrieve + /home/circleci/project/node/impl/client/client.go:774 + */ + time.Sleep(time.Second) + } } } @@ -141,14 +145,18 @@ func TestPartialRetrieval(t *testing.T) { ctx, client, api.RetrievalOrder{ - FromLocalCAR: sourceCar, - Root: carRoot, - DatamodelPathSelector: &textSelectorNonexistent, + Root: carRoot, + DataSelector: &textSelectorNonexistent, + }, + api.ExportRef{ + Root: carRoot, + FromLocalCAR: sourceCar, + DAGs: 
[]api.DagSpec{{DataSelector: &textSelectorNonexistent}}, }, &api.FileRef{}, nil, ), - fmt.Sprintf("retrieval failed: path selection '%s' does not match a node within %s", textSelectorNonexistent, carRoot), + fmt.Sprintf("parsing dag spec: path selection does not match a node within %s", carRoot), ) // ensure non-boundary retrievals fail @@ -158,18 +166,22 @@ func TestPartialRetrieval(t *testing.T) { ctx, client, api.RetrievalOrder{ - FromLocalCAR: sourceCar, - Root: carRoot, - DatamodelPathSelector: &textSelectorNonLink, + Root: carRoot, + DataSelector: &textSelectorNonLink, + }, + api.ExportRef{ + Root: carRoot, + FromLocalCAR: sourceCar, + DAGs: []api.DagSpec{{DataSelector: &textSelectorNonLink}}, }, &api.FileRef{}, nil, ), - fmt.Sprintf("retrieval failed: error while locating partial retrieval sub-root: unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", textSelectorNonLink), + fmt.Sprintf("parsing dag spec: error while locating partial retrieval sub-root: unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", textSelectorNonLink), ) } -func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrder api.RetrievalOrder, retRef *api.FileRef, outFile *os.File) error { +func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrder api.RetrievalOrder, eref api.ExportRef, retRef *api.FileRef, outFile *os.File) error { if retOrder.Total.Nil() { retOrder.Total = big.Zero() @@ -178,7 +190,19 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde retOrder.UnsealPrice = big.Zero() } - err := client.ClientRetrieve(ctx, retOrder, retRef) + if eref.FromLocalCAR == "" { + rr, err := client.ClientRetrieve(ctx, retOrder) + if err != nil { + return err + } + eref.DealID = rr.DealID + + if err := client.ClientRetrieveWait(ctx, rr.DealID); err != nil { + return xerrors.Errorf("retrieval wait: %w", err) + } + } + + err := client.ClientExport(ctx, eref, *retRef) if err != nil { return err } @@ -200,8 +224,10 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde if len(cr.Header.Roots) != 1 { return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots)) - } else if cr.Header.Roots[0].String() != carRoot.String() { + } else if eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != carRoot.String() { return fmt.Errorf("expected root cid '%s', got '%s'", carRoot.String(), cr.Header.Roots[0].String()) + } else if !eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != selectedCid.String() { + return fmt.Errorf("expected root cid '%s', got '%s'", selectedCid.String(), cr.Header.Roots[0].String()) } blks := make([]blocks.Block, 0) @@ -216,11 +242,11 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde blks = append(blks, b) } - if len(blks) != 3 { - return fmt.Errorf("expected a car file with 3 blocks, got one with %d instead", len(blks)) + if (eref.DAGs[0].ExportMerkleProof && len(blks) != 3) || (!eref.DAGs[0].ExportMerkleProof && len(blks) != 1) { + return fmt.Errorf("expected a car file with 3/1 blocks, got one with %d instead", len(blks)) } - data = blks[2].RawData() + data = blks[len(blks)-1].RawData() } if string(data) != expectedResult { diff --git a/itests/fixtures/adl_test.car b/itests/fixtures/adl_test.car new file mode 100644 index 000000000..d00ca0915 Binary files /dev/null and b/itests/fixtures/adl_test.car differ diff --git a/itests/kit/deals.go 
b/itests/kit/deals.go index 4a9af69e6..29da37c15 100644 --- a/itests/kit/deals.go +++ b/itests/kit/deals.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" @@ -320,17 +321,45 @@ func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root caddr, err := dh.client.WalletDefaultAddress(ctx) require.NoError(dh.t, err) - ref := &api.FileRef{ - Path: carFile.Name(), - IsCAR: carExport, - } - - updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref) + updatesCtx, cancel := context.WithCancel(ctx) + updates, err := dh.client.ClientGetRetrievalUpdates(updatesCtx) require.NoError(dh.t, err) - for update := range updates { - require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err) + retrievalRes, err := dh.client.ClientRetrieve(ctx, offers[0].Order(caddr)) + require.NoError(dh.t, err) +consumeEvents: + for { + var evt api.RetrievalInfo + select { + case <-updatesCtx.Done(): + dh.t.Fatal("Retrieval Timed Out") + case evt = <-updates: + if evt.ID != retrievalRes.DealID { + continue + } + } + switch evt.Status { + case retrievalmarket.DealStatusCompleted: + break consumeEvents + case retrievalmarket.DealStatusRejected: + dh.t.Fatalf("Retrieval Proposal Rejected: %s", evt.Message) + case + retrievalmarket.DealStatusDealNotFound, + retrievalmarket.DealStatusErrored: + dh.t.Fatalf("Retrieval Error: %s", evt.Message) + } } + cancel() + + require.NoError(dh.t, dh.client.ClientExport(ctx, + api.ExportRef{ + Root: root, + DealID: retrievalRes.DealID, + }, + api.FileRef{ + Path: carFile.Name(), + IsCAR: carExport, + })) ret := carFile.Name() if carExport { @@ -344,7 +373,7 @@ func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, file *os.File) (out *os.File) { bserv := dstest.Bserv() - ch, err := car.LoadCar(bserv.Blockstore(), file) + ch, err := car.LoadCar(ctx, bserv.Blockstore(), file) require.NoError(dh.t, err) b, err := bserv.GetBlock(ctx, ch.Roots[0]) diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index 90a614645..2a6d16a95 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -487,7 +487,7 @@ func (n *Ensemble) Start() *Ensemble { ds, err := lr.Datastore(context.TODO(), "/metadata") require.NoError(n.t, err) - err = ds.Put(datastore.NewKey("miner-address"), m.ActorAddr.Bytes()) + err = ds.Put(ctx, datastore.NewKey("miner-address"), m.ActorAddr.Bytes()) require.NoError(n.t, err) nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix)) diff --git a/itests/kit/ensemble_opts_nv.go b/itests/kit/ensemble_opts_nv.go index 0d7d87e6a..45ed51443 100644 --- a/itests/kit/ensemble_opts_nv.go +++ b/itests/kit/ensemble_opts_nv.go @@ -49,12 +49,12 @@ func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt { }) /* inline-gen start */ return UpgradeSchedule(stmgr.Upgrade{ - Network: network.Version13, + Network: network.Version14, Height: -1, }, stmgr.Upgrade{ - Network: network.Version14, + Network: network.Version15, Height: upgradeHeight, - Migration: filcns.UpgradeActorsV6, + Migration: filcns.UpgradeActorsV7, }) /* inline-gen end */ } diff --git a/itests/kit/files.go b/itests/kit/files.go index 9babac941..c78352afe 100644 --- a/itests/kit/files.go +++ b/itests/kit/files.go @@ -83,7 +83,7 @@ func 
CreateRandomCARv1(t *testing.T, rseed, size int) (carV1FilePath string, ori require.NoError(t, car.WriteCar(ctx, dagSvc, []cid.Cid{root}, tmp)) _, err = tmp.Seek(0, io.SeekStart) require.NoError(t, err) - hd, _, err := car.ReadHeader(bufio.NewReader(tmp)) + hd, err := car.ReadHeader(bufio.NewReader(tmp)) require.NoError(t, err) require.EqualValues(t, 1, hd.Version) require.Len(t, hd.Roots, 1) diff --git a/itests/self_sent_txn_test.go b/itests/self_sent_txn_test.go new file mode 100644 index 000000000..b5ec2c0dc --- /dev/null +++ b/itests/self_sent_txn_test.go @@ -0,0 +1,101 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +// these tests check that the versioned code in vm.transfer is functioning correctly across versions! +// we reordered the checks to make sure that a transaction with too much money in it sent to yourself will fail instead of succeeding as a noop +// more info in this PR! https://github.com/filecoin-project/lotus/pull/7637 +func TestSelfSentTxnV15(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version15)) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + bal, err := client15.WalletBalance(ctx, client15.DefaultKey.Address) + require.NoError(t, err) + + // send self half of account balance + msgHalfBal := &types.Message{ + From: client15.DefaultKey.Address, + To: client15.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + smHalfBal, err := client15.MpoolPushMessage(ctx, msgHalfBal, nil) + require.NoError(t, err) + mLookup, err := client15.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + msgOverBal := &types.Message{ + From: client15.DefaultKey.Address, + To: client15.DefaultKey.Address, + Value: big.Mul(big.NewInt(2), bal), + GasLimit: 10000000000, + GasPremium: big.NewInt(10000000000), + GasFeeCap: big.NewInt(100000000000), + Nonce: 1, + } + smOverBal, err := client15.WalletSignMessage(ctx, client15.DefaultKey.Address, msgOverBal) + require.NoError(t, err) + smcid, err := client15.MpoolPush(ctx, smOverBal) + require.NoError(t, err) + mLookup, err = client15.StateWaitMsg(ctx, smcid, 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.SysErrInsufficientFunds, mLookup.Receipt.ExitCode) +} + +func TestSelfSentTxnV14(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client14, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version14)) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + bal, err := client14.WalletBalance(ctx, client14.DefaultKey.Address) + require.NoError(t, err) + + // send self half of account balance + msgHalfBal := &types.Message{ + From: client14.DefaultKey.Address, + To: client14.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + smHalfBal, err := client14.MpoolPushMessage(ctx, msgHalfBal, nil) + require.NoError(t, err) + mLookup, err := client14.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + 
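// The rest of this test pins down the nv14 side of the behaviour described
// in the comment at the top of this file: an over-balance self-send is still
// expected to land with exitcode.Ok (it effectively succeeds as a no-op),
// whereas the nv15 test above expects exitcode.SysErrInsufficientFunds after
// the check reordering referenced in lotus#7637.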
require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + msgOverBal := &types.Message{ + From: client14.DefaultKey.Address, + To: client14.DefaultKey.Address, + Value: big.Mul(big.NewInt(2), bal), + GasLimit: 10000000000, + GasPremium: big.NewInt(10000000000), + GasFeeCap: big.NewInt(100000000000), + Nonce: 1, + } + smOverBal, err := client14.WalletSignMessage(ctx, client14.DefaultKey.Address, msgOverBal) + require.NoError(t, err) + smcid, err := client14.MpoolPush(ctx, smOverBal) + require.NoError(t, err) + mLookup, err = client14.StateWaitMsg(ctx, smcid, 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) +} diff --git a/lib/backupds/backupds_test.go b/lib/backupds/backupds_test.go index f7bc36e22..c681491e3 100644 --- a/lib/backupds/backupds_test.go +++ b/lib/backupds/backupds_test.go @@ -2,6 +2,7 @@ package backupds import ( "bytes" + "context" "fmt" "io/ioutil" "os" @@ -17,14 +18,14 @@ const valSize = 512 << 10 func putVals(t *testing.T, ds datastore.Datastore, start, end int) { for i := start; i < end; i++ { - err := ds.Put(datastore.NewKey(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize)))) + err := ds.Put(context.TODO(), datastore.NewKey(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize)))) require.NoError(t, err) } } func checkVals(t *testing.T, ds datastore.Datastore, start, end int, exist bool) { for i := start; i < end; i++ { - v, err := ds.Get(datastore.NewKey(fmt.Sprintf("%d", i))) + v, err := ds.Get(context.TODO(), datastore.NewKey(fmt.Sprintf("%d", i))) if exist { require.NoError(t, err) expect := []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize))) @@ -44,7 +45,7 @@ func TestNoLogRestore(t *testing.T) { require.NoError(t, err) var bup bytes.Buffer - require.NoError(t, bds.Backup(&bup)) + require.NoError(t, bds.Backup(context.TODO(), &bup)) putVals(t, ds1, 10, 20) diff --git a/lib/backupds/datastore.go b/lib/backupds/datastore.go index 350988a50..f0ece10ba 100644 --- a/lib/backupds/datastore.go +++ b/lib/backupds/datastore.go @@ -1,6 +1,7 @@ package backupds import ( + "context" "crypto/sha256" "io" "sync" @@ -52,7 +53,7 @@ func Wrap(child datastore.Batching, logdir string) (*Datastore, error) { // Writes a datastore dump into the provided writer as // [array(*) of [key, value] tuples, checksum] -func (d *Datastore) Backup(out io.Writer) error { +func (d *Datastore) Backup(ctx context.Context, out io.Writer) error { scratch := make([]byte, 9) if err := cbg.WriteMajorTypeHeaderBuf(scratch, out, cbg.MajArray, 2); err != nil { @@ -75,7 +76,7 @@ func (d *Datastore) Backup(out io.Writer) error { log.Info("Starting datastore backup") defer log.Info("Datastore backup done") - qr, err := d.child.Query(query.Query{}) + qr, err := d.child.Query(ctx, query.Query{}) if err != nil { return xerrors.Errorf("query: %w", err) } @@ -132,23 +133,23 @@ func (d *Datastore) Backup(out io.Writer) error { // proxy -func (d *Datastore) Get(key datastore.Key) (value []byte, err error) { - return d.child.Get(key) +func (d *Datastore) Get(ctx context.Context, key datastore.Key) (value []byte, err error) { + return d.child.Get(ctx, key) } -func (d *Datastore) Has(key datastore.Key) (exists bool, err error) { - return d.child.Has(key) +func (d *Datastore) Has(ctx context.Context, key datastore.Key) (exists bool, err error) { + return d.child.Has(ctx, key) } -func (d *Datastore) GetSize(key datastore.Key) (size int, err error) { - return 
d.child.GetSize(key) +func (d *Datastore) GetSize(ctx context.Context, key datastore.Key) (size int, err error) { + return d.child.GetSize(ctx, key) } -func (d *Datastore) Query(q query.Query) (query.Results, error) { - return d.child.Query(q) +func (d *Datastore) Query(ctx context.Context, q query.Query) (query.Results, error) { + return d.child.Query(ctx, q) } -func (d *Datastore) Put(key datastore.Key, value []byte) error { +func (d *Datastore) Put(ctx context.Context, key datastore.Key, value []byte) error { d.backupLk.RLock() defer d.backupLk.RUnlock() @@ -160,21 +161,21 @@ func (d *Datastore) Put(key datastore.Key, value []byte) error { } } - return d.child.Put(key, value) + return d.child.Put(ctx, key, value) } -func (d *Datastore) Delete(key datastore.Key) error { +func (d *Datastore) Delete(ctx context.Context, key datastore.Key) error { d.backupLk.RLock() defer d.backupLk.RUnlock() - return d.child.Delete(key) + return d.child.Delete(ctx, key) } -func (d *Datastore) Sync(prefix datastore.Key) error { +func (d *Datastore) Sync(ctx context.Context, prefix datastore.Key) error { d.backupLk.RLock() defer d.backupLk.RUnlock() - return d.child.Sync(prefix) + return d.child.Sync(ctx, prefix) } func (d *Datastore) CloseLog() error { @@ -196,8 +197,8 @@ func (d *Datastore) Close() error { ) } -func (d *Datastore) Batch() (datastore.Batch, error) { - b, err := d.child.Batch() +func (d *Datastore) Batch(ctx context.Context) (datastore.Batch, error) { + b, err := d.child.Batch(ctx) if err != nil { return nil, err } @@ -215,7 +216,7 @@ type bbatch struct { rlk sync.Locker } -func (b *bbatch) Put(key datastore.Key, value []byte) error { +func (b *bbatch) Put(ctx context.Context, key datastore.Key, value []byte) error { if b.d.log != nil { b.d.log <- Entry{ Key: []byte(key.String()), @@ -224,18 +225,18 @@ func (b *bbatch) Put(key datastore.Key, value []byte) error { } } - return b.b.Put(key, value) + return b.b.Put(ctx, key, value) } -func (b *bbatch) Delete(key datastore.Key) error { - return b.b.Delete(key) +func (b *bbatch) Delete(ctx context.Context, key datastore.Key) error { + return b.b.Delete(ctx, key) } -func (b *bbatch) Commit() error { +func (b *bbatch) Commit(ctx context.Context) error { b.rlk.Lock() defer b.rlk.Unlock() - return b.b.Commit() + return b.b.Commit(ctx) } var _ datastore.Batch = &bbatch{} diff --git a/lib/backupds/log.go b/lib/backupds/log.go index b76dfbfe6..b89f410f0 100644 --- a/lib/backupds/log.go +++ b/lib/backupds/log.go @@ -1,6 +1,7 @@ package backupds import ( + "context" "fmt" "io" "io/ioutil" @@ -100,6 +101,7 @@ type logfile struct { var compactThresh = 2 func (d *Datastore) createLog(logdir string) (*logfile, string, error) { + ctx := context.TODO() p := filepath.Join(logdir, strconv.FormatInt(time.Now().Unix(), 10)+".log.cbor") log.Infow("creating log", "file", p) @@ -108,7 +110,7 @@ func (d *Datastore) createLog(logdir string) (*logfile, string, error) { return nil, "", err } - if err := d.Backup(f); err != nil { + if err := d.Backup(ctx, f); err != nil { return nil, "", xerrors.Errorf("writing log base: %w", err) } if err := f.Sync(); err != nil { @@ -122,8 +124,9 @@ func (d *Datastore) createLog(logdir string) (*logfile, string, error) { } func (d *Datastore) openLog(p string) (*logfile, string, error) { + ctx := context.TODO() log.Infow("opening log", "file", p) - lh, err := d.child.Get(loghead) + lh, err := d.child.Get(ctx, loghead) if err != nil { return nil, "", xerrors.Errorf("checking log head (logfile '%s'): %w", p, err) } @@ -212,6 +215,7 @@ func 
(d *Datastore) openLog(p string) (*logfile, string, error) { } func (l *logfile) writeLogHead(logname string, ds datastore.Batching) error { + ctx := context.TODO() lval := []byte(fmt.Sprintf("%s;%s;%d", logname, uuid.New(), time.Now().Unix())) err := l.writeEntry(&Entry{ @@ -223,7 +227,7 @@ func (l *logfile) writeLogHead(logname string, ds datastore.Batching) error { return xerrors.Errorf("writing loghead to the log: %w", err) } - if err := ds.Put(loghead, lval); err != nil { + if err := ds.Put(ctx, loghead, lval); err != nil { return xerrors.Errorf("writing loghead to the datastore: %w", err) } diff --git a/lib/backupds/read.go b/lib/backupds/read.go index a44442af1..af4f30888 100644 --- a/lib/backupds/read.go +++ b/lib/backupds/read.go @@ -2,6 +2,7 @@ package backupds import ( "bytes" + "context" "crypto/sha256" "io" "os" @@ -117,13 +118,13 @@ func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte, log bool) } func RestoreInto(r io.Reader, dest datastore.Batching) error { - batch, err := dest.Batch() + batch, err := dest.Batch(context.TODO()) if err != nil { return xerrors.Errorf("creating batch: %w", err) } _, err = ReadBackup(r, func(key datastore.Key, value []byte, _ bool) error { - if err := batch.Put(key, value); err != nil { + if err := batch.Put(context.TODO(), key, value); err != nil { return xerrors.Errorf("put key: %w", err) } @@ -133,7 +134,7 @@ func RestoreInto(r io.Reader, dest datastore.Batching) error { return xerrors.Errorf("reading backup: %w", err) } - if err := batch.Commit(); err != nil { + if err := batch.Commit(context.TODO()); err != nil { return xerrors.Errorf("committing batch: %w", err) } diff --git a/lib/tracing/setup.go b/lib/tracing/setup.go index b8c0399ad..d90099f79 100644 --- a/lib/tracing/setup.go +++ b/lib/tracing/setup.go @@ -4,9 +4,16 @@ import ( "os" "strings" - "contrib.go.opencensus.io/exporter/jaeger" + octrace "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/bridge/opencensus" + "go.opentelemetry.io/otel/exporters/jaeger" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + "go.uber.org/zap" + logging "github.com/ipfs/go-log/v2" - "go.opencensus.io/trace" ) var log = logging.Logger("tracing") @@ -14,7 +21,6 @@ var log = logging.Logger("tracing") const ( // environment variable names envCollectorEndpoint = "LOTUS_JAEGER_COLLECTOR_ENDPOINT" - envAgentEndpoint = "LOTUS_JAEGER_AGENT_ENDPOINT" envAgentHost = "LOTUS_JAEGER_AGENT_HOST" envAgentPort = "LOTUS_JAEGER_AGENT_PORT" envJaegerUser = "LOTUS_JAEGER_USERNAME" @@ -26,54 +32,61 @@ const ( // The agent endpoint is a thrift/udp protocol and should be given // as a string like "hostname:port". The agent can also be configured // with separate host and port variables. -func jaegerOptsFromEnv(opts *jaeger.Options) bool { +func jaegerOptsFromEnv() jaeger.EndpointOption { var e string var ok bool - if e, ok = os.LookupEnv(envJaegerUser); ok { - if p, ok := os.LookupEnv(envJaegerCred); ok { - opts.Username = e - opts.Password = p - } else { - log.Warn("jaeger username supplied with no password. 
authentication will not be used.") - } - } + if e, ok = os.LookupEnv(envCollectorEndpoint); ok { - opts.CollectorEndpoint = e - log.Infof("jaeger tracess will send to collector %s", e) - return true - } - if e, ok = os.LookupEnv(envAgentEndpoint); ok { - log.Infof("jaeger traces will be sent to agent %s", e) - opts.AgentEndpoint = e - return true - } - if e, ok = os.LookupEnv(envAgentHost); ok { - if p, ok := os.LookupEnv(envAgentPort); ok { - opts.AgentEndpoint = strings.Join([]string{e, p}, ":") - } else { - opts.AgentEndpoint = strings.Join([]string{e, "6831"}, ":") + options := []jaeger.CollectorEndpointOption{jaeger.WithEndpoint(e)} + if u, ok := os.LookupEnv(envJaegerUser); ok { + if p, ok := os.LookupEnv(envJaegerCred); ok { + options = append(options, jaeger.WithUsername(u)) + options = append(options, jaeger.WithPassword(p)) + } else { + log.Warn("jaeger username supplied with no password. authentication will not be used.") + } } - log.Infof("jaeger traces will be sent to agent %s", opts.AgentEndpoint) - return true + log.Infof("jaeger tracess will send to collector %s", e) + return jaeger.WithCollectorEndpoint(options...) } - return false + + if e, ok = os.LookupEnv(envAgentHost); ok { + options := []jaeger.AgentEndpointOption{jaeger.WithAgentHost(e), jaeger.WithLogger(zap.NewStdLog(log.Desugar()))} + var ep string + if p, ok := os.LookupEnv(envAgentPort); ok { + options = append(options, jaeger.WithAgentPort(p)) + ep = strings.Join([]string{e, p}, ":") + } else { + ep = strings.Join([]string{e, "6831"}, ":") + } + log.Infof("jaeger traces will be sent to agent %s", ep) + return jaeger.WithAgentEndpoint(options...) + } + return nil } -func SetupJaegerTracing(serviceName string) *jaeger.Exporter { - opts := jaeger.Options{} - if !jaegerOptsFromEnv(&opts) { +func SetupJaegerTracing(serviceName string) *tracesdk.TracerProvider { + jaegerEndpoint := jaegerOptsFromEnv() + if jaegerEndpoint == nil { return nil } - opts.ServiceName = serviceName - je, err := jaeger.NewExporter(opts) + je, err := jaeger.New(jaegerEndpoint) if err != nil { log.Errorw("failed to create the jaeger exporter", "error", err) return nil } - - trace.RegisterExporter(je) - trace.ApplyConfig(trace.Config{ - DefaultSampler: trace.AlwaysSample(), - }) - return je + tp := tracesdk.NewTracerProvider( + // Always be sure to batch in production. + tracesdk.WithBatcher(je), + // Record information about this application in an Resource. 
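// A minimal usage sketch (assumed, not taken from this patch): with the
// environment variables read in jaegerOptsFromEnv above, tracing would
// typically be enabled like
//
//	LOTUS_JAEGER_AGENT_HOST=127.0.0.1 LOTUS_JAEGER_AGENT_PORT=6831 lotus daemon
//
// or, pointing straight at a collector (which takes precedence when both
// are set):
//
//	LOTUS_JAEGER_COLLECTOR_ENDPOINT=http://localhost:14268/api/traces lotus daemon
//
// The port and endpoint values here are illustrative defaults, not values
// mandated by this code.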
+ tracesdk.WithResource(resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNameKey.String(serviceName), + )), + tracesdk.WithSampler(tracesdk.AlwaysSample()), + ) + otel.SetTracerProvider(tp) + tracer := tp.Tracer(serviceName) + octrace.DefaultTracer = opencensus.NewTracer(tracer) + return tp } diff --git a/lotuspond/front/package-lock.json b/lotuspond/front/package-lock.json index 8df204f2e..252a42a6d 100644 --- a/lotuspond/front/package-lock.json +++ b/lotuspond/front/package-lock.json @@ -3569,9 +3569,9 @@ "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" }, "color-string": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.5.3.tgz", - "integrity": "sha512-dC2C5qeWoYkxki5UAXapdjqO672AM4vZuPGRQfO8b5HKuKGBbKWpITyDYN7TOFKvRW7kOgAn3746clDBMDJyQw==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.6.0.tgz", + "integrity": "sha512-c/hGS+kRWJutUBEngKKmk4iH3sD59MBkoxVapS/0wgpCz2u7XsNloxknyvBhzwEs1IbV36D9PwqLPJ2DTu3vMA==", "requires": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" @@ -7814,9 +7814,9 @@ "integrity": "sha512-T/zvzYRfbVojPWahDsE5evJdHb3oJoQfFbsrKM7w5Zcs++Tr257tia3BmMP8XYVjp1S9RZXQMh7gao96BlqZOw==" }, "ws": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz", - "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.3.tgz", + "integrity": "sha512-jZArVERrMsKUatIdnLzqvcfydI85dvd/Fp1u/VOpfdDWQ4c9qWXe+VIeAbQ5FrDwciAkr+lzofXLz3Kuf26AOA==", "requires": { "async-limiter": "~1.0.0" } @@ -9194,9 +9194,9 @@ "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=" }, "path-parse": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz", - "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==" + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, "path-to-regexp": { "version": "0.1.7", @@ -9228,6 +9228,11 @@ "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" }, + "picocolors": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-0.2.1.tgz", + "integrity": "sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==" + }, "pify": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", @@ -9354,27 +9359,18 @@ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" }, "postcss": { - "version": "7.0.17", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.17.tgz", - "integrity": "sha512-546ZowA+KZ3OasvQZHsbuEpysvwTZNGJv9EfyCQdsIDltPSWHAeTQ5fQy/Npi2ZDtLI3zs7Ps/p6wThErhm9fQ==", + "version": "7.0.39", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.39.tgz", + "integrity": "sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA==", "requires": { - "chalk": "^2.4.2", - "source-map": "^0.6.1", - "supports-color": "^6.1.0" + "picocolors": "^0.2.1", + "source-map": "^0.6.1" }, "dependencies": { "source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": 
"sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - }, - "supports-color": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", - "integrity": "sha512-qe1jfm1Mg7Nq/NSh6XE24gPXROEVsWHxC1LIx//XNlD9iw7YZQGjZNjYN7xGaEG6iKdA8EtNFW6R0gjnVXp+wQ==", - "requires": { - "has-flag": "^3.0.0" - } } } }, @@ -11057,9 +11053,9 @@ } }, "ws": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz", - "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.3.tgz", + "integrity": "sha512-jZArVERrMsKUatIdnLzqvcfydI85dvd/Fp1u/VOpfdDWQ4c9qWXe+VIeAbQ5FrDwciAkr+lzofXLz3Kuf26AOA==", "requires": { "async-limiter": "~1.0.0" } @@ -12203,9 +12199,9 @@ } }, "tmpl": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.4.tgz", - "integrity": "sha1-I2QN17QtAEM5ERQIIOXPRA5SHdE=" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz", + "integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==" }, "to-arraybuffer": { "version": "1.0.1", @@ -12523,9 +12519,9 @@ } }, "url-parse": { - "version": "1.4.7", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.4.7.tgz", - "integrity": "sha512-d3uaVyzDB9tQoSXFvuSUNFibTd9zxd2bkVrDRvF5TmvWWQwqE4lgYJ5m+x1DbecWkw+LK4RNl2CU1hHuOKPVlg==", + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.3.tgz", + "integrity": "sha512-IIORyIQD9rvj0A4CLWsHkBBJuNqWpFQe224b6j9t/ABmquIS0qDU2pY6kl6AuOrL5OkCXHMCFNe1jBcuAggjvQ==", "requires": { "querystringify": "^2.1.1", "requires-port": "^1.0.0" @@ -13164,9 +13160,9 @@ } }, "ws": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.1.tgz", - "integrity": "sha512-GIyAXC2cB7LjvpgMt9EKS2ldqr0MTrORaleiOno6TweZ6r3TKtoFQWay/2PceJ3RuBasOHzXNn5Lrw1X0bEjqA==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ws/-/ws-6.2.2.tgz", + "integrity": "sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw==", "requires": { "async-limiter": "~1.0.0" } diff --git a/lotuspond/front/src/chain/methods.json b/lotuspond/front/src/chain/methods.json index c0f69d58c..1f6191a94 100644 --- a/lotuspond/front/src/chain/methods.json +++ b/lotuspond/front/src/chain/methods.json @@ -622,5 +622,112 @@ "AddVerifiedClient", "UseBytes", "RestoreBytes" + ], + "fil/7/account": [ + "Send", + "Constructor", + "PubkeyAddress" + ], + "fil/7/cron": [ + "Send", + "Constructor", + "EpochTick" + ], + "fil/7/init": [ + "Send", + "Constructor", + "Exec" + ], + "fil/7/multisig": [ + "Send", + "Constructor", + "Propose", + "Approve", + "Cancel", + "AddSigner", + "RemoveSigner", + "SwapSigner", + "ChangeNumApprovalsThreshold", + "LockBalance" + ], + "fil/7/paymentchannel": [ + "Send", + "Constructor", + "UpdateChannelState", + "Settle", + "Collect" + ], + "fil/7/reward": [ + "Send", + "Constructor", + "AwardBlockReward", + "ThisEpochReward", + "UpdateNetworkKPI" + ], + "fil/7/storagemarket": [ + "Send", + "Constructor", + "AddBalance", + "WithdrawBalance", + "PublishStorageDeals", + "VerifyDealsForActivation", + "ActivateDeals", + "OnMinerSectorsTerminate", + "ComputeDataCommitment", + "CronTick" + ], + "fil/7/storageminer": [ + "Send", + "Constructor", + "ControlAddresses", + "ChangeWorkerAddress", + 
"ChangePeerID", + "SubmitWindowedPoSt", + "PreCommitSector", + "ProveCommitSector", + "ExtendSectorExpiration", + "TerminateSectors", + "DeclareFaults", + "DeclareFaultsRecovered", + "OnDeferredCronEvent", + "CheckSectorProven", + "ApplyRewards", + "ReportConsensusFault", + "WithdrawBalance", + "ConfirmSectorProofsValid", + "ChangeMultiaddrs", + "CompactPartitions", + "CompactSectorNumbers", + "ConfirmUpdateWorkerKey", + "RepayDebt", + "ChangeOwnerAddress", + "DisputeWindowedPoSt", + "PreCommitSectorBatch", + "ProveCommitAggregate", + "ProveReplicaUpdates" + ], + "fil/7/storagepower": [ + "Send", + "Constructor", + "CreateMiner", + "UpdateClaimedPower", + "EnrollCronEvent", + "OnEpochTickEnd", + "UpdatePledgeTotal", + "SubmitPoRepForBulkVerify", + "CurrentTotalPower" + ], + "fil/7/system": [ + "Send", + "Constructor" + ], + "fil/7/verifiedregistry": [ + "Send", + "Constructor", + "AddVerifier", + "RemoveVerifier", + "AddVerifiedClient", + "UseBytes", + "RestoreBytes" ] } \ No newline at end of file diff --git a/markets/dagstore/blockstore.go b/markets/dagstore/blockstore.go index 8980d40cf..317cb08b9 100644 --- a/markets/dagstore/blockstore.go +++ b/markets/dagstore/blockstore.go @@ -1,6 +1,7 @@ package dagstore import ( + "context" "io" blocks "github.com/ipfs/go-block-format" @@ -20,14 +21,14 @@ type Blockstore struct { var _ bstore.Blockstore = (*Blockstore)(nil) -func (b *Blockstore) DeleteBlock(c cid.Cid) error { +func (b *Blockstore) DeleteBlock(context.Context, cid.Cid) error { return xerrors.Errorf("DeleteBlock called but not implemented") } -func (b *Blockstore) Put(block blocks.Block) error { +func (b *Blockstore) Put(context.Context, blocks.Block) error { return xerrors.Errorf("Put called but not implemented") } -func (b *Blockstore) PutMany(blocks []blocks.Block) error { +func (b *Blockstore) PutMany(context.Context, []blocks.Block) error { return xerrors.Errorf("PutMany called but not implemented") } diff --git a/markets/dagstore/miner_api.go b/markets/dagstore/miner_api.go index afe623eb2..77b4b97bf 100644 --- a/markets/dagstore/miner_api.go +++ b/markets/dagstore/miner_api.go @@ -3,34 +3,43 @@ package dagstore import ( "context" "fmt" - "io" - "github.com/filecoin-project/dagstore/throttle" "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/dagstore/mount" + "github.com/filecoin-project/dagstore/throttle" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-state-types/abi" ) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_lotus_accessor.go -package=mock_dagstore . 
MinerAPI + type MinerAPI interface { - FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) + FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) Start(ctx context.Context) error } +type SectorAccessor interface { + retrievalmarket.SectorAccessor + + UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) +} + type minerAPI struct { pieceStore piecestore.PieceStore - sa retrievalmarket.SectorAccessor + sa SectorAccessor throttle throttle.Throttler readyMgr *shared.ReadyManager } var _ MinerAPI = (*minerAPI)(nil) -func NewMinerAPI(store piecestore.PieceStore, sa retrievalmarket.SectorAccessor, concurrency int) MinerAPI { +func NewMinerAPI(store piecestore.PieceStore, sa SectorAccessor, concurrency int) MinerAPI { return &minerAPI{ pieceStore: store, sa: sa, @@ -91,7 +100,7 @@ func (m *minerAPI) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, erro return false, nil } -func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) { +func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error) { err := m.readyMgr.AwaitReady() if err != nil { return nil, err @@ -117,7 +126,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io deal := deal // Throttle this path to avoid flooding the storage subsystem. - var reader io.ReadCloser + var reader mount.Reader err := m.throttle.Do(ctx, func(ctx context.Context) (err error) { isUnsealed, err := m.sa.IsUnsealed(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) if err != nil { @@ -127,7 +136,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io return nil } // Because we know we have an unsealed copy, this UnsealSector call will actually not perform any unsealing. - reader, err = m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) + reader, err = m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) return err }) @@ -149,7 +158,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io // block for a long time with the current PoRep // // This path is unthrottled. 
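// mount.Reader is assumed here to be the dagstore reader interface bundling
// the three standard interfaces sketched below — the same shape the mocks
// and the wrappedSA test helper later in this patch satisfy with an
// anonymous struct:
//
//	interface {
//		io.ReadCloser
//		io.ReaderAt
//		io.Seeker
//	}
//
// Returning it (instead of a bare io.ReadCloser) is what allows random
// access into an unsealed piece, matching the AccessSeek/AccessRandom flags
// now reported by LotusMount.Info.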
- reader, err := m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) + reader, err := m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) if err != nil { lastErr = xerrors.Errorf("failed to unseal deal %d: %w", deal.DealID, err) log.Warn(lastErr.Error()) diff --git a/markets/dagstore/miner_api_test.go b/markets/dagstore/miner_api_test.go index 4a61c62a8..45cbf2461 100644 --- a/markets/dagstore/miner_api_test.go +++ b/markets/dagstore/miner_api_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" + "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-actors/actors/builtin/paych" @@ -203,6 +204,10 @@ type mockRPN struct { } func (m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { + return m.UnsealSectorAt(ctx, sectorID, offset, length) +} + +func (m *mockRPN) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { atomic.AddInt32(&m.calls, 1) m.lk.RLock() defer m.lk.RUnlock() @@ -211,7 +216,13 @@ func (m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, o if !ok { panic("sector not found") } - return io.NopCloser(bytes.NewBuffer([]byte(data))), nil + return struct { + io.ReadCloser + io.ReaderAt + io.Seeker + }{ + ReadCloser: io.NopCloser(bytes.NewBuffer([]byte(data[:]))), + }, nil } func (m *mockRPN) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { diff --git a/markets/dagstore/mocks/mock_lotus_accessor.go b/markets/dagstore/mocks/mock_lotus_accessor.go index 2e19b4482..19923cc2a 100644 --- a/markets/dagstore/mocks/mock_lotus_accessor.go +++ b/markets/dagstore/mocks/mock_lotus_accessor.go @@ -1,96 +1,96 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: lotusaccessor.go +// Source: github.com/filecoin-project/lotus/markets/dagstore (interfaces: MinerAPI) // Package mock_dagstore is a generated GoMock package. package mock_dagstore import ( context "context" - io "io" reflect "reflect" + mount "github.com/filecoin-project/dagstore/mount" gomock "github.com/golang/mock/gomock" cid "github.com/ipfs/go-cid" ) -// MockLotusAccessor is a mock of LotusAccessor interface. -type MockLotusAccessor struct { +// MockMinerAPI is a mock of MinerAPI interface. +type MockMinerAPI struct { ctrl *gomock.Controller - recorder *MockLotusAccessorMockRecorder + recorder *MockMinerAPIMockRecorder } -// MockLotusAccessorMockRecorder is the mock recorder for MockLotusAccessor. -type MockLotusAccessorMockRecorder struct { - mock *MockLotusAccessor +// MockMinerAPIMockRecorder is the mock recorder for MockMinerAPI. +type MockMinerAPIMockRecorder struct { + mock *MockMinerAPI } -// NewMockLotusAccessor creates a new mock instance. -func NewMockLotusAccessor(ctrl *gomock.Controller) *MockLotusAccessor { - mock := &MockLotusAccessor{ctrl: ctrl} - mock.recorder = &MockLotusAccessorMockRecorder{mock} +// NewMockMinerAPI creates a new mock instance. 
+func NewMockMinerAPI(ctrl *gomock.Controller) *MockMinerAPI { + mock := &MockMinerAPI{ctrl: ctrl} + mock.recorder = &MockMinerAPIMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockLotusAccessor) EXPECT() *MockLotusAccessorMockRecorder { +func (m *MockMinerAPI) EXPECT() *MockMinerAPIMockRecorder { return m.recorder } // FetchUnsealedPiece mocks base method. -func (m *MockLotusAccessor) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) { +func (m *MockMinerAPI) FetchUnsealedPiece(arg0 context.Context, arg1 cid.Cid) (mount.Reader, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchUnsealedPiece", ctx, pieceCid) - ret0, _ := ret[0].(io.ReadCloser) + ret := m.ctrl.Call(m, "FetchUnsealedPiece", arg0, arg1) + ret0, _ := ret[0].(mount.Reader) ret1, _ := ret[1].(error) return ret0, ret1 } // FetchUnsealedPiece indicates an expected call of FetchUnsealedPiece. -func (mr *MockLotusAccessorMockRecorder) FetchUnsealedPiece(ctx, pieceCid interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) FetchUnsealedPiece(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockLotusAccessor)(nil).FetchUnsealedPiece), ctx, pieceCid) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockMinerAPI)(nil).FetchUnsealedPiece), arg0, arg1) } // GetUnpaddedCARSize mocks base method. -func (m *MockLotusAccessor) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) { +func (m *MockMinerAPI) GetUnpaddedCARSize(arg0 context.Context, arg1 cid.Cid) (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUnpaddedCARSize", ctx, pieceCid) + ret := m.ctrl.Call(m, "GetUnpaddedCARSize", arg0, arg1) ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } // GetUnpaddedCARSize indicates an expected call of GetUnpaddedCARSize. -func (mr *MockLotusAccessorMockRecorder) GetUnpaddedCARSize(ctx, pieceCid interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) GetUnpaddedCARSize(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockLotusAccessor)(nil).GetUnpaddedCARSize), ctx, pieceCid) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockMinerAPI)(nil).GetUnpaddedCARSize), arg0, arg1) } // IsUnsealed mocks base method. -func (m *MockLotusAccessor) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) { +func (m *MockMinerAPI) IsUnsealed(arg0 context.Context, arg1 cid.Cid) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsUnsealed", ctx, pieceCid) + ret := m.ctrl.Call(m, "IsUnsealed", arg0, arg1) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // IsUnsealed indicates an expected call of IsUnsealed. -func (mr *MockLotusAccessorMockRecorder) IsUnsealed(ctx, pieceCid interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) IsUnsealed(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockLotusAccessor)(nil).IsUnsealed), ctx, pieceCid) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockMinerAPI)(nil).IsUnsealed), arg0, arg1) } // Start mocks base method. 
-func (m *MockLotusAccessor) Start(ctx context.Context) error { +func (m *MockMinerAPI) Start(arg0 context.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Start", ctx) + ret := m.ctrl.Call(m, "Start", arg0) ret0, _ := ret[0].(error) return ret0 } // Start indicates an expected call of Start. -func (mr *MockLotusAccessorMockRecorder) Start(ctx interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) Start(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockLotusAccessor)(nil).Start), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockMinerAPI)(nil).Start), arg0) } diff --git a/markets/dagstore/mount.go b/markets/dagstore/mount.go index c97dcbf86..0ecdc9808 100644 --- a/markets/dagstore/mount.go +++ b/markets/dagstore/mount.go @@ -2,7 +2,6 @@ package dagstore import ( "context" - "io" "net/url" "github.com/ipfs/go-cid" @@ -57,19 +56,15 @@ func (l *LotusMount) Deserialize(u *url.URL) error { } func (l *LotusMount) Fetch(ctx context.Context) (mount.Reader, error) { - r, err := l.API.FetchUnsealedPiece(ctx, l.PieceCid) - if err != nil { - return nil, xerrors.Errorf("failed to fetch unsealed piece %s: %w", l.PieceCid, err) - } - return &readCloser{r}, nil + return l.API.FetchUnsealedPiece(ctx, l.PieceCid) } func (l *LotusMount) Info() mount.Info { return mount.Info{ Kind: mount.KindRemote, AccessSequential: true, - AccessSeek: false, - AccessRandom: false, + AccessSeek: true, + AccessRandom: true, } } @@ -94,17 +89,3 @@ func (l *LotusMount) Stat(ctx context.Context) (mount.Stat, error) { Ready: isUnsealed, }, nil } - -type readCloser struct { - io.ReadCloser -} - -var _ mount.Reader = (*readCloser)(nil) - -func (r *readCloser) ReadAt(p []byte, off int64) (n int, err error) { - return 0, xerrors.Errorf("ReadAt called but not implemented") -} - -func (r *readCloser) Seek(offset int64, whence int) (int64, error) { - return 0, xerrors.Errorf("Seek called but not implemented") -} diff --git a/markets/dagstore/mount_test.go b/markets/dagstore/mount_test.go index 09b255d6a..d6ea54964 100644 --- a/markets/dagstore/mount_test.go +++ b/markets/dagstore/mount_test.go @@ -2,6 +2,7 @@ package dagstore import ( "context" + "io" "io/ioutil" "net/url" "strings" @@ -12,7 +13,6 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/dagstore/mount" - mock_dagstore "github.com/filecoin-project/lotus/markets/dagstore/mocks" ) @@ -26,12 +26,31 @@ func TestLotusMount(t *testing.T) { defer mockCtrl.Finish() // create a mock lotus api that returns the reader we want - mockLotusMountAPI := mock_dagstore.NewMockLotusAccessor(mockCtrl) + mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl) mockLotusMountAPI.EXPECT().IsUnsealed(gomock.Any(), cid).Return(true, nil).Times(1) - mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(&readCloser{ioutil.NopCloser(strings.NewReader("testing"))}, nil).Times(1) - mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(&readCloser{ioutil.NopCloser(strings.NewReader("testing"))}, nil).Times(1) + mr1 := struct { + io.ReadCloser + io.ReaderAt + io.Seeker + }{ + ReadCloser: ioutil.NopCloser(strings.NewReader("testing")), + ReaderAt: nil, + Seeker: nil, + } + mr2 := struct { + io.ReadCloser + io.ReaderAt + io.Seeker + }{ + ReadCloser: ioutil.NopCloser(strings.NewReader("testing")), + ReaderAt: nil, + Seeker: nil, + } + + 
mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr1, nil).Times(1) + mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr2, nil).Times(1) mockLotusMountAPI.EXPECT().GetUnpaddedCARSize(ctx, cid).Return(uint64(100), nil).Times(1) mnt, err := NewLotusMount(cid, mockLotusMountAPI) @@ -109,7 +128,7 @@ func TestLotusMountRegistration(t *testing.T) { // when test is done, assert expectations on all mock objects. defer mockCtrl.Finish() - mockLotusMountAPI := mock_dagstore.NewMockLotusAccessor(mockCtrl) + mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl) registry := mount.NewRegistry() err = registry.Register(lotusScheme, mountTemplate(mockLotusMountAPI)) require.NoError(t, err) diff --git a/markets/dagstore/wrapper_migration_test.go b/markets/dagstore/wrapper_migration_test.go index 13d8db876..e46f8779b 100644 --- a/markets/dagstore/wrapper_migration_test.go +++ b/markets/dagstore/wrapper_migration_test.go @@ -2,13 +2,16 @@ package dagstore import ( "context" + "io" "testing" - "github.com/filecoin-project/dagstore" "github.com/stretchr/testify/require" + "github.com/filecoin-project/dagstore" + "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" tut "github.com/filecoin-project/go-fil-markets/shared_testutil" "github.com/filecoin-project/go-fil-markets/storagemarket" @@ -93,7 +96,7 @@ func TestShardRegistration(t *testing.T) { cfg := config.DefaultStorageMiner().DAGStore cfg.RootDir = t.TempDir() - mapi := NewMinerAPI(ps, sa, 10) + mapi := NewMinerAPI(ps, &wrappedSA{sa}, 10) dagst, w, err := NewDAGStore(cfg, mapi) require.NoError(t, err) require.NotNil(t, dagst) @@ -119,3 +122,25 @@ func TestShardRegistration(t *testing.T) { // ps.VerifyExpectations(t) } + +type wrappedSA struct { + retrievalmarket.SectorAccessor +} + +func (w *wrappedSA) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { + r, err := w.UnsealSector(ctx, sectorID, pieceOffset, length) + if err != nil { + return nil, err + } + return struct { + io.ReadCloser + io.Seeker + io.ReaderAt + }{ + ReadCloser: r, + Seeker: nil, + ReaderAt: nil, + }, err +} + +var _ SectorAccessor = &wrappedSA{} diff --git a/markets/dagstore/wrapper_test.go b/markets/dagstore/wrapper_test.go index 9d3e6939e..48e01100b 100644 --- a/markets/dagstore/wrapper_test.go +++ b/markets/dagstore/wrapper_test.go @@ -3,21 +3,19 @@ package dagstore import ( "bytes" "context" - "io" "os" "testing" "time" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/dagstore" "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/dagstore/shard" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" + "github.com/filecoin-project/lotus/node/config" ) // TestWrapperAcquireRecovery verifies that if acquire shard returns a "not found" @@ -191,7 +189,7 @@ func (m mockLotusMount) Start(ctx context.Context) error { return nil } -func (m mockLotusMount) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) { +func (m mockLotusMount) FetchUnsealedPiece(context.Context, cid.Cid) (mount.Reader, error) { panic("implement me") } diff --git a/markets/sectoraccessor/sectoraccessor.go 
b/markets/sectoraccessor/sectoraccessor.go index 1304a3a00..4320e3fb1 100644 --- a/markets/sectoraccessor/sectoraccessor.go +++ b/markets/sectoraccessor/sectoraccessor.go @@ -4,23 +4,24 @@ import ( "context" "io" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/dagstore/mount" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/abi" + specstorage "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/markets/dagstore" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/storage/sectorblocks" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-state-types/abi" - specstorage "github.com/filecoin-project/specs-storage/storage" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("sectoraccessor") @@ -34,12 +35,16 @@ type sectorAccessor struct { var _ retrievalmarket.SectorAccessor = (*sectorAccessor)(nil) -func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.SectorAccessor { +func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) dagstore.SectorAccessor { return §orAccessor{address.Address(maddr), secb, pp, full} } -func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { - log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length) +func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { + return sa.UnsealSectorAt(ctx, sectorID, pieceOffset, length) +} + +func (sa *sectorAccessor) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { + log.Debugf("get sector %d, pieceOffset %d, length %d", sectorID, pieceOffset, length) si, err := sa.sectorsStatus(ctx, sectorID, false) if err != nil { return nil, err @@ -64,8 +69,8 @@ func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorN } // Get a reader for the piece, unsealing the piece if necessary - log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid) - r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.Ticket.Value, commD) + log.Debugf("read piece in sector %d, pieceOffset %d, length %d from miner %d", sectorID, pieceOffset, length, mid) + r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(pieceOffset), length, si.Ticket.Value, commD) if err != nil { return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err) } diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index 5c82d0dc8..bb9863bc4 100644 
--- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -228,7 +228,7 @@ func (n *ProviderNodeAdapter) GetBalance(ctx context.Context, addr address.Addre // TODO: why doesnt this method take in a sector ID? func (n *ProviderNodeAdapter) LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, encodedTs shared.TipSetToken) (sectorID abi.SectorNumber, offset abi.PaddedPieceSize, length abi.PaddedPieceSize, err error) { - refs, err := n.secb.GetRefs(dealID) + refs, err := n.secb.GetRefs(ctx, dealID) if err != nil { return 0, 0, 0, err } diff --git a/metrics/metrics.go b/metrics/metrics.go index b969a4422..b4032bb1d 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -128,6 +128,15 @@ var ( StorageLimitUsedBytes = stats.Int64("storage/path_limit_used_bytes", "used optional storage limit bytes", stats.UnitBytes) StorageLimitMaxBytes = stats.Int64("storage/path_limit_max_bytes", "optional storage limit", stats.UnitBytes) + DagStorePRInitCount = stats.Int64("dagstore/pr_init_count", "PieceReader init count", stats.UnitDimensionless) + DagStorePRBytesRequested = stats.Int64("dagstore/pr_requested_bytes", "PieceReader requested bytes", stats.UnitBytes) + DagStorePRBytesDiscarded = stats.Int64("dagstore/pr_discarded_bytes", "PieceReader discarded bytes", stats.UnitBytes) + DagStorePRDiscardCount = stats.Int64("dagstore/pr_discard_count", "PieceReader discard count", stats.UnitDimensionless) + DagStorePRSeekBackCount = stats.Int64("dagstore/pr_seek_back_count", "PieceReader seek back count", stats.UnitDimensionless) + DagStorePRSeekForwardCount = stats.Int64("dagstore/pr_seek_forward_count", "PieceReader seek forward count", stats.UnitDimensionless) + DagStorePRSeekBackBytes = stats.Int64("dagstore/pr_seek_back_bytes", "PieceReader seek back bytes", stats.UnitBytes) + DagStorePRSeekForwardBytes = stats.Int64("dagstore/pr_seek_forward_bytes", "PieceReader seek forward bytes", stats.UnitBytes) + // splitstore SplitstoreMiss = stats.Int64("splitstore/miss", "Number of misses in hotstre access", stats.UnitDimensionless) SplitstoreCompactionTimeSeconds = stats.Float64("splitstore/compaction_time", "Compaction time in seconds", stats.UnitSeconds) @@ -142,7 +151,7 @@ var ( Description: "Lotus node information", Measure: LotusInfo, Aggregation: view.LastValue(), - TagKeys: []tag.Key{Version, Commit}, + TagKeys: []tag.Key{Version, Commit, NodeType}, } ChainNodeHeightView = &view.View{ Measure: ChainNodeHeight, @@ -383,6 +392,39 @@ var ( TagKeys: []tag.Key{StorageID}, } + DagStorePRInitCountView = &view.View{ + Measure: DagStorePRInitCount, + Aggregation: view.Count(), + } + DagStorePRBytesRequestedView = &view.View{ + Measure: DagStorePRBytesRequested, + Aggregation: view.Sum(), + } + DagStorePRBytesDiscardedView = &view.View{ + Measure: DagStorePRBytesDiscarded, + Aggregation: view.Sum(), + } + DagStorePRDiscardCountView = &view.View{ + Measure: DagStorePRDiscardCount, + Aggregation: view.Count(), + } + DagStorePRSeekBackCountView = &view.View{ + Measure: DagStorePRSeekBackCount, + Aggregation: view.Count(), + } + DagStorePRSeekForwardCountView = &view.View{ + Measure: DagStorePRSeekForwardCount, + Aggregation: view.Count(), + } + DagStorePRSeekBackBytesView = &view.View{ + Measure: DagStorePRSeekBackBytes, + Aggregation: view.Sum(), + } + DagStorePRSeekForwardBytesView = &view.View{ + Measure: DagStorePRSeekForwardBytes, + Aggregation: view.Sum(), + } + // splitstore SplitstoreMissView = &view.View{ Measure: SplitstoreMiss, @@ -539,6 +581,14 @@ var 
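// [Editorial sketch, not part of the patch] The DagStorePR* measures and views added above follow the
// existing OpenCensus pattern in metrics.go: counts aggregated with view.Count(), byte totals with
// view.Sum(). A hedged sketch of how a piece reader might record the seek metrics; the function and
// its arguments are illustrative only.
package example

import (
	"context"

	"go.opencensus.io/stats"

	"github.com/filecoin-project/lotus/metrics"
)

// recordSeek emits the PieceReader seek metrics declared above; whether the seek
// went backwards or forwards determines which count/bytes pair is recorded.
func recordSeek(ctx context.Context, from, to int64) {
	if to < from {
		stats.Record(ctx,
			metrics.DagStorePRSeekBackCount.M(1),
			metrics.DagStorePRSeekBackBytes.M(from-to))
		return
	}
	stats.Record(ctx,
		metrics.DagStorePRSeekForwardCount.M(1),
		metrics.DagStorePRSeekForwardBytes.M(to-from))
}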
MinerNodeViews = append([]*view.View{ StorageReservedBytesView, StorageLimitUsedBytesView, StorageLimitMaxBytesView, + DagStorePRInitCountView, + DagStorePRBytesRequestedView, + DagStorePRBytesDiscardedView, + DagStorePRDiscardCountView, + DagStorePRSeekBackCountView, + DagStorePRSeekForwardCountView, + DagStorePRSeekBackBytesView, + DagStorePRSeekForwardBytesView, }, DefaultViews...) // SinceInMilliseconds returns the duration of time since the provide time as a float64. diff --git a/miner/miner.go b/miner/miner.go index 582ade723..c5f0a0129 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -325,7 +325,7 @@ minerLoop: "block-time", btime, "time", build.Clock.Now(), "difference", build.Clock.Since(btime)) } - if err := m.sf.MinedBlock(b.Header, base.TipSet.Height()+base.NullRounds); err != nil { + if err := m.sf.MinedBlock(ctx, b.Header, base.TipSet.Height()+base.NullRounds); err != nil { log.Errorf(" SLASH FILTER ERROR: %s", err) if os.Getenv("LOTUS_MINER_NO_SLASHFILTER") != "_yes_i_know_i_can_and_probably_will_lose_all_my_fil_and_power_" { continue diff --git a/node/builder.go b/node/builder.go index 3f2e59503..96d217ec3 100644 --- a/node/builder.go +++ b/node/builder.go @@ -19,7 +19,6 @@ import ( "github.com/libp2p/go-libp2p-core/peerstore" "github.com/libp2p/go-libp2p-core/routing" dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p-peerstore/pstoremem" pubsub "github.com/libp2p/go-libp2p-pubsub" record "github.com/libp2p/go-libp2p-record" "github.com/libp2p/go-libp2p/p2p/net/conngater" @@ -162,6 +161,12 @@ func defaults() []Option { }), Override(new(dtypes.ShutdownChan), make(chan struct{})), + + // the great context in the sky, otherwise we can't DI build genesis; there has to be a better + // solution than this hack. + Override(new(context.Context), func(lc fx.Lifecycle, mctx helpers.MetricsCtx) context.Context { + return helpers.LifecycleCtx(mctx, lc) + }), } } @@ -170,14 +175,14 @@ var LibP2P = Options( Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(false)), // Host dependencies - Override(new(peerstore.Peerstore), pstoremem.NewPeerstore), + Override(new(peerstore.Peerstore), lp2p.Peerstore), Override(PstoreAddSelfKeysKey, lp2p.PstoreAddSelfKeys), Override(StartListeningKey, lp2p.StartListening(config.DefaultFullNode().Libp2p.ListenAddresses)), // Host settings Override(DefaultTransportsKey, lp2p.DefaultTransports), Override(AddrsFactoryKey, lp2p.AddrsFactory(nil, nil)), - Override(SmuxTransportKey, lp2p.SmuxTransport(true)), + Override(SmuxTransportKey, lp2p.SmuxTransport()), Override(RelayKey, lp2p.NoRelay()), Override(SecurityKey, lp2p.Security(true, false)), diff --git a/node/builder_miner.go b/node/builder_miner.go index 3447eb3e6..c8a04255f 100644 --- a/node/builder_miner.go +++ b/node/builder_miner.go @@ -136,7 +136,7 @@ func ConfigStorageMiner(c interface{}) Option { If(cfg.Subsystems.EnableMarkets, // Markets Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore), - Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfersForStorage, cfg.Dealmaking.SimultaneousTransfersForRetrieval)), + Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfersForStorage, cfg.Dealmaking.SimultaneousTransfersForStoragePerClient, cfg.Dealmaking.SimultaneousTransfersForRetrieval)), Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), @@ -155,7 +155,8 @@ func 
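// [Editorial sketch, not part of the patch] On the node/builder.go change above: providing a
// lifecycle-scoped context.Context in defaults() means a DI-constructed component can depend on a
// context directly instead of taking fx.Lifecycle plus helpers.MetricsCtx and deriving one itself.
// A constructor written against that assumption; the function name and datastore key are hypothetical,
// and the context-taking datastore API matches the migration seen throughout this patch.
package example

import (
	"context"

	"github.com/ipfs/go-datastore"

	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

// CheckGenesis is a hypothetical module constructor: with the context override in
// defaults(), fx can inject ctx here directly.
func CheckGenesis(ctx context.Context, mds dtypes.MetadataDS) error {
	_, err := mds.Get(ctx, datastore.NewKey("/genesis")) // hypothetical key
	return err
}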
ConfigStorageMiner(c interface{}) Option { Override(DAGStoreKey, modules.DAGStore), // Markets (retrieval) - Override(new(retrievalmarket.SectorAccessor), sectoraccessor.NewSectorAccessor), + Override(new(dagstore.SectorAccessor), sectoraccessor.NewSectorAccessor), + Override(new(retrievalmarket.SectorAccessor), From(new(dagstore.SectorAccessor))), Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode), Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork), Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), diff --git a/node/config/def.go b/node/config/def.go index 735107e29..4a525e697 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -160,8 +160,9 @@ func DefaultStorageMiner() *StorageMiner { MaxDealsPerPublishMsg: 8, MaxProviderCollateralMultiplier: 2, - SimultaneousTransfersForStorage: DefaultSimultaneousTransfers, - SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, + SimultaneousTransfersForStorage: DefaultSimultaneousTransfers, + SimultaneousTransfersForStoragePerClient: 0, + SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 296501edc..eded0b1fe 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -272,6 +272,17 @@ passed to the sealing node by the markets service. 0 is unlimited.`, Comment: `The maximum number of parallel online data transfers for storage deals`, }, + { + Name: "SimultaneousTransfersForStoragePerClient", + Type: "uint64", + + Comment: `The maximum number of simultaneous data transfers from any single client +for storage deals. +Unset by default (0), and values higher than SimultaneousTransfersForStorage +will have no effect; i.e. the total number of simultaneous data transfers +across all storage clients is bound by SimultaneousTransfersForStorage +regardless of this number.`, + }, { Name: "SimultaneousTransfersForRetrieval", Type: "uint64", diff --git a/node/config/types.go b/node/config/types.go index 1be40029e..2ae2d8eee 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -131,6 +131,13 @@ type DealmakingConfig struct { MaxStagingDealsBytes int64 // The maximum number of parallel online data transfers for storage deals SimultaneousTransfersForStorage uint64 + // The maximum number of simultaneous data transfers from any single client + // for storage deals. + // Unset by default (0), and values higher than SimultaneousTransfersForStorage + // will have no effect; i.e. the total number of simultaneous data transfers + // across all storage clients is bound by SimultaneousTransfersForStorage + // regardless of this number. + SimultaneousTransfersForStoragePerClient uint64 // The maximum number of parallel online data transfers for retrieval deals SimultaneousTransfersForRetrieval uint64 // Minimum start epoch buffer to give time for sealing of sector with deal. 
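// [Editorial sketch, not part of the patch] On the SimultaneousTransfersForStoragePerClient field
// introduced above: it defaults to 0 (unset), and any value above SimultaneousTransfersForStorage is
// effectively ignored because the global cap still applies. The sketch below mirrors the
// builder_miner.go wiring shown above; the chosen numbers are illustrative.
package example

import (
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/config"
	"github.com/filecoin-project/lotus/node/modules"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

// stagingGraphsyncOption builds the DI override with the new per-client limit.
func stagingGraphsyncOption() node.Option {
	cfg := config.DefaultStorageMiner()
	cfg.Dealmaking.SimultaneousTransfersForStorage = 20         // global cap
	cfg.Dealmaking.SimultaneousTransfersForStoragePerClient = 5 // per-client cap; 0 leaves it unset

	return node.Override(
		new(dtypes.StagingGraphsync),
		modules.StagingGraphsync(
			cfg.Dealmaking.SimultaneousTransfersForStorage,
			cfg.Dealmaking.SimultaneousTransfersForStoragePerClient,
			cfg.Dealmaking.SimultaneousTransfersForRetrieval,
		),
	)
}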
diff --git a/node/hello/hello.go b/node/hello/hello.go index 5461dcc87..ac92d5170 100644 --- a/node/hello/hello.go +++ b/node/hello/hello.go @@ -141,7 +141,7 @@ func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error { return err } - gen, err := hs.cs.GetGenesis() + gen, err := hs.cs.GetGenesis(ctx) if err != nil { return err } diff --git a/node/impl/backup.go b/node/impl/backup.go index 10f673a4b..7acc7e018 100644 --- a/node/impl/backup.go +++ b/node/impl/backup.go @@ -1,6 +1,7 @@ package impl import ( + "context" "os" "path/filepath" "strings" @@ -12,7 +13,7 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) -func backup(mds dtypes.MetadataDS, fpath string) error { +func backup(ctx context.Context, mds dtypes.MetadataDS, fpath string) error { bb, ok := os.LookupEnv("LOTUS_BACKUP_BASE_PATH") if !ok { return xerrors.Errorf("LOTUS_BACKUP_BASE_PATH env var not set") @@ -52,7 +53,7 @@ func backup(mds dtypes.MetadataDS, fpath string) error { return xerrors.Errorf("open %s: %w", fpath, err) } - if err := bds.Backup(out); err != nil { + if err := bds.Backup(ctx, out); err != nil { if cerr := out.Close(); cerr != nil { log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err) } diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 199a2122d..e3c365fee 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -4,17 +4,22 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "io" "os" "sort" + "strings" "time" bstore "github.com/ipfs/go-ipfs-blockstore" + format "github.com/ipfs/go-ipld-format" unixfile "github.com/ipfs/go-unixfs/file" "github.com/ipld/go-car" + "github.com/ipld/go-car/util" carv2 "github.com/ipld/go-car/v2" carv2bs "github.com/ipld/go-car/v2/blockstore" + "github.com/ipld/go-ipld-prime/datamodel" "golang.org/x/xerrors" "github.com/filecoin-project/go-padreader" @@ -58,7 +63,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-actors/v3/actors/builtin/market" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/repo/imports" @@ -146,7 +150,7 @@ func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isSt if err != nil { return nil, xerrors.Errorf("failed to find blockstore for root CID: %w", err) } - if has, err := bs.Has(params.Data.Root); err != nil { + if has, err := bs.Has(ctx, params.Data.Root); err != nil { return nil, xerrors.Errorf("failed to query blockstore for root CID: %w", err) } else if !has { return nil, xerrors.Errorf("failed to find root CID in blockstore: %w", err) @@ -516,7 +520,7 @@ func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (res *api.Impor } defer f.Close() //nolint:errcheck - hd, _, err := car.ReadHeader(bufio.NewReader(f)) + hd, err := car.ReadHeader(bufio.NewReader(f)) if err != nil { return nil, xerrors.Errorf("failed to read CAR header: %w", err) } @@ -760,325 +764,405 @@ func (a *API) ClientCancelRetrievalDeal(ctx context.Context, dealID rm.DealID) e } } -func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error { - events := make(chan marketevents.RetrievalEvent) - go a.clientRetrieve(ctx, order, ref, events) - - for { - select { - case evt, ok := <-events: - if !ok { // done successfully - return nil - } - - if evt.Err != "" { - return xerrors.Errorf("retrieval failed: %s", evt.Err) - } - case <-ctx.Done(): - 
return xerrors.Errorf("retrieval timed out") - } - } -} - -func (a *API) ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { - events := make(chan marketevents.RetrievalEvent) - go a.clientRetrieve(ctx, order, ref, events) - return events, nil -} - -type retrievalSubscribeEvent struct { - event rm.ClientEvent - state rm.ClientDealState -} - -func consumeAllEvents(ctx context.Context, dealID rm.DealID, subscribeEvents chan retrievalSubscribeEvent, events chan marketevents.RetrievalEvent) error { - for { - var subscribeEvent retrievalSubscribeEvent - select { - case <-ctx.Done(): - return xerrors.New("Retrieval Timed Out") - case subscribeEvent = <-subscribeEvents: - if subscribeEvent.state.ID != dealID { - // we can't check the deal ID ahead of time because: - // 1. We need to subscribe before retrieving. - // 2. We won't know the deal ID until after retrieving. - continue - } - } - - select { - case <-ctx.Done(): - return xerrors.New("Retrieval Timed Out") - case events <- marketevents.RetrievalEvent{ - Event: subscribeEvent.event, - Status: subscribeEvent.state.Status, - BytesReceived: subscribeEvent.state.TotalReceived, - FundsSpent: subscribeEvent.state.FundsSpent, - }: - } - - state := subscribeEvent.state - switch state.Status { - case rm.DealStatusCompleted: - return nil - case rm.DealStatusRejected: - return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message) - case rm.DealStatusCancelled: - return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message) - case - rm.DealStatusDealNotFound, - rm.DealStatusErrored: - return xerrors.Errorf("Retrieval Error: %s", state.Message) - } - } -} - -func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) { - defer close(events) - - finish := func(e error) { - if e != nil { - events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()} - } - } - +func getDataSelector(dps *api.Selector, matchPath bool) (datamodel.Node, error) { sel := selectorparse.CommonSelector_ExploreAllRecursively - if order.DatamodelPathSelector != nil { + if dps != nil { - ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + if strings.HasPrefix(string(*dps), "{") { + var err error + sel, err = selectorparse.ParseJSONSelector(string(*dps)) + if err != nil { + return nil, xerrors.Errorf("failed to parse json-selector '%s': %w", *dps, err) + } + } else { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) - selspec, err := textselector.SelectorSpecFromPath( + selspec, err := textselector.SelectorSpecFromPath( + textselector.Expression(*dps), matchPath, - *order.DatamodelPathSelector, + ssb.ExploreRecursive( + selector.RecursionLimitNone(), + ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreAll(ssb.ExploreRecursiveEdge())), + ), + ) + if err != nil { + return nil, xerrors.Errorf("failed to parse text-selector '%s': %w", *dps, err) + } - // URGH - this is a direct copy from https://github.com/filecoin-project/go-fil-markets/blob/v1.12.0/shared/selectors.go#L10-L16 - // Unable to use it because we need the SelectorSpec, and markets exposes just a reified node - ssb.ExploreRecursive( - selector.RecursionLimitNone(), - ssb.ExploreAll(ssb.ExploreRecursiveEdge()), - ), - ) - if err != nil { - finish(xerrors.Errorf("failed to parse text-selector '%s': %w", *order.DatamodelPathSelector, err)) - return + sel = selspec.Node() + log.Infof("partial retrieval of 
datamodel-path-selector %s/*", *dps) } - - sel = selspec.Node() - log.Infof("partial retrieval of datamodel-path-selector %s/*", *order.DatamodelPathSelector) } - // summary: - // 1. if we're retrieving from an import, FromLocalCAR will be set. - // Skip the retrieval itself, and use the provided car as a blockstore further down - // to extract a CAR or UnixFS export from. - // 2. if we're using an IPFS blockstore for retrieval, retrieve into it, - // then use the virtual blockstore to extract a CAR or UnixFS export from it. - // 3. if we have to retrieve, perform a CARv2 retrieval, then either - // extract the CARv1 (with ExtractV1File) or use it as a blockstore further down. + return sel, nil +} - // this indicates we're proxying to IPFS. +func (a *API) ClientRetrieve(ctx context.Context, params api.RetrievalOrder) (*api.RestrievalRes, error) { + sel, err := getDataSelector(params.DataSelector, false) + if err != nil { + return nil, err + } + + di, err := a.doRetrieval(ctx, params, sel) + if err != nil { + return nil, err + } + + return &api.RestrievalRes{ + DealID: di, + }, nil +} + +func (a *API) doRetrieval(ctx context.Context, order api.RetrievalOrder, sel datamodel.Node) (rm.DealID, error) { + if order.MinerPeer == nil || order.MinerPeer.ID == "" { + mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK) + if err != nil { + return 0, err + } + + order.MinerPeer = &rm.RetrievalPeer{ + ID: *mi.PeerId, + Address: order.Miner, + } + } + + if order.Total.Int == nil { + return 0, xerrors.Errorf("cannot make retrieval deal for null total") + } + + if order.Size == 0 { + return 0, xerrors.Errorf("cannot make retrieval deal for zero bytes") + } + + ppb := types.BigDiv(order.Total, types.NewInt(order.Size)) + + params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, sel, order.Piece, order.UnsealPrice) + if err != nil { + return 0, xerrors.Errorf("Error in retrieval params: %s", err) + } + + id := a.Retrieval.NextID() + id, err = a.Retrieval.Retrieve( + ctx, + id, + order.Root, + params, + order.Total, + *order.MinerPeer, + order.Client, + order.Miner, + ) + + if err != nil { + return 0, xerrors.Errorf("Retrieve failed: %w", err) + } + + return id, nil +} + +func (a *API) ClientRetrieveWait(ctx context.Context, deal rm.DealID) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + subscribeEvents := make(chan rm.ClientDealState, 1) + + unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) { + // We'll check the deal IDs inside consumeAllEvents. 
+ if state.ID != deal { + return + } + select { + case <-ctx.Done(): + case subscribeEvents <- state: + } + }) + defer unsubscribe() + + { + state, err := a.Retrieval.GetDeal(deal) + if err != nil { + return xerrors.Errorf("getting deal state: %w", err) + } + select { + case subscribeEvents <- state: + default: // already have an event queued from the subscription + } + } + + for { + select { + case <-ctx.Done(): + return xerrors.New("Retrieval Timed Out") + case state := <-subscribeEvents: + switch state.Status { + case rm.DealStatusCompleted: + return nil + case rm.DealStatusRejected: + return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message) + case rm.DealStatusCancelled: + return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message) + case + rm.DealStatusDealNotFound, + rm.DealStatusErrored: + return xerrors.Errorf("Retrieval Error: %s", state.Message) + } + } + } +} + +type ExportDest struct { + Writer io.Writer + Path string +} + +func (ed *ExportDest) doWrite(cb func(io.Writer) error) error { + if ed.Writer != nil { + return cb(ed.Writer) + } + + f, err := os.OpenFile(ed.Path, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + + if err := cb(f); err != nil { + _ = f.Close() + return err + } + + return f.Close() +} + +func (a *API) ClientExport(ctx context.Context, exportRef api.ExportRef, ref api.FileRef) error { + return a.ClientExportInto(ctx, exportRef, ref.IsCAR, ExportDest{Path: ref.Path}) +} + +func (a *API) ClientExportInto(ctx context.Context, exportRef api.ExportRef, car bool, dest ExportDest) error { proxyBss, retrieveIntoIPFS := a.RtvlBlockstoreAccessor.(*retrievaladapter.ProxyBlockstoreAccessor) - carBss, retrieveIntoCAR := a.RtvlBlockstoreAccessor.(*retrievaladapter.CARBlockstoreAccessor) + carPath := exportRef.FromLocalCAR - carPath := order.FromLocalCAR - - // we actually need to retrieve from the network if carPath == "" { - if !retrieveIntoIPFS && !retrieveIntoCAR { - // we don't recognize the blockstore accessor. - finish(xerrors.Errorf("unsupported retrieval blockstore accessor")) - return - } - - if order.MinerPeer == nil || order.MinerPeer.ID == "" { - mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK) - if err != nil { - finish(err) - return - } - - order.MinerPeer = &rm.RetrievalPeer{ - ID: *mi.PeerId, - Address: order.Miner, - } - } - - if order.Total.Int == nil { - finish(xerrors.Errorf("cannot make retrieval deal for null total")) - return - } - - if order.Size == 0 { - finish(xerrors.Errorf("cannot make retrieval deal for zero bytes")) - return - } - - ppb := types.BigDiv(order.Total, types.NewInt(order.Size)) - - params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, sel, order.Piece, order.UnsealPrice) - if err != nil { - finish(xerrors.Errorf("Error in retrieval params: %s", err)) - return - } - - // Subscribe to events before retrieving to avoid losing events. - subscribeEvents := make(chan retrievalSubscribeEvent, 1) - subscribeCtx, cancel := context.WithCancel(ctx) - defer cancel() - unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) { - // We'll check the deal IDs inside consumeAllEvents. 
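// [Editorial sketch, not part of the patch] Retrieval and export are now separate steps in client.go:
// ClientRetrieve only kicks off the retrieval and returns the deal ID, ClientRetrieveWait blocks until
// that deal completes, and ClientExport / ClientExportInto (defined just below) turn the locally
// fetched DAG into a CAR or UnixFS output. A usage sketch against the full-node API, assuming the
// interface mirrors the implementation shown here; the path selector and output path are illustrative.
package example

import (
	"context"

	"github.com/filecoin-project/lotus/api"
)

// retrieveAndExport drives the split flow end to end. `order` is assumed to be a
// prepared api.RetrievalOrder (root CID, miner, pricing); error wrapping is omitted.
func retrieveAndExport(ctx context.Context, n api.FullNode, order api.RetrievalOrder, outPath string) error {
	// Optionally narrow the retrieval to a sub-DAG; getDataSelector accepts either a
	// text path or a JSON selector (a string starting with '{').
	sel := api.Selector("Links/0/Hash") // illustrative path selector
	order.DataSelector = &sel

	res, err := n.ClientRetrieve(ctx, order) // starts the retrieval deal
	if err != nil {
		return err
	}
	if err := n.ClientRetrieveWait(ctx, res.DealID); err != nil { // block until the deal completes
		return err
	}

	// Export the retrieved data from the local blockstore as a UnixFS file.
	eref := api.ExportRef{
		Root:   order.Root,
		DealID: res.DealID,
	}
	return n.ClientExport(ctx, eref, api.FileRef{Path: outPath})
}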
- if state.PayloadCID.Equals(order.Root) { - select { - case <-subscribeCtx.Done(): - case subscribeEvents <- retrievalSubscribeEvent{event, state}: - } - } - }) - - id := a.Retrieval.NextID() - id, err = a.Retrieval.Retrieve( - ctx, - id, - order.Root, - params, - order.Total, - *order.MinerPeer, - order.Client, - order.Miner, - ) - - if err != nil { - unsubscribe() - finish(xerrors.Errorf("Retrieve failed: %w", err)) - return - } - - err = consumeAllEvents(ctx, id, subscribeEvents, events) - - unsubscribe() - if err != nil { - finish(xerrors.Errorf("Retrieve: %w", err)) - return + return xerrors.Errorf("unsupported retrieval blockstore accessor") } if retrieveIntoCAR { - carPath = carBss.PathFor(id) + carPath = carBss.PathFor(exportRef.DealID) } } - if ref == nil { - // If ref is nil, it only fetches the data into the configured blockstore - // (if fetching from network). - finish(nil) - return - } - - // determine where did the retrieval go var retrievalBs bstore.Blockstore if retrieveIntoIPFS { retrievalBs = proxyBss.Blockstore } else { cbs, err := stores.ReadOnlyFilestore(carPath) if err != nil { - finish(err) - return + return err } defer cbs.Close() //nolint:errcheck retrievalBs = cbs } + dserv := merkledag.NewDAGService(blockservice.New(retrievalBs, offline.Exchange(retrievalBs))) + // Are we outputting a CAR? - if ref.IsCAR { - + if car { // not IPFS and we do full selection - just extract the CARv1 from the CARv2 we stored the retrieval in - if !retrieveIntoIPFS && order.DatamodelPathSelector == nil { - finish(carv2.ExtractV1File(carPath, ref.Path)) - return + if !retrieveIntoIPFS && len(exportRef.DAGs) == 0 && dest.Writer == nil { + return carv2.ExtractV1File(carPath, dest.Path) } - - // generating a CARv1 from the configured blockstore - f, err := os.OpenFile(ref.Path, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - finish(err) - return - } - - err = car.NewSelectiveCar( - ctx, - retrievalBs, - []car.Dag{{ - Root: order.Root, - Selector: sel, - }}, - car.MaxTraversalLinks(config.MaxTraversalLinks), - ).Write(f) - if err != nil { - finish(err) - return - } - - finish(f.Close()) - return } - // we are extracting a UnixFS file. 
- ds := merkledag.NewDAGService(blockservice.New(retrievalBs, offline.Exchange(retrievalBs))) - root := order.Root + roots, err := parseDagSpec(ctx, exportRef.Root, exportRef.DAGs, dserv, car) + if err != nil { + return xerrors.Errorf("parsing dag spec: %w", err) + } + if car { + return a.outputCAR(ctx, dserv, retrievalBs, exportRef.Root, roots, dest) + } - // if we used a selector - need to find the sub-root the user actually wanted to retrieve - if order.DatamodelPathSelector != nil { + if len(roots) != 1 { + return xerrors.Errorf("unixfs retrieval requires one root node, got %d", len(roots)) + } - var subRootFound bool + return a.outputUnixFS(ctx, roots[0].root, dserv, dest) +} - // no err check - we just compiled this before starting, but now we do not wrap a `*` - selspec, _ := textselector.SelectorSpecFromPath(*order.DatamodelPathSelector, nil) //nolint:errcheck +func (a *API) outputCAR(ctx context.Context, ds format.DAGService, bs bstore.Blockstore, root cid.Cid, dags []dagSpec, dest ExportDest) error { + // generating a CARv1 from the configured blockstore + roots := make([]cid.Cid, len(dags)) + for i, dag := range dags { + roots[i] = dag.root + } + + return dest.doWrite(func(w io.Writer) error { + + if err := car.WriteHeader(&car.CarHeader{ + Roots: roots, + Version: 1, + }, w); err != nil { + return fmt.Errorf("failed to write car header: %s", err) + } + + cs := cid.NewSet() + + for _, dagSpec := range dags { + if err := utils.TraverseDag( + ctx, + ds, + root, + dagSpec.selector, + func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { + if r == traversal.VisitReason_SelectionMatch { + var c cid.Cid + if p.LastBlock.Link == nil { + c = root + } else { + cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) + if !castOK { + return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link) + } + + c = cidLnk.Cid + } + + if cs.Visit(c) { + nb, err := bs.Get(ctx, c) + if err != nil { + return xerrors.Errorf("getting block data: %w", err) + } + + err = util.LdWrite(w, c.Bytes(), nb.RawData()) + if err != nil { + return xerrors.Errorf("writing block data: %w", err) + } + } + + return nil + } + return nil + }, + ); err != nil { + return xerrors.Errorf("error while traversing car dag: %w", err) + } + } + + return nil + }) +} + +func (a *API) outputUnixFS(ctx context.Context, root cid.Cid, ds format.DAGService, dest ExportDest) error { + nd, err := ds.Get(ctx, root) + if err != nil { + return xerrors.Errorf("ClientRetrieve: %w", err) + } + file, err := unixfile.NewUnixfsFile(ctx, ds, nd) + if err != nil { + return xerrors.Errorf("ClientRetrieve: %w", err) + } + + if dest.Writer == nil { + return files.WriteTo(file, dest.Path) + } + + switch f := file.(type) { + case files.File: + _, err = io.Copy(dest.Writer, f) + if err != nil { + return err + } + return nil + default: + return fmt.Errorf("file type %T is not supported", nd) + } +} + +type dagSpec struct { + root cid.Cid + selector ipld.Node +} + +func parseDagSpec(ctx context.Context, root cid.Cid, dsp []api.DagSpec, ds format.DAGService, car bool) ([]dagSpec, error) { + if len(dsp) == 0 { + return []dagSpec{ + { + root: root, + selector: nil, + }, + }, nil + } + + out := make([]dagSpec, len(dsp)) + for i, spec := range dsp { + + if spec.DataSelector == nil { + return nil, xerrors.Errorf("invalid DagSpec at position %d: `DataSelector` can not be nil", i) + } + + // reify selector + var err error + out[i].selector, err = getDataSelector(spec.DataSelector, car && spec.ExportMerkleProof) + if err != nil { + 
return nil, err + } + + // find the pointed-at root node within the containing ds + var rsn ipld.Node + + if strings.HasPrefix(string(*spec.DataSelector), "{") { + var err error + rsn, err = selectorparse.ParseJSONSelector(string(*spec.DataSelector)) + if err != nil { + return nil, xerrors.Errorf("failed to parse json-selector '%s': %w", *spec.DataSelector, err) + } + } else { + selspec, _ := textselector.SelectorSpecFromPath(textselector.Expression(*spec.DataSelector), car && spec.ExportMerkleProof, nil) //nolint:errcheck + rsn = selspec.Node() + } + + var newRoot cid.Cid + var errHalt = errors.New("halt walk") if err := utils.TraverseDag( ctx, ds, root, - selspec.Node(), + rsn, func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { if r == traversal.VisitReason_SelectionMatch { - - if p.LastBlock.Path.String() != p.Path.String() { + if !car && p.LastBlock.Path.String() != p.Path.String() { return xerrors.Errorf("unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", p.Path.String()) } + if p.LastBlock.Link == nil { + // this is likely the root node that we've matched here + newRoot = root + return errHalt + } + cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) if !castOK { - return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link.String()) + return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link) } - root = cidLnk.Cid - subRootFound = true + newRoot = cidLnk.Cid + + return errHalt } return nil }, - ); err != nil { - finish(xerrors.Errorf("error while locating partial retrieval sub-root: %w", err)) - return + ); err != nil && err != errHalt { + return nil, xerrors.Errorf("error while locating partial retrieval sub-root: %w", err) } - if !subRootFound { - finish(xerrors.Errorf("path selection '%s' does not match a node within %s", *order.DatamodelPathSelector, root)) - return + if newRoot == cid.Undef { + return nil, xerrors.Errorf("path selection does not match a node within %s", root) } + + out[i].root = newRoot } - nd, err := ds.Get(ctx, root) - if err != nil { - finish(xerrors.Errorf("ClientRetrieve: %w", err)) - return - } - file, err := unixfile.NewUnixfsFile(ctx, ds, nd) - if err != nil { - finish(xerrors.Errorf("ClientRetrieve: %w", err)) - return - } - - finish(files.WriteTo(file, ref.Path)) + return out, nil } func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) { @@ -1110,8 +1194,13 @@ func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, er func (a *API) ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) { updates := make(chan api.RetrievalInfo) - unsub := a.Retrieval.SubscribeToEvents(func(_ rm.ClientEvent, deal rm.ClientDealState) { - updates <- a.newRetrievalInfo(ctx, deal) + unsub := a.Retrieval.SubscribeToEvents(func(evt rm.ClientEvent, deal rm.ClientDealState) { + update := a.newRetrievalInfo(ctx, deal) + update.Event = &evt + select { + case updates <- update: + case <-ctx.Done(): + } }) go func() { @@ -1196,7 +1285,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet } // check that the data is a car file; if it's not, retrieval won't work - _, _, err = car.ReadHeader(bufio.NewReader(rdr)) + _, err = car.ReadHeader(bufio.NewReader(rdr)) if err != nil { return nil, xerrors.Errorf("not a car file: %w", err) } diff --git a/node/impl/client/client_test.go b/node/impl/client/client_test.go index 2be87a659..7ad4c9c15 100644 --- 
a/node/impl/client/client_test.go +++ b/node/impl/client/client_test.go @@ -64,7 +64,7 @@ func TestImportLocal(t *testing.T) { require.NoError(t, err) require.True(t, local) - order := api.RetrievalOrder{ + order := api.ExportRef{ Root: root, FromLocalCAR: it.CARPath, } @@ -72,8 +72,7 @@ func TestImportLocal(t *testing.T) { // retrieve as UnixFS. out1 := filepath.Join(dir, "retrieval1.data") // as unixfs out2 := filepath.Join(dir, "retrieval2.data") // as car - //stm: @CLIENT_RETRIEVAL_RETRIEVE_001 - err = a.ClientRetrieve(ctx, order, &api.FileRef{ + err = a.ClientExport(ctx, order, api.FileRef{ Path: out1, }) require.NoError(t, err) @@ -82,7 +81,7 @@ func TestImportLocal(t *testing.T) { require.NoError(t, err) require.Equal(t, b, outBytes) - err = a.ClientRetrieve(ctx, order, &api.FileRef{ + err = a.ClientExport(ctx, order, api.FileRef{ Path: out2, IsCAR: true, }) @@ -112,7 +111,7 @@ func TestImportLocal(t *testing.T) { // recreate the unixfs dag, and see if it matches the original file byte by byte // import the car into a memory blockstore, then export the unixfs file. bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - _, err = car.LoadCar(bs, exported.DataReader()) + _, err = car.LoadCar(ctx, bs, exported.DataReader()) require.NoError(t, err) dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) diff --git a/node/impl/full.go b/node/impl/full.go index f9c83ded0..a6f61c305 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -42,7 +42,7 @@ type FullNodeAPI struct { } func (n *FullNodeAPI) CreateBackup(ctx context.Context, fpath string) error { - return backup(n.DS, fpath) + return backup(ctx, n.DS, fpath) } func (n *FullNodeAPI) NodeStatus(ctx context.Context, inclChainStatus bool) (status api.NodeStatus, err error) { diff --git a/node/impl/full/chain.go b/node/impl/full/chain.go index 4ffbe0e63..ce492f94a 100644 --- a/node/impl/full/chain.go +++ b/node/impl/full/chain.go @@ -99,11 +99,11 @@ func (m *ChainModule) ChainHead(context.Context) (*types.TipSet, error) { } func (a *ChainAPI) ChainGetBlock(ctx context.Context, msg cid.Cid) (*types.BlockHeader, error) { - return a.Chain.GetBlock(msg) + return a.Chain.GetBlock(ctx, msg) } func (m *ChainModule) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { - return m.Chain.LoadTipSet(key) + return m.Chain.LoadTipSet(ctx, key) } func (m *ChainModule) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error) { @@ -111,12 +111,12 @@ func (m *ChainModule) ChainGetPath(ctx context.Context, from, to types.TipSetKey } func (m *ChainModule) ChainGetBlockMessages(ctx context.Context, msg cid.Cid) (*api.BlockMessages, error) { - b, err := m.Chain.GetBlock(msg) + b, err := m.Chain.GetBlock(ctx, msg) if err != nil { return nil, err } - bmsgs, smsgs, err := m.Chain.MessagesForBlock(b) + bmsgs, smsgs, err := m.Chain.MessagesForBlock(ctx, b) if err != nil { return nil, err } @@ -143,7 +143,7 @@ func (a *ChainAPI) ChainGetPath(ctx context.Context, from types.TipSetKey, to ty } func (a *ChainAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([]api.Message, error) { - b, err := a.Chain.GetBlock(bcid) + b, err := a.Chain.GetBlock(ctx, bcid) if err != nil { return nil, err } @@ -154,12 +154,12 @@ func (a *ChainAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([] } // TODO: need to get the number of messages better than this - pts, err := a.Chain.LoadTipSet(types.NewTipSetKey(b.Parents...)) + pts, err := a.Chain.LoadTipSet(ctx, 
types.NewTipSetKey(b.Parents...)) if err != nil { return nil, err } - cm, err := a.Chain.MessagesForTipset(pts) + cm, err := a.Chain.MessagesForTipset(ctx, pts) if err != nil { return nil, err } @@ -176,7 +176,7 @@ func (a *ChainAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([] } func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([]*types.MessageReceipt, error) { - b, err := a.Chain.GetBlock(bcid) + b, err := a.Chain.GetBlock(ctx, bcid) if err != nil { return nil, err } @@ -186,19 +186,19 @@ func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([] } // TODO: need to get the number of messages better than this - pts, err := a.Chain.LoadTipSet(types.NewTipSetKey(b.Parents...)) + pts, err := a.Chain.LoadTipSet(ctx, types.NewTipSetKey(b.Parents...)) if err != nil { return nil, err } - cm, err := a.Chain.MessagesForTipset(pts) + cm, err := a.Chain.MessagesForTipset(ctx, pts) if err != nil { return nil, err } var out []*types.MessageReceipt for i := 0; i < len(cm); i++ { - r, err := a.Chain.GetParentReceipt(b, i) + r, err := a.Chain.GetParentReceipt(ctx, b, i) if err != nil { return nil, err } @@ -210,7 +210,7 @@ func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([] } func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, err } @@ -220,7 +220,7 @@ func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSe return nil, nil } - cm, err := a.Chain.MessagesForTipset(ts) + cm, err := a.Chain.MessagesForTipset(ctx, ts) if err != nil { return nil, err } @@ -237,7 +237,7 @@ func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSe } func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -245,7 +245,7 @@ func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpo } func (m *ChainModule) ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -253,7 +253,7 @@ func (m *ChainModule) ChainGetTipSetAfterHeight(ctx context.Context, h abi.Chain } func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { - blk, err := m.ExposedBlockstore.Get(obj) + blk, err := m.ExposedBlockstore.Get(ctx, obj) if err != nil { return nil, xerrors.Errorf("blockstore get: %w", err) } @@ -262,11 +262,11 @@ func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, er } func (a *ChainAPI) ChainDeleteObj(ctx context.Context, obj cid.Cid) error { - return a.ExposedBlockstore.DeleteBlock(obj) + return a.ExposedBlockstore.DeleteBlock(ctx, obj) } func (m *ChainModule) ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) { - return m.ExposedBlockstore.Has(obj) + return m.ExposedBlockstore.Has(ctx, obj) } func (a *ChainAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) { @@ -318,7 +318,7 @@ func (a *ChainAPI) ChainStatObj(ctx context.Context, obj 
cid.Cid, base cid.Cid) } func (a *ChainAPI) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error { - newHeadTs, err := a.Chain.GetTipSetFromKey(tsk) + newHeadTs, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -342,11 +342,11 @@ func (a *ChainAPI) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error } } - return a.Chain.SetHead(newHeadTs) + return a.Chain.SetHead(ctx, newHeadTs) } func (a *ChainAPI) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { - genb, err := a.Chain.GetGenesis() + genb, err := a.Chain.GetGenesis(ctx) if err != nil { return nil, err } @@ -355,7 +355,7 @@ func (a *ChainAPI) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { } func (a *ChainAPI) ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -434,7 +434,7 @@ func resolveOnce(bs blockstore.Blockstore, tse stmgr.Executor) func(ctx context. return nil, nil, err } - if err := bs.Put(n); err != nil { + if err := bs.Put(ctx, n); err != nil { return nil, nil, xerrors.Errorf("put hamt val: %w", err) } @@ -482,7 +482,7 @@ func resolveOnce(bs blockstore.Blockstore, tse stmgr.Executor) func(ctx context. return nil, nil, err } - if err := bs.Put(n); err != nil { + if err := bs.Put(ctx, n); err != nil { return nil, nil, xerrors.Errorf("put amt val: %w", err) } @@ -530,7 +530,7 @@ func resolveOnce(bs blockstore.Blockstore, tse stmgr.Executor) func(ctx context. return nil, nil, err } - if err := bs.Put(n); err != nil { + if err := bs.Put(ctx, n); err != nil { return nil, nil, xerrors.Errorf("put amt val: %w", err) } @@ -577,7 +577,7 @@ func (a *ChainAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, } func (m *ChainModule) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { - cm, err := m.Chain.GetCMessage(mc) + cm, err := m.Chain.GetCMessage(ctx, mc) if err != nil { return nil, err } @@ -586,7 +586,7 @@ func (m *ChainModule) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.M } func (a *ChainAPI) ChainExport(ctx context.Context, nroots abi.ChainEpoch, skipoldmsgs bool, tsk types.TipSetKey) (<-chan []byte, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go index edf53ff63..dd1e8d5ea 100644 --- a/node/impl/full/gas.go +++ b/node/impl/full/gas.go @@ -82,14 +82,14 @@ type GasMeta struct { Limit int64 } -func (g *GasPriceCache) GetTSGasStats(cstore *store.ChainStore, ts *types.TipSet) ([]GasMeta, error) { +func (g *GasPriceCache) GetTSGasStats(ctx context.Context, cstore *store.ChainStore, ts *types.TipSet) ([]GasMeta, error) { i, has := g.c.Get(ts.Key()) if has { return i.([]GasMeta), nil } var prices []GasMeta - msgs, err := cstore.MessagesForTipset(ts) + msgs, err := cstore.MessagesForTipset(ctx, ts) if err != nil { return nil, xerrors.Errorf("loading messages: %w", err) } @@ -173,7 +173,7 @@ func (a *GasAPI) GasEstimateGasPremium( gaslimit int64, _ types.TipSetKey, ) (types.BigInt, error) { - return gasEstimateGasPremium(a.Chain, a.PriceCache, nblocksincl) + return gasEstimateGasPremium(ctx, a.Chain, a.PriceCache, nblocksincl) } func (m *GasModule) GasEstimateGasPremium( ctx 
context.Context, @@ -182,9 +182,9 @@ func (m *GasModule) GasEstimateGasPremium( gaslimit int64, _ types.TipSetKey, ) (types.BigInt, error) { - return gasEstimateGasPremium(m.Chain, m.PriceCache, nblocksincl) + return gasEstimateGasPremium(ctx, m.Chain, m.PriceCache, nblocksincl) } -func gasEstimateGasPremium(cstore *store.ChainStore, cache *GasPriceCache, nblocksincl uint64) (types.BigInt, error) { +func gasEstimateGasPremium(ctx context.Context, cstore *store.ChainStore, cache *GasPriceCache, nblocksincl uint64) (types.BigInt, error) { if nblocksincl == 0 { nblocksincl = 1 } @@ -198,13 +198,13 @@ func gasEstimateGasPremium(cstore *store.ChainStore, cache *GasPriceCache, nbloc break // genesis } - pts, err := cstore.LoadTipSet(ts.Parents()) + pts, err := cstore.LoadTipSet(ctx, ts.Parents()) if err != nil { return types.BigInt{}, err } blocks += len(pts.Blocks()) - meta, err := cache.GetTSGasStats(cstore, pts) + meta, err := cache.GetTSGasStats(ctx, cstore, pts) if err != nil { return types.BigInt{}, err } @@ -236,14 +236,14 @@ func gasEstimateGasPremium(cstore *store.ChainStore, cache *GasPriceCache, nbloc } func (a *GasAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return -1, xerrors.Errorf("getting tipset: %w", err) } return gasEstimateGasLimit(ctx, a.Chain, a.Stmgr, a.Mpool, msgIn, ts) } func (m *GasModule) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return -1, xerrors.Errorf("getting tipset: %w", err) } @@ -283,7 +283,7 @@ func gasEstimateGasLimit( if err != stmgr.ErrExpensiveFork { break } - ts, err = cstore.GetTipSetFromKey(ts.Parents()) + ts, err = cstore.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return -1, xerrors.Errorf("getting parent tipset: %w", err) } diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index f792cdf99..afff871ca 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -51,11 +51,11 @@ func (a *MpoolAPI) MpoolGetConfig(context.Context) (*types.MpoolConfig, error) { } func (a *MpoolAPI) MpoolSetConfig(ctx context.Context, cfg *types.MpoolConfig) error { - return a.Mpool.SetConfig(cfg) + return a.Mpool.SetConfig(ctx, cfg) } func (a *MpoolAPI) MpoolSelect(ctx context.Context, tsk types.TipSetKey, ticketQuality float64) ([]*types.SignedMessage, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -64,7 +64,7 @@ func (a *MpoolAPI) MpoolSelect(ctx context.Context, tsk types.TipSetKey, ticketQ } func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -87,7 +87,7 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty // different blocks in tipsets of the same height // we exclude messages that have been included in blocks in the mpool tipset - have, err := a.Mpool.MessagesForBlocks(mpts.Blocks()) + have, err := a.Mpool.MessagesForBlocks(ctx, mpts.Blocks()) if err != nil { return nil, xerrors.Errorf("getting messages for base ts: %w", 
err) } @@ -97,7 +97,7 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty } } - msgs, err := a.Mpool.MessagesForBlocks(ts.Blocks()) + msgs, err := a.Mpool.MessagesForBlocks(ctx, ts.Blocks()) if err != nil { return nil, xerrors.Errorf(": %w", err) } @@ -115,7 +115,7 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty return pending, nil } - ts, err = a.Chain.LoadTipSet(ts.Parents()) + ts, err = a.Chain.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("loading parent tipset: %w", err) } diff --git a/node/impl/full/multisig.go b/node/impl/full/multisig.go index 0d20c3f03..edc67ec9e 100644 --- a/node/impl/full/multisig.go +++ b/node/impl/full/multisig.go @@ -100,7 +100,7 @@ func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src a return nil, actErr } - return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) + return a.MsigCancelTxnHash(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (*api.MessagePrototype, error) { @@ -127,7 +127,7 @@ func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src return nil, actErr } - return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) + return a.MsigCancelTxnHash(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) { @@ -138,7 +138,11 @@ func (a *MsigAPI) MsigApproveTxnHash(ctx context.Context, msig address.Address, return a.msigApproveOrCancelTxnHash(ctx, api.MsigApprove, msig, txID, proposer, to, amt, src, method, params) } -func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { +func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) { + return a.msigApproveOrCancelSimple(ctx, api.MsigCancel, msig, txID, src) +} + +func (a *MsigAPI) MsigCancelTxnHash(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { return a.msigApproveOrCancelTxnHash(ctx, api.MsigCancel, msig, txID, src, to, amt, src, method, params) } diff --git a/node/impl/full/state.go b/node/impl/full/state.go index e251fa3d5..24cc08c8c 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -132,7 +132,7 @@ func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Ad } func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return miner.MinerInfo{}, xerrors.Errorf("failed to load tipset: %w", err) } @@ -250,7 +250,7 @@ func (a *StateAPI) StateMinerPartitions(ctx context.Context, m address.Address, } func (m *StateModule) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) { - ts, err := 
m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -345,7 +345,7 @@ func (a *StateAPI) StateMinerRecoveries(ctx context.Context, addr address.Addres } func (m *StateModule) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -363,7 +363,7 @@ func (m *StateModule) StateMinerPower(ctx context.Context, addr address.Address, } func (a *StateAPI) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (res *api.InvocResult, err error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -372,7 +372,7 @@ func (a *StateAPI) StateCall(ctx context.Context, msg *types.Message, tsk types. if err != stmgr.ErrExpensiveFork { break } - ts, err = a.Chain.GetTipSetFromKey(ts.Parents()) + ts, err = a.Chain.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("getting parent tipset: %w", err) } @@ -395,17 +395,17 @@ func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid. msgToReplay = mlkp.Message - executionTs, err := a.Chain.GetTipSetFromKey(mlkp.TipSet) + executionTs, err := a.Chain.GetTipSetFromKey(ctx, mlkp.TipSet) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", mlkp.TipSet, err) } - ts, err = a.Chain.LoadTipSet(executionTs.Parents()) + ts, err = a.Chain.LoadTipSet(ctx, executionTs.Parents()) if err != nil { return nil, xerrors.Errorf("loading parent tipset %s: %w", mlkp.TipSet, err) } } else { - ts, err = a.Chain.LoadTipSet(tsk) + ts, err = a.Chain.LoadTipSet(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading specified tipset %s: %w", tsk, err) } @@ -433,7 +433,7 @@ func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid. 
} func (m *StateModule) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (a *types.Actor, err error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -441,7 +441,7 @@ func (m *StateModule) StateGetActor(ctx context.Context, actor address.Address, } func (m *StateModule) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return address.Undef, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -450,7 +450,7 @@ func (m *StateModule) StateLookupID(ctx context.Context, addr address.Address, t } func (m *StateModule) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return address.Undef, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -459,7 +459,7 @@ func (m *StateModule) StateAccountKey(ctx context.Context, addr address.Address, } func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -468,7 +468,7 @@ func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, ts return nil, xerrors.Errorf("getting actor: %w", err) } - blk, err := a.Chain.StateBlockstore().Get(act.Head) + blk, err := a.Chain.StateBlockstore().Get(ctx, act.Head) if err != nil { return nil, xerrors.Errorf("getting actor head: %w", err) } @@ -556,7 +556,7 @@ func (m *StateModule) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence var returndec interface{} if recpt.ExitCode == 0 && len(recpt.Return) > 0 { - cmsg, err := m.Chain.GetCMessage(msg) + cmsg, err := m.Chain.GetCMessage(ctx, msg) if err != nil { return nil, xerrors.Errorf("failed to load message after successful receipt search: %w", err) } @@ -585,7 +585,7 @@ func (m *StateModule) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence } func (m *StateModule) StateSearchMsg(ctx context.Context, tsk types.TipSetKey, msg cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { - fromTs, err := m.Chain.GetTipSetFromKey(tsk) + fromTs, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -607,7 +607,7 @@ func (m *StateModule) StateSearchMsg(ctx context.Context, tsk types.TipSetKey, m } func (m *StateModule) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -615,7 +615,7 @@ func (m *StateModule) StateListMiners(ctx context.Context, tsk types.TipSetKey) } func (a *StateAPI) StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -623,7 +623,7 @@ func (a *StateAPI) StateListActors(ctx context.Context, tsk types.TipSetKey) 
([] } func (m *StateModule) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return api.MarketBalance{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -633,7 +633,7 @@ func (m *StateModule) StateMarketBalance(ctx context.Context, addr address.Addre func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketBalance, error) { out := map[string]api.MarketBalance{} - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -673,7 +673,7 @@ func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSet func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketDeal, error) { out := map[string]api.MarketDeal{} - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -712,7 +712,7 @@ func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (m } func (m *StateModule) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -777,7 +777,7 @@ func (a *StateAPI) StateMinerSectorCount(ctx context.Context, addr address.Addre } func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return miner.SectorPreCommitOnChainInfo{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -793,7 +793,7 @@ func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.A } func (m *StateModule) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -825,7 +825,7 @@ func (a *StateAPI) StateSectorPartition(ctx context.Context, maddr address.Addre } func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toheight abi.ChainEpoch) ([]cid.Cid, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -874,7 +874,7 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc var out []cid.Cid for ts.Height() >= toheight { - msgs, err := a.Chain.MessagesForTipset(ts) + msgs, err := a.Chain.MessagesForTipset(ctx, ts) if err != nil { return nil, xerrors.Errorf("failed to get messages for tipset (%s): %w", ts.Key(), err) } @@ -889,7 +889,7 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc break } - next, err := a.Chain.LoadTipSet(ts.Parents()) + next, err := a.Chain.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("loading next tipset: 
%w", err) } @@ -901,7 +901,7 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc } func (a *StateAPI) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs []*types.Message, tsk types.TipSetKey) (*api.ComputeStateOutput, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -917,7 +917,7 @@ func (a *StateAPI) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs } func (m *StateModule) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -938,7 +938,7 @@ func (m *StateModule) MsigGetAvailableBalance(ctx context.Context, addr address. } func (a *StateAPI) MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MsigVesting, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return api.EmptyVesting, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -976,12 +976,12 @@ func (a *StateAPI) MsigGetVestingSchedule(ctx context.Context, addr address.Addr } func (m *StateModule) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) { - startTs, err := m.Chain.GetTipSetFromKey(start) + startTs, err := m.Chain.GetTipSetFromKey(ctx, start) if err != nil { return types.EmptyInt, xerrors.Errorf("loading start tipset %s: %w", start, err) } - endTs, err := m.Chain.GetTipSetFromKey(end) + endTs, err := m.Chain.GetTipSetFromKey(ctx, end) if err != nil { return types.EmptyInt, xerrors.Errorf("loading end tipset %s: %w", end, err) } @@ -1016,7 +1016,7 @@ func (m *StateModule) MsigGetVested(ctx context.Context, addr address.Address, s } func (m *StateModule) MsigGetPending(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.MsigTransaction, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1053,7 +1053,7 @@ var initialPledgeNum = types.NewInt(110) var initialPledgeDen = types.NewInt(100) func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1114,7 +1114,7 @@ func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { // TODO: this repeats a lot of the previous function. Fix that. 
- ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1190,7 +1190,7 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr } func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1219,7 +1219,7 @@ func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address } func (a *StateAPI) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return false, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1319,7 +1319,7 @@ var dealProviderCollateralDen = types.NewInt(100) // StateDealProviderCollateralBounds returns the min and max collateral a storage provider // can issue. It takes the deal size and verified status as parameters. func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1376,7 +1376,7 @@ func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, siz } func (a *StateAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1397,7 +1397,7 @@ func stateVMCirculatingSupplyInternal( cstore *store.ChainStore, smgr *stmgr.StateManager, ) (api.CirculatingSupply, error) { - ts, err := cstore.GetTipSetFromKey(tsk) + ts, err := cstore.GetTipSetFromKey(ctx, tsk) if err != nil { return api.CirculatingSupply{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1411,7 +1411,7 @@ func stateVMCirculatingSupplyInternal( } func (m *StateModule) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return network.VersionMax, xerrors.Errorf("loading tipset %s: %w", tsk, err) } diff --git a/node/impl/full/sync.go b/node/impl/full/sync.go index 652ae3ecb..bd7de65b1 100644 --- a/node/impl/full/sync.go +++ b/node/impl/full/sync.go @@ -51,25 +51,25 @@ func (a *SyncAPI) SyncState(ctx context.Context) (*api.SyncState, error) { } func (a *SyncAPI) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error { - parent, err := a.Syncer.ChainStore().GetBlock(blk.Header.Parents[0]) + parent, err := a.Syncer.ChainStore().GetBlock(ctx, blk.Header.Parents[0]) if err != nil { return xerrors.Errorf("loading parent block: %w", err) } if a.SlashFilter != nil { - if err := a.SlashFilter.MinedBlock(blk.Header, parent.Height); err != nil { + if err := a.SlashFilter.MinedBlock(ctx, blk.Header, parent.Height); err != nil { log.Errorf(" SLASH FILTER ERROR: %s", err) return xerrors.Errorf(" SLASH FILTER ERROR: 
%w", err) } } // TODO: should we have some sort of fast path to adding a local block? - bmsgs, err := a.Syncer.ChainStore().LoadMessagesFromCids(blk.BlsMessages) + bmsgs, err := a.Syncer.ChainStore().LoadMessagesFromCids(ctx, blk.BlsMessages) if err != nil { return xerrors.Errorf("failed to load bls messages: %w", err) } - smsgs, err := a.Syncer.ChainStore().LoadSignedMessagesFromCids(blk.SecpkMessages) + smsgs, err := a.Syncer.ChainStore().LoadSignedMessagesFromCids(ctx, blk.SecpkMessages) if err != nil { return xerrors.Errorf("failed to load secpk message: %w", err) } @@ -137,12 +137,12 @@ func (a *SyncAPI) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error } func (a *SyncAPI) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) { - ts, err := a.Syncer.ChainStore().LoadTipSet(tsk) + ts, err := a.Syncer.ChainStore().LoadTipSet(ctx, tsk) if err != nil { return false, err } - fts, err := a.Syncer.ChainStore().TryFillTipSet(ts) + fts, err := a.Syncer.ChainStore().TryFillTipSet(ctx, ts) if err != nil { return false, err } diff --git a/node/impl/paych/paych.go b/node/impl/paych/paych.go index 773a5efab..df3b1e3e4 100644 --- a/node/impl/paych/paych.go +++ b/node/impl/paych/paych.go @@ -35,11 +35,11 @@ func (a *PaychAPI) PaychGet(ctx context.Context, from, to address.Address, amt t } func (a *PaychAPI) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) { - return a.PaychMgr.AvailableFunds(ch) + return a.PaychMgr.AvailableFunds(ctx, ch) } func (a *PaychAPI) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) { - return a.PaychMgr.AvailableFundsByFromTo(from, to) + return a.PaychMgr.AvailableFundsByFromTo(ctx, from, to) } func (a *PaychAPI) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) { @@ -47,7 +47,7 @@ func (a *PaychAPI) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (add } func (a *PaychAPI) PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) { - return a.PaychMgr.AllocateLane(ch) + return a.PaychMgr.AllocateLane(ctx, ch) } func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) { @@ -60,7 +60,7 @@ func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address return nil, err } - lane, err := a.PaychMgr.AllocateLane(ch.Channel) + lane, err := a.PaychMgr.AllocateLane(ctx, ch.Channel) if err != nil { return nil, err } @@ -95,11 +95,11 @@ func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address } func (a *PaychAPI) PaychList(ctx context.Context) ([]address.Address, error) { - return a.PaychMgr.ListChannels() + return a.PaychMgr.ListChannels(ctx) } func (a *PaychAPI) PaychStatus(ctx context.Context, pch address.Address) (*api.PaychStatus, error) { - ci, err := a.PaychMgr.GetChannelInfo(pch) + ci, err := a.PaychMgr.GetChannelInfo(ctx, pch) if err != nil { return nil, err } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 39baa97bf..b94097d39 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -300,11 +300,11 @@ func (sm *StorageMinerAPI) StorageLocal(ctx context.Context) (map[stores.ID]stri return out, nil } -func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.SealedRef, error) { +func (sm *StorageMinerAPI) SectorsRefs(ctx context.Context) (map[string][]api.SealedRef, error) { // json can't handle cids as map keys out 
:= map[string][]api.SealedRef{} - refs, err := sm.SectorBlocks.List() + refs, err := sm.SectorBlocks.List(ctx) if err != nil { return nil, err } @@ -948,7 +948,7 @@ func (sm *StorageMinerAPI) PiecesGetCIDInfo(ctx context.Context, payloadCid cid. } func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error { - return backup(sm.DS, fpath) + return backup(ctx, sm.DS, fpath) } func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) { diff --git a/node/modules/chain.go b/node/modules/chain.go index 3518c3b29..ff957c9ad 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -30,7 +30,7 @@ import ( ) // ChainBitswap uses a blockstore that bypasses all caches. -func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ExposedBlockstore) dtypes.ChainBitswap { +func ChainBitswap(lc fx.Lifecycle, mctx helpers.MetricsCtx, host host.Host, rt routing.Routing, bs dtypes.ExposedBlockstore) dtypes.ChainBitswap { // prefix protocol for chain bitswap // (so bitswap uses /chain/ipfs/bitswap/1.0.0 internally for chain sync stuff) bitswapNetwork := network.NewFromIpfsHost(host, rt, network.Prefix("/chain")) @@ -58,8 +58,8 @@ func ChainBlockService(bs dtypes.ExposedBlockstore, rem dtypes.ChainBitswap) dty return blockservice.New(bs, rem) } -func MessagePool(lc fx.Lifecycle, us stmgr.UpgradeSchedule, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) { - mp, err := messagepool.New(mpp, ds, us, nn, j) +func MessagePool(lc fx.Lifecycle, mctx helpers.MetricsCtx, us stmgr.UpgradeSchedule, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) { + mp, err := messagepool.New(helpers.LifecycleCtx(mctx, lc), mpp, ds, us, nn, j) if err != nil { return nil, xerrors.Errorf("constructing mpool: %w", err) } @@ -73,23 +73,25 @@ func MessagePool(lc fx.Lifecycle, us stmgr.UpgradeSchedule, mpp messagepool.Prov } func ChainStore(lc fx.Lifecycle, + mctx helpers.MetricsCtx, cbs dtypes.ChainBlockstore, sbs dtypes.StateBlockstore, ds dtypes.MetadataDS, basebs dtypes.BaseBlockstore, weight store.WeightFunc, + us stmgr.UpgradeSchedule, j journal.Journal) *store.ChainStore { chain := store.NewChainStore(cbs, sbs, ds, weight, j) - if err := chain.Load(); err != nil { + if err := chain.Load(helpers.LifecycleCtx(mctx, lc)); err != nil { log.Warnf("loading chain state from disk: %s", err) } var startHook func(context.Context) error if ss, ok := basebs.(*splitstore.SplitStore); ok { startHook = func(_ context.Context) error { - err := ss.Start(chain) + err := ss.Start(chain, us) if err != nil { err = xerrors.Errorf("error starting splitstore: %w", err) } diff --git a/node/modules/client.go b/node/modules/client.go index 4d988d98a..86063b8d9 100644 --- a/node/modules/client.go +++ b/node/modules/client.go @@ -40,11 +40,12 @@ import ( "github.com/filecoin-project/lotus/node/impl/full" payapi "github.com/filecoin-project/lotus/node/impl/paych" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/node/repo/imports" ) -func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet 
full.WalletAPI, fundMgr *market.FundManager) { +func HandleMigrateClientFunds(lc fx.Lifecycle, mctx helpers.MetricsCtx, ds dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) { lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { addr, err := wallet.WalletDefaultAddress(ctx) @@ -52,7 +53,7 @@ func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full if err != nil { return nil } - b, err := ds.Get(datastore.NewKey("/marketfunds/client")) + b, err := ds.Get(helpers.LifecycleCtx(mctx, lc), datastore.NewKey("/marketfunds/client")) if err != nil { if xerrors.Is(err, datastore.ErrNotFound) { return nil @@ -73,7 +74,7 @@ func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full return nil } - return ds.Delete(datastore.NewKey("/marketfunds/client")) + return ds.Delete(helpers.LifecycleCtx(mctx, lc), datastore.NewKey("/marketfunds/client")) }, }) } diff --git a/node/modules/genesis.go b/node/modules/genesis.go index 43443b125..03b4e2907 100644 --- a/node/modules/genesis.go +++ b/node/modules/genesis.go @@ -4,6 +4,8 @@ import ( "bytes" "os" + "go.uber.org/fx" + "github.com/ipfs/go-datastore" "github.com/ipld/go-car" "golang.org/x/xerrors" @@ -11,6 +13,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" ) func ErrorGenesis() Genesis { @@ -19,17 +22,18 @@ func ErrorGenesis() Genesis { } } -func LoadGenesis(genBytes []byte) func(dtypes.ChainBlockstore) Genesis { - return func(bs dtypes.ChainBlockstore) Genesis { +func LoadGenesis(genBytes []byte) func(fx.Lifecycle, helpers.MetricsCtx, dtypes.ChainBlockstore) Genesis { + return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, bs dtypes.ChainBlockstore) Genesis { return func() (header *types.BlockHeader, e error) { - c, err := car.LoadCar(bs, bytes.NewReader(genBytes)) + ctx := helpers.LifecycleCtx(mctx, lc) + c, err := car.LoadCar(ctx, bs, bytes.NewReader(genBytes)) if err != nil { return nil, xerrors.Errorf("loading genesis car file failed: %w", err) } if len(c.Roots) != 1 { return nil, xerrors.New("expected genesis file to have one root") } - root, err := bs.Get(c.Roots[0]) + root, err := bs.Get(ctx, c.Roots[0]) if err != nil { return nil, err } @@ -45,8 +49,9 @@ func LoadGenesis(genBytes []byte) func(dtypes.ChainBlockstore) Genesis { func DoSetGenesis(_ dtypes.AfterGenesisSet) {} -func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) { - genFromRepo, err := cs.GetGenesis() +func SetGenesis(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + genFromRepo, err := cs.GetGenesis(ctx) if err == nil { if os.Getenv("LOTUS_SKIP_GENESIS_CHECK") != "_yes_" { expectedGenesis, err := g() @@ -69,5 +74,5 @@ func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis func failed: %w", err) } - return dtypes.AfterGenesisSet{}, cs.SetGenesis(genesis) + return dtypes.AfterGenesisSet{}, cs.SetGenesis(ctx, genesis) } diff --git a/node/modules/lp2p/host.go b/node/modules/lp2p/host.go index b0436e9c9..982d9f4cd 100644 --- a/node/modules/lp2p/host.go +++ b/node/modules/lp2p/host.go @@ -10,6 +10,7 @@ import ( "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" dht "github.com/libp2p/go-libp2p-kad-dht" 
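// Several constructors touched above (MessagePool, ChainStore, LoadGenesis,
// HandleMigrateClientFunds) obtain their context the same way: they accept an
// fx.Lifecycle plus a helpers.MetricsCtx and derive a context bound to node
// shutdown via helpers.LifecycleCtx, instead of reaching for context.TODO().
// A minimal sketch of a provider following that wiring; Thing and NewThing are
// hypothetical and only illustrate the dependency-injection shape.
package sketch

import (
	"context"

	"go.uber.org/fx"

	"github.com/filecoin-project/lotus/node/modules/helpers"
)

type Thing struct{ ctx context.Context }

// NewThing derives the lifecycle-bound context once and hands it to the
// component, mirroring the pattern used by the patched providers.
func NewThing(lc fx.Lifecycle, mctx helpers.MetricsCtx) *Thing {
	return &Thing{ctx: helpers.LifecycleCtx(mctx, lc)}
}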
+ "github.com/libp2p/go-libp2p-peerstore/pstoremem" record "github.com/libp2p/go-libp2p-record" routedhost "github.com/libp2p/go-libp2p/p2p/host/routed" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" @@ -33,9 +34,11 @@ type P2PHostIn struct { type RawHost host.Host -func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, error) { - ctx := helpers.LifecycleCtx(mctx, lc) +func Peerstore() (peerstore.Peerstore, error) { + return pstoremem.NewPeerstore() +} +func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, error) { pkey := params.Peerstore.PrivKey(params.ID) if pkey == nil { return nil, fmt.Errorf("missing private key for node ID: %s", params.ID.Pretty()) @@ -52,7 +55,7 @@ func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, opts = append(opts, o...) } - h, err := libp2p.New(ctx, opts...) + h, err := libp2p.New(opts...) if err != nil { return nil, err } diff --git a/node/modules/lp2p/libp2p.go b/node/modules/lp2p/libp2p.go index 4dee15ae9..5d8ece732 100644 --- a/node/modules/lp2p/libp2p.go +++ b/node/modules/lp2p/libp2p.go @@ -69,7 +69,11 @@ func genLibp2pKey() (crypto.PrivKey, error) { func ConnectionManager(low, high uint, grace time.Duration, protected []string) func() (opts Libp2pOpts, err error) { return func() (Libp2pOpts, error) { - cm := connmgr.NewConnManager(int(low), int(high), grace) + cm, err := connmgr.NewConnManager(int(low), int(high), connmgr.WithGracePeriod(grace)) + if err != nil { + return Libp2pOpts{}, err + } + for _, p := range protected { pid, err := peer.IDFromString(p) if err != nil { diff --git a/node/modules/lp2p/smux.go b/node/modules/lp2p/smux.go index f5c74e18b..608467255 100644 --- a/node/modules/lp2p/smux.go +++ b/node/modules/lp2p/smux.go @@ -2,17 +2,13 @@ package lp2p import ( "os" - "strings" "github.com/libp2p/go-libp2p" - smux "github.com/libp2p/go-libp2p-core/mux" - mplex "github.com/libp2p/go-libp2p-mplex" yamux "github.com/libp2p/go-libp2p-yamux" ) -func makeSmuxTransportOption(mplexExp bool) libp2p.Option { +func makeSmuxTransportOption() libp2p.Option { const yamuxID = "/yamux/1.0.0" - const mplexID = "/mplex/6.7.0" ymxtpt := *yamux.DefaultTransport ymxtpt.AcceptBacklog = 512 @@ -21,34 +17,12 @@ func makeSmuxTransportOption(mplexExp bool) libp2p.Option { ymxtpt.LogOutput = os.Stderr } - muxers := map[string]smux.Multiplexer{yamuxID: &ymxtpt} - if mplexExp { - muxers[mplexID] = mplex.DefaultTransport - } - - // Allow muxer preference order overriding - order := []string{yamuxID, mplexID} - if prefs := os.Getenv("LIBP2P_MUX_PREFS"); prefs != "" { - order = strings.Fields(prefs) - } - - opts := make([]libp2p.Option, 0, len(order)) - for _, id := range order { - tpt, ok := muxers[id] - if !ok { - log.Warnf("unknown or duplicate muxer in LIBP2P_MUX_PREFS: %s", id) - continue - } - delete(muxers, id) - opts = append(opts, libp2p.Muxer(id, tpt)) - } - - return libp2p.ChainOptions(opts...) 
+ return libp2p.Muxer(yamuxID, &ymxtpt) } -func SmuxTransport(mplex bool) func() (opts Libp2pOpts, err error) { +func SmuxTransport() func() (opts Libp2pOpts, err error) { return func() (opts Libp2pOpts, err error) { - opts.Opts = append(opts.Opts, makeSmuxTransportOption(mplex)) + opts.Opts = append(opts.Opts, makeSmuxTransportOption()) return } } diff --git a/node/modules/services.go b/node/modules/services.go index 17d4a7476..299c01fb0 100644 --- a/node/modules/services.go +++ b/node/modules/services.go @@ -228,8 +228,8 @@ func BuiltinDrandConfig() dtypes.DrandSchedule { return build.DrandConfigSchedule() } -func RandomSchedule(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Schedule, error) { - gen, err := p.Cs.GetGenesis() +func RandomSchedule(lc fx.Lifecycle, mctx helpers.MetricsCtx, p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Schedule, error) { + gen, err := p.Cs.GetGenesis(helpers.LifecycleCtx(mctx, lc)) if err != nil { return nil, err } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 1a2dfc19f..552f43660 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -32,13 +32,13 @@ import ( "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/go-storedcounter" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" graphsync "github.com/ipfs/go-graphsync/impl" - graphsyncimpl "github.com/ipfs/go-graphsync/impl" gsnet "github.com/ipfs/go-graphsync/network" "github.com/ipfs/go-graphsync/storeutil" "github.com/libp2p/go-libp2p-core/host" @@ -77,7 +77,7 @@ var ( ) func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) { - maddrb, err := ds.Get(datastore.NewKey("miner-address")) + maddrb, err := ds.Get(context.TODO(), datastore.NewKey("miner-address")) if err != nil { return address.Undef, err } @@ -299,7 +299,7 @@ func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h sto func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api.FullNode, minerAddress dtypes.MinerAddress) { lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { - b, err := ds.Get(datastore.NewKey("/marketfunds/provider")) + b, err := ds.Get(ctx, datastore.NewKey("/marketfunds/provider")) if err != nil { if xerrors.Is(err, datastore.ErrNotFound) { return nil @@ -330,7 +330,7 @@ func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api. 
return nil } - return ds.Delete(datastore.NewKey("/marketfunds/provider")) + return ds.Delete(ctx, datastore.NewKey("/marketfunds/provider")) }, }) } @@ -395,7 +395,7 @@ func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRe // StagingGraphsync creates a graphsync instance which reads and writes blocks // to the StagingBlockstore -func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { +func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { graphsyncNetwork := gsnet.NewFromLibp2pHost(h) lsys := storeutil.LinkSystemForBlockstore(ibs) @@ -404,9 +404,10 @@ func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForRe lsys, graphsync.RejectAllRequestsByDefault(), graphsync.MaxInProgressIncomingRequests(parallelTransfersForRetrieval), + graphsync.MaxInProgressIncomingRequestsPerPeer(parallelTransfersForStoragePerPeer), graphsync.MaxInProgressOutgoingRequests(parallelTransfersForStorage), - graphsyncimpl.MaxLinksPerIncomingRequests(config.MaxTraversalLinks), - graphsyncimpl.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks)) + graphsync.MaxLinksPerIncomingRequests(config.MaxTraversalLinks), + graphsync.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks)) graphsyncStats(mctx, lc, gs) @@ -683,6 +684,9 @@ func RetrievalProvider( dagStore *dagstore.Wrapper, ) (retrievalmarket.RetrievalProvider, error) { opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter)) + + retrievalmarket.DefaultPricePerByte = big.Zero() // todo: for whatever reason this is a global var in markets + return retrievalimpl.NewProvider( address.Address(maddr), adapter, diff --git a/node/modules/storageminer_dagstore.go b/node/modules/storageminer_dagstore.go index 1f72a49b9..b4f5d3535 100644 --- a/node/modules/storageminer_dagstore.go +++ b/node/modules/storageminer_dagstore.go @@ -11,8 +11,6 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - mdagstore "github.com/filecoin-project/lotus/markets/dagstore" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -25,7 +23,7 @@ const ( ) // NewMinerAPI creates a new MinerAPI adaptor for the dagstore mounts. -func NewMinerAPI(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, sa retrievalmarket.SectorAccessor) (mdagstore.MinerAPI, error) { +func NewMinerAPI(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, sa mdagstore.SectorAccessor) (mdagstore.MinerAPI, error) { cfg, err := extractDAGStoreConfig(r) if err != nil { return nil, err diff --git a/node/repo/imports/manager.go b/node/repo/imports/manager.go index d972ffb77..2deaa30af 100644 --- a/node/repo/imports/manager.go +++ b/node/repo/imports/manager.go @@ -1,6 +1,7 @@ package imports import ( + "context" "encoding/json" "fmt" "os" @@ -107,6 +108,7 @@ type Meta struct { // CreateImport initializes a new import, returning its ID and optionally a // CAR path where to place the data, if requested. 
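// The datastore interface now takes a context on every Get/Put/Delete/Query,
// which is why the hunks above thread ctx (or, where no caller context exists
// yet, context.TODO()) into the metadata datastore. A minimal runnable sketch
// of the new call shapes against an in-memory datastore; the function and key
// are purely illustrative.
package sketch

import (
	"context"
	"fmt"

	"github.com/ipfs/go-datastore"
)

func datastoreRoundTrip(ctx context.Context) error {
	ds := datastore.NewMapDatastore()
	key := datastore.NewKey("/example/key")

	// Put and Get both take the caller's context now.
	if err := ds.Put(ctx, key, []byte("value")); err != nil {
		return fmt.Errorf("put: %w", err)
	}
	b, err := ds.Get(ctx, key)
	if err != nil {
		return fmt.Errorf("get: %w", err)
	}
	fmt.Printf("got %d bytes\n", len(b))

	// Delete follows the same pattern.
	return ds.Delete(ctx, key)
}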
func (m *Manager) CreateImport() (id ID, err error) { + ctx := context.TODO() id = ID(m.counter.Next()) meta := &Meta{Labels: map[LabelKey]LabelValue{ @@ -118,7 +120,7 @@ func (m *Manager) CreateImport() (id ID, err error) { return 0, xerrors.Errorf("marshaling store metadata: %w", err) } - err = m.ds.Put(id.dsKey(), metajson) + err = m.ds.Put(ctx, id.dsKey(), metajson) if err != nil { return 0, xerrors.Errorf("failed to insert import metadata: %w", err) } @@ -129,7 +131,8 @@ func (m *Manager) CreateImport() (id ID, err error) { // AllocateCAR creates a new CAR allocated to the supplied import under the // root directory. func (m *Manager) AllocateCAR(id ID) (path string, err error) { - meta, err := m.ds.Get(id.dsKey()) + ctx := context.TODO() + meta, err := m.ds.Get(ctx, id.dsKey()) if err != nil { return "", xerrors.Errorf("getting metadata form datastore: %w", err) } @@ -163,14 +166,15 @@ func (m *Manager) AllocateCAR(id ID) (path string, err error) { return "", xerrors.Errorf("marshaling store metadata: %w", err) } - err = m.ds.Put(id.dsKey(), meta) + err = m.ds.Put(ctx, id.dsKey(), meta) return path, err } // AddLabel adds a label associated with an import, such as the source, // car path, CID, etc. func (m *Manager) AddLabel(id ID, key LabelKey, value LabelValue) error { - meta, err := m.ds.Get(id.dsKey()) + ctx := context.TODO() + meta, err := m.ds.Get(ctx, id.dsKey()) if err != nil { return xerrors.Errorf("getting metadata form datastore: %w", err) } @@ -187,14 +191,15 @@ func (m *Manager) AddLabel(id ID, key LabelKey, value LabelValue) error { return xerrors.Errorf("marshaling store meta: %w", err) } - return m.ds.Put(id.dsKey(), meta) + return m.ds.Put(ctx, id.dsKey(), meta) } // List returns all import IDs known by this Manager. func (m *Manager) List() ([]ID, error) { + ctx := context.TODO() var keys []ID - qres, err := m.ds.Query(query.Query{KeysOnly: true}) + qres, err := m.ds.Query(ctx, query.Query{KeysOnly: true}) if err != nil { return nil, xerrors.Errorf("query error: %w", err) } @@ -218,7 +223,9 @@ func (m *Manager) List() ([]ID, error) { // Info returns the metadata known to this store for the specified import ID. func (m *Manager) Info(id ID) (*Meta, error) { - meta, err := m.ds.Get(id.dsKey()) + ctx := context.TODO() + + meta, err := m.ds.Get(ctx, id.dsKey()) if err != nil { return nil, xerrors.Errorf("getting metadata form datastore: %w", err) } @@ -233,7 +240,8 @@ func (m *Manager) Info(id ID) (*Meta, error) { // Remove drops all data associated with the supplied import ID. 
func (m *Manager) Remove(id ID) error { - if err := m.ds.Delete(id.dsKey()); err != nil { + ctx := context.TODO() + if err := m.ds.Delete(ctx, id.dsKey()); err != nil { return xerrors.Errorf("removing import metadata: %w", err) } return nil diff --git a/node/rpc.go b/node/rpc.go index 9bcdb7388..6a3e55115 100644 --- a/node/rpc.go +++ b/node/rpc.go @@ -27,6 +27,7 @@ import ( "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/metrics/proxy" "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/impl/client" ) var rpclog = logging.Logger("rpc") @@ -89,14 +90,22 @@ func FullNodeHandler(a v1api.FullNode, permissioned bool, opts ...jsonrpc.Server // Import handler handleImportFunc := handleImport(a.(*impl.FullNodeAPI)) + handleExportFunc := handleExport(a.(*impl.FullNodeAPI)) if permissioned { importAH := &auth.Handler{ Verify: a.AuthVerify, Next: handleImportFunc, } m.Handle("/rest/v0/import", importAH) + + exportAH := &auth.Handler{ + Verify: a.AuthVerify, + Next: handleExportFunc, + } + m.Handle("/rest/v0/export", exportAH) } else { m.HandleFunc("/rest/v0/import", handleImportFunc) + m.HandleFunc("/rest/v0/export", handleExportFunc) } // debugging @@ -169,6 +178,34 @@ func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Reque } } +func handleExport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + w.WriteHeader(404) + return + } + if !auth.HasPerm(r.Context(), nil, api.PermWrite) { + w.WriteHeader(401) + _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) + return + } + + var eref api.ExportRef + if err := json.Unmarshal([]byte(r.FormValue("export")), &eref); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + car := r.FormValue("car") == "true" + + err := a.ClientExportInto(r.Context(), eref, car, client.ExportDest{Writer: w}) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } +} + func handleFractionOpt(name string, setter func(int)) http.HandlerFunc { return func(rw http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { diff --git a/paychmgr/accessorcache.go b/paychmgr/accessorcache.go index 176fbdd11..358cf7900 100644 --- a/paychmgr/accessorcache.go +++ b/paychmgr/accessorcache.go @@ -1,6 +1,10 @@ package paychmgr -import "github.com/filecoin-project/go-address" +import ( + "context" + + "github.com/filecoin-project/go-address" +) // accessorByFromTo gets a channel accessor for a given from / to pair. // The channel accessor facilitates locking a channel so that operations @@ -36,10 +40,10 @@ func (pm *Manager) accessorByFromTo(from address.Address, to address.Address) (* // The channel accessor facilitates locking a channel so that operations // must be performed sequentially on a channel (but can be performed at // the same time on different channels). 
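// node/rpc.go above adds a GET /rest/v0/export endpoint that streams a client
// export (optionally as a CAR) straight into the HTTP response, guarded by the
// write permission. A minimal client-side sketch; downloadExport is
// hypothetical, exportJSON is a pre-marshalled api.ExportRef, and the Bearer
// token header is assumed to mirror the existing /rest/v0/import endpoint.
package sketch

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
)

func downloadExport(apiAddr, token, exportJSON, outPath string, car bool) error {
	q := url.Values{}
	q.Set("export", exportJSON)
	if car {
		q.Set("car", "true")
	}

	req, err := http.NewRequest(http.MethodGet, apiAddr+"/rest/v0/export?"+q.Encode(), nil)
	if err != nil {
		return err
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close() // nolint:errcheck

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("export failed: %s", resp.Status)
	}

	f, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer f.Close() // nolint:errcheck

	_, err = io.Copy(f, resp.Body)
	return err
}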
-func (pm *Manager) accessorByAddress(ch address.Address) (*channelAccessor, error) { +func (pm *Manager) accessorByAddress(ctx context.Context, ch address.Address) (*channelAccessor, error) { // Get the channel from / to pm.lk.RLock() - channelInfo, err := pm.store.ByAddress(ch) + channelInfo, err := pm.store.ByAddress(ctx, ch) pm.lk.RUnlock() if err != nil { return nil, err diff --git a/paychmgr/manager.go b/paychmgr/manager.go index 460722945..e0fcd7a75 100644 --- a/paychmgr/manager.go +++ b/paychmgr/manager.go @@ -92,7 +92,7 @@ func newManager(pchstore *Store, pchapi managerAPI) (*Manager, error) { // Start restarts tracking of any messages that were sent to chain. func (pm *Manager) Start() error { - return pm.restartPending() + return pm.restartPending(pm.ctx) } // Stop shuts down any processes used by the manager @@ -110,27 +110,27 @@ func (pm *Manager) GetPaych(ctx context.Context, from, to address.Address, amt t return chanAccessor.getPaych(ctx, amt) } -func (pm *Manager) AvailableFunds(ch address.Address) (*api.ChannelAvailableFunds, error) { - ca, err := pm.accessorByAddress(ch) +func (pm *Manager) AvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) { + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return nil, err } - ci, err := ca.getChannelInfo(ch) + ci, err := ca.getChannelInfo(ctx, ch) if err != nil { return nil, err } - return ca.availableFunds(ci.ChannelID) + return ca.availableFunds(ctx, ci.ChannelID) } -func (pm *Manager) AvailableFundsByFromTo(from address.Address, to address.Address) (*api.ChannelAvailableFunds, error) { +func (pm *Manager) AvailableFundsByFromTo(ctx context.Context, from address.Address, to address.Address) (*api.ChannelAvailableFunds, error) { ca, err := pm.accessorByFromTo(from, to) if err != nil { return nil, err } - ci, err := ca.outboundActiveByFromTo(from, to) + ci, err := ca.outboundActiveByFromTo(ctx, from, to) if err == ErrChannelNotTracked { // If there is no active channel between from / to we still want to // return an empty ChannelAvailableFunds, so that clients can check @@ -151,7 +151,7 @@ func (pm *Manager) AvailableFundsByFromTo(from address.Address, to address.Addre return nil, err } - return ca.availableFunds(ci.ChannelID) + return ca.availableFunds(ctx, ci.ChannelID) } // GetPaychWaitReady waits until the create channel / add funds message with the @@ -160,7 +160,7 @@ func (pm *Manager) AvailableFundsByFromTo(from address.Address, to address.Addre func (pm *Manager) GetPaychWaitReady(ctx context.Context, mcid cid.Cid) (address.Address, error) { // Find the channel associated with the message CID pm.lk.Lock() - ci, err := pm.store.ByMessageCid(mcid) + ci, err := pm.store.ByMessageCid(ctx, mcid) pm.lk.Unlock() if err != nil { @@ -178,25 +178,25 @@ func (pm *Manager) GetPaychWaitReady(ctx context.Context, mcid cid.Cid) (address return chanAccessor.getPaychWaitReady(ctx, mcid) } -func (pm *Manager) ListChannels() ([]address.Address, error) { +func (pm *Manager) ListChannels(ctx context.Context) ([]address.Address, error) { // Need to take an exclusive lock here so that channel operations can't run // in parallel (see channelLock) pm.lk.Lock() defer pm.lk.Unlock() - return pm.store.ListChannels() + return pm.store.ListChannels(ctx) } -func (pm *Manager) GetChannelInfo(addr address.Address) (*ChannelInfo, error) { - ca, err := pm.accessorByAddress(addr) +func (pm *Manager) GetChannelInfo(ctx context.Context, addr address.Address) (*ChannelInfo, error) { + ca, err := 
pm.accessorByAddress(ctx, addr) if err != nil { return nil, err } - return ca.getChannelInfo(addr) + return ca.getChannelInfo(ctx, addr) } func (pm *Manager) CreateVoucher(ctx context.Context, ch address.Address, voucher paych.SignedVoucher) (*api.VoucherCreateResult, error) { - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return nil, err } @@ -223,7 +223,7 @@ func (pm *Manager) CheckVoucherSpendable(ctx context.Context, ch address.Address if len(proof) > 0 { return false, errProofNotSupported } - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return false, err } @@ -237,7 +237,7 @@ func (pm *Manager) AddVoucherOutbound(ctx context.Context, ch address.Address, s if len(proof) > 0 { return types.NewInt(0), errProofNotSupported } - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return types.NewInt(0), err } @@ -283,7 +283,7 @@ func (pm *Manager) trackInboundChannel(ctx context.Context, ch address.Address) defer pm.lk.Unlock() // Check if channel is in store - ci, err := pm.store.ByAddress(ch) + ci, err := pm.store.ByAddress(ctx, ch) if err == nil { // Channel is in store, so it's already being tracked return ci, nil @@ -316,7 +316,7 @@ func (pm *Manager) trackInboundChannel(ctx context.Context, ch address.Address) } // Save channel to store - return pm.store.TrackChannel(stateCi) + return pm.store.TrackChannel(ctx, stateCi) } // TODO: secret vs proof doesn't make sense, there is only one, not two @@ -324,23 +324,23 @@ func (pm *Manager) SubmitVoucher(ctx context.Context, ch address.Address, sv *pa if len(proof) > 0 { return cid.Undef, errProofNotSupported } - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return cid.Undef, err } return ca.submitVoucher(ctx, ch, sv, secret) } -func (pm *Manager) AllocateLane(ch address.Address) (uint64, error) { - ca, err := pm.accessorByAddress(ch) +func (pm *Manager) AllocateLane(ctx context.Context, ch address.Address) (uint64, error) { + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return 0, err } - return ca.allocateLane(ch) + return ca.allocateLane(ctx, ch) } func (pm *Manager) ListVouchers(ctx context.Context, ch address.Address) ([]*VoucherInfo, error) { - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return nil, err } @@ -348,7 +348,7 @@ func (pm *Manager) ListVouchers(ctx context.Context, ch address.Address) ([]*Vou } func (pm *Manager) Settle(ctx context.Context, addr address.Address) (cid.Cid, error) { - ca, err := pm.accessorByAddress(addr) + ca, err := pm.accessorByAddress(ctx, addr) if err != nil { return cid.Undef, err } @@ -356,7 +356,7 @@ func (pm *Manager) Settle(ctx context.Context, addr address.Address) (cid.Cid, e } func (pm *Manager) Collect(ctx context.Context, addr address.Address) (cid.Cid, error) { - ca, err := pm.accessorByAddress(addr) + ca, err := pm.accessorByAddress(ctx, addr) if err != nil { return cid.Undef, err } diff --git a/paychmgr/paych.go b/paychmgr/paych.go index e5e47dfca..16c6604c6 100644 --- a/paychmgr/paych.go +++ b/paychmgr/paych.go @@ -95,18 +95,18 @@ func (ca *channelAccessor) messageBuilder(ctx context.Context, from address.Addr return paych.Message(av, from), nil } -func (ca *channelAccessor) getChannelInfo(addr address.Address) (*ChannelInfo, error) { +func (ca *channelAccessor) getChannelInfo(ctx context.Context, addr address.Address) (*ChannelInfo, error) { 
ca.lk.Lock() defer ca.lk.Unlock() - return ca.store.ByAddress(addr) + return ca.store.ByAddress(ctx, addr) } -func (ca *channelAccessor) outboundActiveByFromTo(from, to address.Address) (*ChannelInfo, error) { +func (ca *channelAccessor) outboundActiveByFromTo(ctx context.Context, from, to address.Address) (*ChannelInfo, error) { ca.lk.Lock() defer ca.lk.Unlock() - return ca.store.OutboundActiveByFromTo(from, to) + return ca.store.OutboundActiveByFromTo(ctx, from, to) } // createVoucher creates a voucher with the given specification, setting its @@ -118,7 +118,7 @@ func (ca *channelAccessor) createVoucher(ctx context.Context, ch address.Address defer ca.lk.Unlock() // Find the channel for the voucher - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return nil, xerrors.Errorf("failed to get channel info by address: %w", err) } @@ -229,7 +229,7 @@ func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch add } // Check the voucher against the highest known voucher nonce / value - laneStates, err := ca.laneState(pchState, ch) + laneStates, err := ca.laneState(ctx, pchState, ch) if err != nil { return nil, err } @@ -298,7 +298,7 @@ func (ca *channelAccessor) checkVoucherSpendable(ctx context.Context, ch address return false, err } - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return false, err } @@ -351,7 +351,7 @@ func (ca *channelAccessor) addVoucher(ctx context.Context, ch address.Address, s } func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, minDelta types.BigInt) (types.BigInt, error) { - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return types.BigInt{}, err } @@ -400,14 +400,14 @@ func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Ad ci.NextLane = sv.Lane + 1 } - return delta, ca.store.putChannelInfo(ci) + return delta, ca.store.putChannelInfo(ctx, ci) } func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte) (cid.Cid, error) { ca.lk.Lock() defer ca.lk.Unlock() - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return cid.Undef, err } @@ -453,7 +453,7 @@ func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address } // Mark the voucher and any lower-nonce vouchers as having been submitted - err = ca.store.MarkVoucherSubmitted(ci, sv) + err = ca.store.MarkVoucherSubmitted(ctx, ci, sv) if err != nil { return cid.Undef, err } @@ -461,11 +461,11 @@ func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address return smsg.Cid(), nil } -func (ca *channelAccessor) allocateLane(ch address.Address) (uint64, error) { +func (ca *channelAccessor) allocateLane(ctx context.Context, ch address.Address) (uint64, error) { ca.lk.Lock() defer ca.lk.Unlock() - return ca.store.AllocateLane(ch) + return ca.store.AllocateLane(ctx, ch) } func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address) ([]*VoucherInfo, error) { @@ -474,12 +474,12 @@ func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address) // TODO: just having a passthrough method like this feels odd. 
Seems like // there should be some filtering we're doing here - return ca.store.VouchersForPaych(ch) + return ca.store.VouchersForPaych(ctx, ch) } // laneState gets the LaneStates from chain, then applies all vouchers in // the data store over the chain state -func (ca *channelAccessor) laneState(state paych.State, ch address.Address) (map[uint64]paych.LaneState, error) { +func (ca *channelAccessor) laneState(ctx context.Context, state paych.State, ch address.Address) (map[uint64]paych.LaneState, error) { // TODO: we probably want to call UpdateChannelState with all vouchers to be fully correct // (but technically dont't need to) @@ -501,7 +501,7 @@ func (ca *channelAccessor) laneState(state paych.State, ch address.Address) (map } // Apply locally stored vouchers - vouchers, err := ca.store.VouchersForPaych(ch) + vouchers, err := ca.store.VouchersForPaych(ctx, ch) if err != nil && err != ErrChannelNotTracked { return nil, err } @@ -583,7 +583,7 @@ func (ca *channelAccessor) settle(ctx context.Context, ch address.Address) (cid. ca.lk.Lock() defer ca.lk.Unlock() - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return cid.Undef, err } @@ -602,7 +602,7 @@ func (ca *channelAccessor) settle(ctx context.Context, ch address.Address) (cid. } ci.Settling = true - err = ca.store.putChannelInfo(ci) + err = ca.store.putChannelInfo(ctx, ci) if err != nil { log.Errorf("Error marking channel as settled: %s", err) } @@ -614,7 +614,7 @@ func (ca *channelAccessor) collect(ctx context.Context, ch address.Address) (cid ca.lk.Lock() defer ca.lk.Unlock() - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return cid.Undef, err } diff --git a/paychmgr/paych_test.go b/paychmgr/paych_test.go index d6c0b3a65..e798ba585 100644 --- a/paychmgr/paych_test.go +++ b/paychmgr/paych_test.go @@ -360,17 +360,17 @@ func TestAddVoucherNextLane(t *testing.T) { _, err := s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) require.NoError(t, err) - ci, err := s.mgr.GetChannelInfo(s.ch) + ci, err := s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 3) //stm: @TOKEN_PAYCH_ALLOCATE_LANE_001 // Allocate a lane (should be lane 3) - lane, err := s.mgr.AllocateLane(s.ch) + lane, err := s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, lane, 3) - ci, err = s.mgr.GetChannelInfo(s.ch) + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 4) @@ -380,7 +380,7 @@ func TestAddVoucherNextLane(t *testing.T) { _, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) require.NoError(t, err) - ci, err = s.mgr.GetChannelInfo(s.ch) + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 4) @@ -390,23 +390,25 @@ func TestAddVoucherNextLane(t *testing.T) { _, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) require.NoError(t, err) - ci, err = s.mgr.GetChannelInfo(s.ch) + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 8) } func TestAllocateLane(t *testing.T) { + ctx := context.Background() + // Set up a manager with a single payment channel s := testSetupMgrWithChannel(t) //stm: @TOKEN_PAYCH_ALLOCATE_LANE_001 // First lane should be 0 - lane, err := s.mgr.AllocateLane(s.ch) + lane, err := s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, lane, 0) // Next lane should be 1 - lane, err = 
s.mgr.AllocateLane(s.ch) + lane, err = s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, lane, 1) } @@ -456,7 +458,7 @@ func TestAllocateLaneWithExistingLaneState(t *testing.T) { //stm: @TOKEN_PAYCH_ALLOCATE_LANE_001 // Allocate lane should return the next lane (lane 3) - lane, err := mgr.AllocateLane(ch) + lane, err := mgr.AllocateLane(ctx, ch) require.NoError(t, err) require.EqualValues(t, 3, lane) } @@ -759,7 +761,7 @@ func testSetupMgrWithChannel(t *testing.T) *testScaffold { Target: toAcct, Direction: DirOutbound, } - err = mgr.store.putChannelInfo(ci) + err = mgr.store.putChannelInfo(context.Background(), ci) require.NoError(t, err) // Add the from signing key to the wallet diff --git a/paychmgr/paychget_test.go b/paychmgr/paychget_test.go index c06d47bd2..c9dc48b05 100644 --- a/paychmgr/paychget_test.go +++ b/paychmgr/paychget_test.go @@ -88,7 +88,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { require.NoError(t, err) // Should have no channels yet (message sent but channel not created) - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 0) @@ -113,7 +113,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { require.NotEqual(t, createMsgCid, addFundsMsgCid) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) @@ -122,7 +122,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { // channel). // PendingAmount should be amount sent in second GetPaych // (second GetPaych triggered add funds, which has not yet been confirmed) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.EqualValues(t, 10, ci.Amount.Int64()) require.EqualValues(t, 5, ci.PendingAmount.Int64()) @@ -136,13 +136,13 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { require.NoError(t, err) // Should still have one channel - cis, err = mgr.ListChannels() + cis, err = mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) // Channel amount should include last amount sent to GetPaych - ci, err = mgr.GetChannelInfo(ch) + ci, err = mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.EqualValues(t, 15, ci.Amount.Int64()) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -205,12 +205,12 @@ func TestPaychGetCreateChannelWithErrorThenCreateAgain(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt2, ci.Amount) }() @@ -262,12 +262,12 @@ func TestPaychGetRecoverAfterError(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt2, ci.Amount) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -315,12 +315,12 @@ func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { 
require.Error(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt, ci.Amount) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -342,13 +342,13 @@ func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err = mgr.ListChannels() + cis, err = mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) // Amount should include amount for successful add funds msg - ci, err = mgr.GetChannelInfo(ch) + ci, err = mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt.Int64()+amt3.Int64(), ci.Amount.Int64()) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -389,7 +389,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { require.NoError(t, err) // Should have no channels yet (message sent but channel not created) - cis, err := mgr2.ListChannels() + cis, err := mgr2.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 0) @@ -414,7 +414,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { require.NotEqual(t, createMsgCid, addFundsMsgCid) // Should have one channel, whose address is the channel that was created - cis, err := mgr2.ListChannels() + cis, err := mgr2.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) @@ -423,7 +423,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { // channel). // PendingAmount should be amount sent in second GetPaych // (second GetPaych triggered add funds, which has not yet been confirmed) - ci, err := mgr2.GetChannelInfo(ch) + ci, err := mgr2.GetChannelInfo(ctx, ch) require.NoError(t, err) require.EqualValues(t, 10, ci.Amount.Int64()) require.EqualValues(t, 5, ci.PendingAmount.Int64()) @@ -487,13 +487,13 @@ func TestPaychGetRestartAfterAddFundsMsg(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr2.ListChannels() + cis, err := mgr2.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) // Amount should include amount for successful add funds msg - ci, err := mgr2.GetChannelInfo(ch) + ci, err := mgr2.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt.Int64()+amt2.Int64(), ci.Amount.Int64()) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -930,7 +930,7 @@ func TestPaychAvailableFunds(t *testing.T) { require.NoError(t, err) // No channel created yet so available funds should be all zeroes - av, err := mgr.AvailableFundsByFromTo(from, to) + av, err := mgr.AvailableFundsByFromTo(ctx, from, to) require.NoError(t, err) require.Nil(t, av.Channel) require.Nil(t, av.PendingWaitSentinel) @@ -945,7 +945,7 @@ func TestPaychAvailableFunds(t *testing.T) { require.NoError(t, err) // Available funds should reflect create channel message sent - av, err = mgr.AvailableFundsByFromTo(from, to) + av, err = mgr.AvailableFundsByFromTo(ctx, from, to) require.NoError(t, err) require.Nil(t, av.Channel) require.EqualValues(t, 0, av.ConfirmedAmt.Int64()) @@ -974,7 +974,7 @@ func TestPaychAvailableFunds(t *testing.T) { waitForQueueSize(t, mgr, from, to, 1) // Available funds should now include queued 
funds - av, err = mgr.AvailableFundsByFromTo(from, to) + av, err = mgr.AvailableFundsByFromTo(ctx, from, to) require.NoError(t, err) require.Nil(t, av.Channel) require.NotNil(t, av.PendingWaitSentinel) @@ -1009,7 +1009,7 @@ func TestPaychAvailableFunds(t *testing.T) { // Available funds should now include the channel and also a wait sentinel // for the add funds message - av, err = mgr.AvailableFunds(ch) + av, err = mgr.AvailableFunds(ctx, ch) require.NoError(t, err) require.NotNil(t, av.Channel) require.NotNil(t, av.PendingWaitSentinel) @@ -1031,7 +1031,7 @@ func TestPaychAvailableFunds(t *testing.T) { require.NoError(t, err) // Available funds should no longer have a wait sentinel - av, err = mgr.AvailableFunds(ch) + av, err = mgr.AvailableFunds(ctx, ch) require.NoError(t, err) require.NotNil(t, av.Channel) require.Nil(t, av.PendingWaitSentinel) @@ -1052,7 +1052,7 @@ func TestPaychAvailableFunds(t *testing.T) { _, err = mgr.AddVoucherOutbound(ctx, ch, voucher, nil, types.NewInt(0)) require.NoError(t, err) - av, err = mgr.AvailableFunds(ch) + av, err = mgr.AvailableFunds(ctx, ch) require.NoError(t, err) require.NotNil(t, av.Channel) require.Nil(t, av.PendingWaitSentinel) diff --git a/paychmgr/settle_test.go b/paychmgr/settle_test.go index d324c10c8..4d2393e96 100644 --- a/paychmgr/settle_test.go +++ b/paychmgr/settle_test.go @@ -64,7 +64,7 @@ func TestPaychSettle(t *testing.T) { require.NotEqual(t, ch, ch2) // There should now be two channels - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 2) } diff --git a/paychmgr/simple.go b/paychmgr/simple.go index f93c6d5bd..502338e29 100644 --- a/paychmgr/simple.go +++ b/paychmgr/simple.go @@ -159,7 +159,7 @@ func (m *mergedFundsReq) sum() types.BigInt { func (ca *channelAccessor) getPaych(ctx context.Context, amt types.BigInt) (address.Address, cid.Cid, error) { // Add the request to add funds to a queue and wait for the result freq := newFundsReq(ctx, amt) - ca.enqueue(freq) + ca.enqueue(ctx, freq) select { case res := <-freq.promise: return res.channel, res.mcid, res.err @@ -170,16 +170,16 @@ func (ca *channelAccessor) getPaych(ctx context.Context, amt types.BigInt) (addr } // Queue up an add funds operation -func (ca *channelAccessor) enqueue(task *fundsReq) { +func (ca *channelAccessor) enqueue(ctx context.Context, task *fundsReq) { ca.lk.Lock() defer ca.lk.Unlock() ca.fundsReqQueue = append(ca.fundsReqQueue, task) - go ca.processQueue("") // nolint: errcheck + go ca.processQueue(ctx, "") // nolint: errcheck } // Run the operations in the queue -func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailableFunds, error) { +func (ca *channelAccessor) processQueue(ctx context.Context, channelID string) (*api.ChannelAvailableFunds, error) { ca.lk.Lock() defer ca.lk.Unlock() @@ -188,7 +188,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable // If there's nothing in the queue, bail out if len(ca.fundsReqQueue) == 0 { - return ca.currentAvailableFunds(channelID, types.NewInt(0)) + return ca.currentAvailableFunds(ctx, channelID, types.NewInt(0)) } // Merge all pending requests into one. 
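// The paychmgr hunks in this region push the caller's context from the public
// Manager API down into the channel accessor and its datastore-backed store
// (ByAddress, putChannelInfo, VouchersForPaych, and so on). A minimal sketch
// of the resulting Manager call shapes; inspectChannels is hypothetical and
// only exercises signatures that appear in this patch.
package sketch

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/paychmgr"
)

func inspectChannels(ctx context.Context, mgr *paychmgr.Manager) error {
	// Listing channels and reading channel info now take a context.
	chans, err := mgr.ListChannels(ctx)
	if err != nil {
		return err
	}
	for _, ch := range chans {
		ci, err := mgr.GetChannelInfo(ctx, ch)
		if err != nil {
			return err
		}
		// Lane allocation is context-aware as well.
		lane, err := mgr.AllocateLane(ctx, ch)
		if err != nil {
			return err
		}
		fmt.Printf("channel %s: next lane %d, allocated lane %d\n", ch, ci.NextLane, lane)
	}
	return nil
}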
@@ -199,7 +199,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable if amt.IsZero() { // Note: The amount can be zero if requests are cancelled as we're // building the mergedFundsReq - return ca.currentAvailableFunds(channelID, amt) + return ca.currentAvailableFunds(ctx, channelID, amt) } res := ca.processTask(merged.ctx, amt) @@ -209,7 +209,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable if res == nil { // Stop processing the fundsReqQueue and wait. When the event occurs it will // call processQueue() again - return ca.currentAvailableFunds(channelID, amt) + return ca.currentAvailableFunds(ctx, channelID, amt) } // Finished processing so clear the queue @@ -218,7 +218,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable // Call the task callback with its results merged.onComplete(res) - return ca.currentAvailableFunds(channelID, types.NewInt(0)) + return ca.currentAvailableFunds(ctx, channelID, types.NewInt(0)) } // filterQueue filters cancelled requests out of the queue @@ -255,12 +255,12 @@ func (ca *channelAccessor) queueSize() int { // msgWaitComplete is called when the message for a previous task is confirmed // or there is an error. -func (ca *channelAccessor) msgWaitComplete(mcid cid.Cid, err error) { +func (ca *channelAccessor) msgWaitComplete(ctx context.Context, mcid cid.Cid, err error) { ca.lk.Lock() defer ca.lk.Unlock() // Save the message result to the store - dserr := ca.store.SaveMessageResult(mcid, err) + dserr := ca.store.SaveMessageResult(ctx, mcid, err) if dserr != nil { log.Errorf("saving message result: %s", dserr) } @@ -271,16 +271,16 @@ func (ca *channelAccessor) msgWaitComplete(mcid cid.Cid, err error) { // The queue may have been waiting for msg completion to proceed, so // process the next queue item if len(ca.fundsReqQueue) > 0 { - go ca.processQueue("") // nolint: errcheck + go ca.processQueue(ctx, "") // nolint: errcheck } } -func (ca *channelAccessor) currentAvailableFunds(channelID string, queuedAmt types.BigInt) (*api.ChannelAvailableFunds, error) { +func (ca *channelAccessor) currentAvailableFunds(ctx context.Context, channelID string, queuedAmt types.BigInt) (*api.ChannelAvailableFunds, error) { if len(channelID) == 0 { return nil, nil } - channelInfo, err := ca.store.ByChannelID(channelID) + channelInfo, err := ca.store.ByChannelID(ctx, channelID) if err != nil { return nil, err } @@ -302,7 +302,7 @@ func (ca *channelAccessor) currentAvailableFunds(channelID string, queuedAmt typ return nil, err } - laneStates, err := ca.laneState(pchState, ch) + laneStates, err := ca.laneState(ctx, pchState, ch) if err != nil { return nil, err } @@ -337,7 +337,7 @@ func (ca *channelAccessor) processTask(ctx context.Context, amt types.BigInt) *p // Get the payment channel for the from/to addresses. // Note: It's ok if we get ErrChannelNotTracked. It just means we need to // create a channel. 
- channelInfo, err := ca.store.OutboundActiveByFromTo(ca.from, ca.to) + channelInfo, err := ca.store.OutboundActiveByFromTo(ctx, ca.from, ca.to) if err != nil && err != ErrChannelNotTracked { return &paychFundsRes{err: err} } @@ -393,26 +393,26 @@ func (ca *channelAccessor) createPaych(ctx context.Context, amt types.BigInt) (c mcid := smsg.Cid() // Create a new channel in the store - ci, err := ca.store.CreateChannel(ca.from, ca.to, mcid, amt) + ci, err := ca.store.CreateChannel(ctx, ca.from, ca.to, mcid, amt) if err != nil { log.Errorf("creating channel: %s", err) return cid.Undef, err } // Wait for the channel to be created on chain - go ca.waitForPaychCreateMsg(ci.ChannelID, mcid) + go ca.waitForPaychCreateMsg(ctx, ci.ChannelID, mcid) return mcid, nil } // waitForPaychCreateMsg waits for mcid to appear on chain and stores the robust address of the // created payment channel -func (ca *channelAccessor) waitForPaychCreateMsg(channelID string, mcid cid.Cid) { - err := ca.waitPaychCreateMsg(channelID, mcid) - ca.msgWaitComplete(mcid, err) +func (ca *channelAccessor) waitForPaychCreateMsg(ctx context.Context, channelID string, mcid cid.Cid) { + err := ca.waitPaychCreateMsg(ctx, channelID, mcid) + ca.msgWaitComplete(ctx, mcid, err) } -func (ca *channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) error { +func (ca *channelAccessor) waitPaychCreateMsg(ctx context.Context, channelID string, mcid cid.Cid) error { mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { log.Errorf("wait msg: %v", err) @@ -425,7 +425,7 @@ func (ca *channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) er defer ca.lk.Unlock() // Channel creation failed, so remove the channel from the datastore - dserr := ca.store.RemoveChannel(channelID) + dserr := ca.store.RemoveChannel(ctx, channelID) if dserr != nil { log.Errorf("failed to remove channel %s: %s", channelID, dserr) } @@ -449,7 +449,7 @@ func (ca *channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) er defer ca.lk.Unlock() // Store robust address of channel - ca.mutateChannelInfo(channelID, func(channelInfo *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *ChannelInfo) { channelInfo.Channel = &decodedReturn.RobustAddress channelInfo.Amount = channelInfo.PendingAmount channelInfo.PendingAmount = big.NewInt(0) @@ -475,30 +475,30 @@ func (ca *channelAccessor) addFunds(ctx context.Context, channelInfo *ChannelInf mcid := smsg.Cid() // Store the add funds message CID on the channel - ca.mutateChannelInfo(channelInfo.ChannelID, func(ci *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelInfo.ChannelID, func(ci *ChannelInfo) { ci.PendingAmount = amt ci.AddFundsMsg = &mcid }) // Store a reference from the message CID to the channel, so that we can // look up the channel from the message CID - err = ca.store.SaveNewMessage(channelInfo.ChannelID, mcid) + err = ca.store.SaveNewMessage(ctx, channelInfo.ChannelID, mcid) if err != nil { log.Errorf("saving add funds message CID %s: %s", mcid, err) } - go ca.waitForAddFundsMsg(channelInfo.ChannelID, mcid) + go ca.waitForAddFundsMsg(ctx, channelInfo.ChannelID, mcid) return &mcid, nil } // waitForAddFundsMsg waits for mcid to appear on chain and returns error, if any -func (ca *channelAccessor) waitForAddFundsMsg(channelID string, mcid cid.Cid) { - err := ca.waitAddFundsMsg(channelID, mcid) - ca.msgWaitComplete(mcid, err) +func (ca *channelAccessor) waitForAddFundsMsg(ctx context.Context, channelID string, 
mcid cid.Cid) { + err := ca.waitAddFundsMsg(ctx, channelID, mcid) + ca.msgWaitComplete(ctx, mcid, err) } -func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error { +func (ca *channelAccessor) waitAddFundsMsg(ctx context.Context, channelID string, mcid cid.Cid) error { mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { log.Error(err) @@ -512,7 +512,7 @@ func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error ca.lk.Lock() defer ca.lk.Unlock() - ca.mutateChannelInfo(channelID, func(channelInfo *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *ChannelInfo) { channelInfo.PendingAmount = big.NewInt(0) channelInfo.AddFundsMsg = nil }) @@ -524,7 +524,7 @@ func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error defer ca.lk.Unlock() // Store updated amount - ca.mutateChannelInfo(channelID, func(channelInfo *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *ChannelInfo) { channelInfo.Amount = types.BigAdd(channelInfo.Amount, channelInfo.PendingAmount) channelInfo.PendingAmount = big.NewInt(0) channelInfo.AddFundsMsg = nil @@ -534,8 +534,8 @@ func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error } // Change the state of the channel in the store -func (ca *channelAccessor) mutateChannelInfo(channelID string, mutate func(*ChannelInfo)) { - channelInfo, err := ca.store.ByChannelID(channelID) +func (ca *channelAccessor) mutateChannelInfo(ctx context.Context, channelID string, mutate func(*ChannelInfo)) { + channelInfo, err := ca.store.ByChannelID(ctx, channelID) // If there's an error reading or writing to the store just log an error. // For now we're assuming it's unlikely to happen in practice. @@ -549,7 +549,7 @@ func (ca *channelAccessor) mutateChannelInfo(channelID string, mutate func(*Chan mutate(channelInfo) - err = ca.store.putChannelInfo(channelInfo) + err = ca.store.putChannelInfo(ctx, channelInfo) if err != nil { log.Errorf("Error writing channel info to store: %s", err) } @@ -560,8 +560,8 @@ func (ca *channelAccessor) mutateChannelInfo(channelID string, mutate func(*Chan // messages. // Outstanding messages can occur if a create / add funds message was sent and // then the system was shut down or crashed before the result was received. 
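`mutateChannelInfo` above keeps its read-mutate-write shape, with the context now passed to both the `ByChannelID` read and the `putChannelInfo` write, and store errors still only logged. A hedged sketch of that pattern with hypothetical types (again not the patch's own code):

```go
package main

import (
	"context"
	"log"
)

type record struct{ Amount int64 }

// store stands in for the channel-info store; only the call shape matters here.
type store interface {
	Get(ctx context.Context, id string) (*record, error)
	Put(ctx context.Context, id string, r *record) error
}

// mutate reads a record, applies fn, and writes it back. Store errors are
// logged rather than returned, matching the comment in mutateChannelInfo.
func mutate(ctx context.Context, s store, id string, fn func(*record)) {
	r, err := s.Get(ctx, id)
	if err != nil {
		log.Printf("reading record %s: %s", id, err)
		return
	}
	fn(r)
	if err := s.Put(ctx, id, r); err != nil {
		log.Printf("writing record %s: %s", id, err)
	}
}

func main() {}
```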
-func (pm *Manager) restartPending() error { - cis, err := pm.store.WithPendingAddFunds() +func (pm *Manager) restartPending(ctx context.Context) error { + cis, err := pm.store.WithPendingAddFunds(ctx) if err != nil { return err } @@ -575,16 +575,16 @@ func (pm *Manager) restartPending() error { if err != nil { return xerrors.Errorf("error initializing payment channel manager %s -> %s: %s", ci.Control, ci.Target, err) } - go ca.waitForPaychCreateMsg(ci.ChannelID, *ci.CreateMsg) + go ca.waitForPaychCreateMsg(ctx, ci.ChannelID, *ci.CreateMsg) return nil }) } else if ci.AddFundsMsg != nil { group.Go(func() error { - ca, err := pm.accessorByAddress(*ci.Channel) + ca, err := pm.accessorByAddress(ctx, *ci.Channel) if err != nil { return xerrors.Errorf("error initializing payment channel manager %s: %s", ci.Channel, err) } - go ca.waitForAddFundsMsg(ci.ChannelID, *ci.AddFundsMsg) + go ca.waitForAddFundsMsg(ctx, ci.ChannelID, *ci.AddFundsMsg) return nil }) } @@ -598,7 +598,7 @@ func (ca *channelAccessor) getPaychWaitReady(ctx context.Context, mcid cid.Cid) ca.lk.Lock() // First check if the message has completed - msgInfo, err := ca.store.GetMessage(mcid) + msgInfo, err := ca.store.GetMessage(ctx, mcid) if err != nil { ca.lk.Unlock() @@ -617,7 +617,7 @@ func (ca *channelAccessor) getPaychWaitReady(ctx context.Context, mcid cid.Cid) ca.lk.Unlock() // Get the channel address - ci, err := ca.store.ByMessageCid(mcid) + ci, err := ca.store.ByMessageCid(ctx, mcid) if err != nil { return address.Undef, err } @@ -660,7 +660,7 @@ func (ca *channelAccessor) msgPromise(ctx context.Context, mcid cid.Cid) chan on res := onMsgRes{err: err} if res.err == nil { // Get the channel associated with the message cid - ci, err := ca.store.ByMessageCid(mcid) + ci, err := ca.store.ByMessageCid(ctx, mcid) if err != nil { res.err = err } else { @@ -689,6 +689,6 @@ func (ca *channelAccessor) msgPromise(ctx context.Context, mcid cid.Cid) chan on return promise } -func (ca *channelAccessor) availableFunds(channelID string) (*api.ChannelAvailableFunds, error) { - return ca.processQueue(channelID) +func (ca *channelAccessor) availableFunds(ctx context.Context, channelID string) (*api.ChannelAvailableFunds, error) { + return ca.processQueue(ctx, channelID) } diff --git a/paychmgr/store.go b/paychmgr/store.go index 343149f93..62849e6be 100644 --- a/paychmgr/store.go +++ b/paychmgr/store.go @@ -2,6 +2,7 @@ package paychmgr import ( "bytes" + "context" "errors" "fmt" @@ -157,26 +158,26 @@ func (ci *ChannelInfo) wasVoucherSubmitted(sv *paych.SignedVoucher) (bool, error // TrackChannel stores a channel, returning an error if the channel was already // being tracked -func (ps *Store) TrackChannel(ci *ChannelInfo) (*ChannelInfo, error) { - _, err := ps.ByAddress(*ci.Channel) +func (ps *Store) TrackChannel(ctx context.Context, ci *ChannelInfo) (*ChannelInfo, error) { + _, err := ps.ByAddress(ctx, *ci.Channel) switch err { default: return nil, err case nil: return nil, fmt.Errorf("already tracking channel: %s", ci.Channel) case ErrChannelNotTracked: - err = ps.putChannelInfo(ci) + err = ps.putChannelInfo(ctx, ci) if err != nil { return nil, err } - return ps.ByAddress(*ci.Channel) + return ps.ByAddress(ctx, *ci.Channel) } } // ListChannels returns the addresses of all channels that have been created -func (ps *Store) ListChannels() ([]address.Address, error) { - cis, err := ps.findChans(func(ci *ChannelInfo) bool { +func (ps *Store) ListChannels(ctx context.Context) ([]address.Address, error) { + cis, err := ps.findChans(ctx, func(ci 
*ChannelInfo) bool { return ci.Channel != nil }, 0) if err != nil { @@ -193,8 +194,8 @@ func (ps *Store) ListChannels() ([]address.Address, error) { // findChan finds a single channel using the given filter. // If there isn't a channel that matches the filter, returns ErrChannelNotTracked -func (ps *Store) findChan(filter func(ci *ChannelInfo) bool) (*ChannelInfo, error) { - cis, err := ps.findChans(filter, 1) +func (ps *Store) findChan(ctx context.Context, filter func(ci *ChannelInfo) bool) (*ChannelInfo, error) { + cis, err := ps.findChans(ctx, filter, 1) if err != nil { return nil, err } @@ -208,8 +209,8 @@ func (ps *Store) findChan(filter func(ci *ChannelInfo) bool) (*ChannelInfo, erro // findChans loops over all channels, only including those that pass the filter. // max is the maximum number of channels to return. Set to zero to return unlimited channels. -func (ps *Store) findChans(filter func(*ChannelInfo) bool, max int) ([]ChannelInfo, error) { - res, err := ps.ds.Query(dsq.Query{Prefix: dsKeyChannelInfo}) +func (ps *Store) findChans(ctx context.Context, filter func(*ChannelInfo) bool, max int) ([]ChannelInfo, error) { + res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyChannelInfo}) if err != nil { return nil, err } @@ -251,8 +252,8 @@ func (ps *Store) findChans(filter func(*ChannelInfo) bool, max int) ([]ChannelIn } // AllocateLane allocates a new lane for the given channel -func (ps *Store) AllocateLane(ch address.Address) (uint64, error) { - ci, err := ps.ByAddress(ch) +func (ps *Store) AllocateLane(ctx context.Context, ch address.Address) (uint64, error) { + ci, err := ps.ByAddress(ctx, ch) if err != nil { return 0, err } @@ -260,12 +261,12 @@ func (ps *Store) AllocateLane(ch address.Address) (uint64, error) { out := ci.NextLane ci.NextLane++ - return out, ps.putChannelInfo(ci) + return out, ps.putChannelInfo(ctx, ci) } // VouchersForPaych gets the vouchers for the given channel -func (ps *Store) VouchersForPaych(ch address.Address) ([]*VoucherInfo, error) { - ci, err := ps.ByAddress(ch) +func (ps *Store) VouchersForPaych(ctx context.Context, ch address.Address) ([]*VoucherInfo, error) { + ci, err := ps.ByAddress(ctx, ch) if err != nil { return nil, err } @@ -273,17 +274,17 @@ func (ps *Store) VouchersForPaych(ch address.Address) ([]*VoucherInfo, error) { return ci.Vouchers, nil } -func (ps *Store) MarkVoucherSubmitted(ci *ChannelInfo, sv *paych.SignedVoucher) error { +func (ps *Store) MarkVoucherSubmitted(ctx context.Context, ci *ChannelInfo, sv *paych.SignedVoucher) error { err := ci.markVoucherSubmitted(sv) if err != nil { return err } - return ps.putChannelInfo(ci) + return ps.putChannelInfo(ctx, ci) } // ByAddress gets the channel that matches the given address -func (ps *Store) ByAddress(addr address.Address) (*ChannelInfo, error) { - return ps.findChan(func(ci *ChannelInfo) bool { +func (ps *Store) ByAddress(ctx context.Context, addr address.Address) (*ChannelInfo, error) { + return ps.findChan(ctx, func(ci *ChannelInfo) bool { return ci.Channel != nil && *ci.Channel == addr }) } @@ -307,7 +308,7 @@ func dskeyForMsg(mcid cid.Cid) datastore.Key { } // SaveNewMessage is called when a message is sent -func (ps *Store) SaveNewMessage(channelID string, mcid cid.Cid) error { +func (ps *Store) SaveNewMessage(ctx context.Context, channelID string, mcid cid.Cid) error { k := dskeyForMsg(mcid) b, err := cborrpc.Dump(&MsgInfo{ChannelID: channelID, MsgCid: mcid}) @@ -315,12 +316,12 @@ func (ps *Store) SaveNewMessage(channelID string, mcid cid.Cid) error { return err } - return 
ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // SaveMessageResult is called when the result of a message is received -func (ps *Store) SaveMessageResult(mcid cid.Cid, msgErr error) error { - minfo, err := ps.GetMessage(mcid) +func (ps *Store) SaveMessageResult(ctx context.Context, mcid cid.Cid, msgErr error) error { + minfo, err := ps.GetMessage(ctx, mcid) if err != nil { return err } @@ -336,17 +337,17 @@ func (ps *Store) SaveMessageResult(mcid cid.Cid, msgErr error) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // ByMessageCid gets the channel associated with a message -func (ps *Store) ByMessageCid(mcid cid.Cid) (*ChannelInfo, error) { - minfo, err := ps.GetMessage(mcid) +func (ps *Store) ByMessageCid(ctx context.Context, mcid cid.Cid) (*ChannelInfo, error) { + minfo, err := ps.GetMessage(ctx, mcid) if err != nil { return nil, err } - ci, err := ps.findChan(func(ci *ChannelInfo) bool { + ci, err := ps.findChan(ctx, func(ci *ChannelInfo) bool { return ci.ChannelID == minfo.ChannelID }) if err != nil { @@ -357,10 +358,10 @@ func (ps *Store) ByMessageCid(mcid cid.Cid) (*ChannelInfo, error) { } // GetMessage gets the message info for a given message CID -func (ps *Store) GetMessage(mcid cid.Cid) (*MsgInfo, error) { +func (ps *Store) GetMessage(ctx context.Context, mcid cid.Cid) (*MsgInfo, error) { k := dskeyForMsg(mcid) - val, err := ps.ds.Get(k) + val, err := ps.ds.Get(ctx, k) if err != nil { return nil, err } @@ -375,8 +376,8 @@ func (ps *Store) GetMessage(mcid cid.Cid) (*MsgInfo, error) { // OutboundActiveByFromTo looks for outbound channels that have not been // settled, with the given from / to addresses -func (ps *Store) OutboundActiveByFromTo(from address.Address, to address.Address) (*ChannelInfo, error) { - return ps.findChan(func(ci *ChannelInfo) bool { +func (ps *Store) OutboundActiveByFromTo(ctx context.Context, from address.Address, to address.Address) (*ChannelInfo, error) { + return ps.findChan(ctx, func(ci *ChannelInfo) bool { if ci.Direction != DirOutbound { return false } @@ -390,8 +391,8 @@ func (ps *Store) OutboundActiveByFromTo(from address.Address, to address.Address // WithPendingAddFunds is used on startup to find channels for which a // create channel or add funds message has been sent, but lotus shut down // before the response was received. 
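Every Store change in this file follows from go-datastore's v0.5 API, where `Get`, `Put`, `Has`, `Delete` and `Query` gained a leading `context.Context` parameter; the `go-datastore v0.5.1` bump in the testplans go.mod later in this diff is part of the same migration. A small standalone sketch of the new call shape, assuming go-datastore v0.5.x:

```go
package main

import (
	"context"
	"fmt"

	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
)

func main() {
	ctx := context.Background()
	store := dssync.MutexWrap(ds.NewMapDatastore())

	key := ds.NewKey("/paych/demo")
	// Put and Get now take a context as their first argument.
	if err := store.Put(ctx, key, []byte("value")); err != nil {
		panic(err)
	}
	val, err := store.Get(ctx, key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val))
}
```

Because the paych `Store` and `SectorBlocks` below wrap a datastore, they in turn have to accept a context from their own callers, which is why so many otherwise-unrelated signatures change in this patch.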
-func (ps *Store) WithPendingAddFunds() ([]ChannelInfo, error) { - return ps.findChans(func(ci *ChannelInfo) bool { +func (ps *Store) WithPendingAddFunds(ctx context.Context) ([]ChannelInfo, error) { + return ps.findChans(ctx, func(ci *ChannelInfo) bool { if ci.Direction != DirOutbound { return false } @@ -400,10 +401,10 @@ func (ps *Store) WithPendingAddFunds() ([]ChannelInfo, error) { } // ByChannelID gets channel info by channel ID -func (ps *Store) ByChannelID(channelID string) (*ChannelInfo, error) { +func (ps *Store) ByChannelID(ctx context.Context, channelID string) (*ChannelInfo, error) { var stored ChannelInfo - res, err := ps.ds.Get(dskeyForChannel(channelID)) + res, err := ps.ds.Get(ctx, dskeyForChannel(channelID)) if err != nil { if err == datastore.ErrNotFound { return nil, ErrChannelNotTracked @@ -415,7 +416,7 @@ func (ps *Store) ByChannelID(channelID string) (*ChannelInfo, error) { } // CreateChannel creates an outbound channel for the given from / to -func (ps *Store) CreateChannel(from address.Address, to address.Address, createMsgCid cid.Cid, amt types.BigInt) (*ChannelInfo, error) { +func (ps *Store) CreateChannel(ctx context.Context, from address.Address, to address.Address, createMsgCid cid.Cid, amt types.BigInt) (*ChannelInfo, error) { ci := &ChannelInfo{ Direction: DirOutbound, NextLane: 0, @@ -426,13 +427,13 @@ func (ps *Store) CreateChannel(from address.Address, to address.Address, createM } // Save the new channel - err := ps.putChannelInfo(ci) + err := ps.putChannelInfo(ctx, ci) if err != nil { return nil, err } // Save a reference to the create message - err = ps.SaveNewMessage(ci.ChannelID, createMsgCid) + err = ps.SaveNewMessage(ctx, ci.ChannelID, createMsgCid) if err != nil { return nil, err } @@ -441,8 +442,8 @@ func (ps *Store) CreateChannel(from address.Address, to address.Address, createM } // RemoveChannel removes the channel with the given channel ID -func (ps *Store) RemoveChannel(channelID string) error { - return ps.ds.Delete(dskeyForChannel(channelID)) +func (ps *Store) RemoveChannel(ctx context.Context, channelID string) error { + return ps.ds.Delete(ctx, dskeyForChannel(channelID)) } // The datastore key used to identify the channel info @@ -451,7 +452,7 @@ func dskeyForChannel(channelID string) datastore.Key { } // putChannelInfo stores the channel info in the datastore -func (ps *Store) putChannelInfo(ci *ChannelInfo) error { +func (ps *Store) putChannelInfo(ctx context.Context, ci *ChannelInfo) error { if len(ci.ChannelID) == 0 { ci.ChannelID = uuid.New().String() } @@ -462,7 +463,7 @@ func (ps *Store) putChannelInfo(ci *ChannelInfo) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // TODO: This is a hack to get around not being able to CBOR marshall a nil diff --git a/paychmgr/store_test.go b/paychmgr/store_test.go index 1ec8895fa..563b82978 100644 --- a/paychmgr/store_test.go +++ b/paychmgr/store_test.go @@ -1,6 +1,7 @@ package paychmgr import ( + "context" "testing" "github.com/filecoin-project/go-address" @@ -12,8 +13,10 @@ import ( ) func TestStore(t *testing.T) { + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) - addrs, err := store.ListChannels() + addrs, err := store.ListChannels(ctx) require.NoError(t, err) require.Len(t, addrs, 0) @@ -38,19 +41,19 @@ func TestStore(t *testing.T) { } // Track the channel - _, err = store.TrackChannel(ci) + _, err = store.TrackChannel(ctx, ci) require.NoError(t, err) // Tracking same channel again should error - _, err = 
store.TrackChannel(ci) + _, err = store.TrackChannel(ctx, ci) require.Error(t, err) // Track another channel - _, err = store.TrackChannel(ci2) + _, err = store.TrackChannel(ctx, ci2) require.NoError(t, err) // List channels should include all channels - addrs, err = store.ListChannels() + addrs, err = store.ListChannels(ctx) require.NoError(t, err) require.Len(t, addrs, 2) t0100, err := address.NewIDAddress(100) @@ -61,25 +64,25 @@ func TestStore(t *testing.T) { require.Contains(t, addrs, t0200) // Request vouchers for channel - vouchers, err := store.VouchersForPaych(*ci.Channel) + vouchers, err := store.VouchersForPaych(ctx, *ci.Channel) require.NoError(t, err) require.Len(t, vouchers, 1) // Requesting voucher for non-existent channel should error - _, err = store.VouchersForPaych(tutils.NewIDAddr(t, 300)) + _, err = store.VouchersForPaych(ctx, tutils.NewIDAddr(t, 300)) require.Equal(t, err, ErrChannelNotTracked) // Allocate lane for channel - lane, err := store.AllocateLane(*ci.Channel) + lane, err := store.AllocateLane(ctx, *ci.Channel) require.NoError(t, err) require.Equal(t, lane, uint64(0)) // Allocate next lane for channel - lane, err = store.AllocateLane(*ci.Channel) + lane, err = store.AllocateLane(ctx, *ci.Channel) require.NoError(t, err) require.Equal(t, lane, uint64(1)) // Allocate next lane for non-existent channel should error - _, err = store.AllocateLane(tutils.NewIDAddr(t, 300)) + _, err = store.AllocateLane(ctx, tutils.NewIDAddr(t, 300)) require.Equal(t, err, ErrChannelNotTracked) } diff --git a/scripts/archive-branches.sh b/scripts/archive-branches.sh index 98fdfaeb8..7eb680641 100755 --- a/scripts/archive-branches.sh +++ b/scripts/archive-branches.sh @@ -9,10 +9,12 @@ api_repo="repos/$org/$repo" exclusions=( 'master' + 'main' + 'releases' ) gh_api_next() { - links=$(grep '^Link:' | sed -e 's/Link: //' -e 's/, /\n/g') + links=$(grep '^link:' | sed -e 's/link: //' -e 's/, /\n/g') echo "$links" | grep '; rel="next"' >/dev/null || return link=$(echo "$links" | grep '; rel="next"' | sed -e 's/^.*//') @@ -43,7 +45,7 @@ active_branches() { git remote add archived "git@github.com:$arch_repo.git" || true -branches_to_move="$(cat <(active_branches) <(pr_branches) <((IFS=$'\n'; echo "${exclusions[*]}")) | sort -u | comm - <(origin_refs | sort) -13)" +branches_to_move="$(cat <(active_branches) <(pr_branches) <((IFS=$'\n'; echo "${exclusions[*]}")) | sort -u | comm - <(origin_refs | sort) -13 | grep -v -e '^release/' -e '^ntwk-')" echo "================" printf "%s\n" "$branches_to_move" diff --git a/scripts/docker-lotus-miner-entrypoint.sh b/scripts/docker-lotus-miner-entrypoint.sh index 8cdbaecce..a8f2a5540 100755 --- a/scripts/docker-lotus-miner-entrypoint.sh +++ b/scripts/docker-lotus-miner-entrypoint.sh @@ -1,19 +1,24 @@ #!/usr/bin/env bash if [ ! -z $DOCKER_LOTUS_MINER_INIT ]; then - GATE="$LOTUS_PATH"/date_initialized + GATE="${LOTUS_MINER_PATH}/date_initialized" # Don't init if already initialized. - if [ -f "$GATE" ]; then + if [ ! -f "${GATE}" ]; then + echo starting init + eval "/usr/local/bin/lotus-miner init ${DOCKER_LOTUS_MINER_INIT_ARGS}" + if [ $? == 0 ] + then + echo lotus-miner init successful + date > "$GATE" + else + echo lotus-miner init unsuccessful + exit 1 + fi + else echo lotus-miner already initialized. 
- exit 0 fi - echo starting init - /usr/local/bin/lotus-miner init - - # Block future inits - date > "$GATE" fi exec /usr/local/bin/lotus-miner $@ diff --git a/scripts/generate-lotus-cli.py b/scripts/generate-lotus-cli.py index 1d33687ae..7999603b2 100644 --- a/scripts/generate-lotus-cli.py +++ b/scripts/generate-lotus-cli.py @@ -31,12 +31,11 @@ def generate_lotus_cli(prog): if cmd_flag is True and line == '': cmd_flag = False if cmd_flag is True and line[-1] != ':' and 'help, h' not in line: - gap_pos = 0 + gap_pos = None sub_cmd = line if ' ' in line: gap_pos = sub_cmd.index(' ') - if gap_pos: - sub_cmd = cur_cmd + ' ' + sub_cmd[:gap_pos] + sub_cmd = cur_cmd + ' ' + sub_cmd[:gap_pos] get_cmd_recursively(sub_cmd) except Exception as e: print('Fail to deal with "%s" with error:\n%s' % (line, e)) diff --git a/storage/sectorblocks/blocks.go b/storage/sectorblocks/blocks.go index ad4ffc0db..231809a9f 100644 --- a/storage/sectorblocks/blocks.go +++ b/storage/sectorblocks/blocks.go @@ -68,11 +68,11 @@ func NewSectorBlocks(sb SectorBuilder, ds dtypes.MetadataDS) *SectorBlocks { return sbc } -func (st *SectorBlocks) writeRef(dealID abi.DealID, sectorID abi.SectorNumber, offset abi.PaddedPieceSize, size abi.UnpaddedPieceSize) error { +func (st *SectorBlocks) writeRef(ctx context.Context, dealID abi.DealID, sectorID abi.SectorNumber, offset abi.PaddedPieceSize, size abi.UnpaddedPieceSize) error { st.keyLk.Lock() // TODO: make this multithreaded defer st.keyLk.Unlock() - v, err := st.keys.Get(DealIDToDsKey(dealID)) + v, err := st.keys.Get(ctx, DealIDToDsKey(dealID)) if err == datastore.ErrNotFound { err = nil } @@ -97,7 +97,7 @@ func (st *SectorBlocks) writeRef(dealID abi.DealID, sectorID abi.SectorNumber, o if err != nil { return xerrors.Errorf("serializing refs: %w", err) } - return st.keys.Put(DealIDToDsKey(dealID), newRef) // TODO: batch somehow + return st.keys.Put(ctx, DealIDToDsKey(dealID), newRef) // TODO: batch somehow } func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { @@ -107,7 +107,7 @@ func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize } // TODO: DealID has very low finality here - err = st.writeRef(d.DealID, so.Sector, so.Offset, size) + err = st.writeRef(ctx, d.DealID, so.Sector, so.Offset, size) if err != nil { return 0, 0, xerrors.Errorf("writeRef: %w", err) } @@ -115,8 +115,8 @@ func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize return so.Sector, so.Offset, nil } -func (st *SectorBlocks) List() (map[uint64][]api.SealedRef, error) { - res, err := st.keys.Query(query.Query{}) +func (st *SectorBlocks) List(ctx context.Context) (map[uint64][]api.SealedRef, error) { + res, err := st.keys.Query(ctx, query.Query{}) if err != nil { return nil, err } @@ -144,8 +144,8 @@ func (st *SectorBlocks) List() (map[uint64][]api.SealedRef, error) { return out, nil } -func (st *SectorBlocks) GetRefs(dealID abi.DealID) ([]api.SealedRef, error) { // TODO: track local sectors - ent, err := st.keys.Get(DealIDToDsKey(dealID)) +func (st *SectorBlocks) GetRefs(ctx context.Context, dealID abi.DealID) ([]api.SealedRef, error) { // TODO: track local sectors + ent, err := st.keys.Get(ctx, DealIDToDsKey(dealID)) if err == datastore.ErrNotFound { err = ErrNotFound } @@ -161,8 +161,8 @@ func (st *SectorBlocks) GetRefs(dealID abi.DealID) ([]api.SealedRef, error) { // return refs.Refs, nil } -func (st *SectorBlocks) GetSize(dealID abi.DealID) 
(uint64, error) { - refs, err := st.GetRefs(dealID) +func (st *SectorBlocks) GetSize(ctx context.Context, dealID abi.DealID) (uint64, error) { + refs, err := st.GetRefs(ctx, dealID) if err != nil { return 0, err } @@ -170,7 +170,7 @@ func (st *SectorBlocks) GetSize(dealID abi.DealID) (uint64, error) { return uint64(refs[0].Size), nil } -func (st *SectorBlocks) Has(dealID abi.DealID) (bool, error) { +func (st *SectorBlocks) Has(ctx context.Context, dealID abi.DealID) (bool, error) { // TODO: ensure sector is still there - return st.keys.Has(DealIDToDsKey(dealID)) + return st.keys.Has(ctx, DealIDToDsKey(dealID)) } diff --git a/storage/wdpost_changehandler.go b/storage/wdpost_changehandler.go index 7b80f2744..9540182b5 100644 --- a/storage/wdpost_changehandler.go +++ b/storage/wdpost_changehandler.go @@ -15,7 +15,7 @@ import ( const ( SubmitConfidence = 4 - ChallengeConfidence = 10 + ChallengeConfidence = 1 ) type CompleteGeneratePoSTCb func(posts []miner.SubmitWindowedPoStParams, err error) diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index 78d9431d4..9ece295ca 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -5,6 +5,8 @@ import ( "context" "testing" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" @@ -22,12 +24,6 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - tutils "github.com/filecoin-project/specs-actors/v2/support/testing" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -35,6 +31,10 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/journal" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" ) type mockStorageMinerAPI struct { @@ -149,7 +149,11 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV return true, nil } -func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { +func (m mockVerif) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) (bool, error) { + panic("implement me") +} + +func (m mockVerif) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { panic("implement me") } diff --git a/storage/wdpost_sched.go b/storage/wdpost_sched.go index 88357c5b3..53801e362 100644 --- a/storage/wdpost_sched.go +++ b/storage/wdpost_sched.go @@ -115,6 +115,7 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) { } gotCur = false + log.Info("restarting window post scheduler") } select { diff --git a/testplans/lotus-soup/go.mod b/testplans/lotus-soup/go.mod index 7f4a53630..e7d03c23b 100644 
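In wdpost_run_test.go above, the `mockVerif` test double adopts the proof7-based `VerifyAggregateSeals` signature and gains a `VerifyReplicaUpdate` stub. A minimal illustration, using a hypothetical interface rather than Lotus's actual verifier API, of keeping such a mock compiling when the interface it satisfies grows a method:

```go
package main

// verifier is a stand-in for the interface the mock has to satisfy.
type verifier interface {
	VerifyAggregateSeals(info aggregateInfo) (bool, error)
	VerifyReplicaUpdate(update replicaUpdateInfo) (bool, error) // newly added method
}

type (
	aggregateInfo     struct{}
	replicaUpdateInfo struct{}
)

type mockVerif struct{}

func (m mockVerif) VerifyAggregateSeals(info aggregateInfo) (bool, error) {
	return true, nil
}

// Paths the tests never exercise can stay unimplemented, as in the patch.
func (m mockVerif) VerifyReplicaUpdate(update replicaUpdateInfo) (bool, error) {
	panic("implement me")
}

// Compile-time check that the mock still satisfies the grown interface.
var _ verifier = mockVerif{}

func main() {}
```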
--- a/testplans/lotus-soup/go.mod +++ b/testplans/lotus-soup/go.mod @@ -3,16 +3,16 @@ module github.com/filecoin-project/lotus/testplans/lotus-soup go 1.16 require ( - contrib.go.opencensus.io/exporter/prometheus v0.1.0 + contrib.go.opencensus.io/exporter/prometheus v0.4.0 github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe github.com/davecgh/go-spew v1.1.1 - github.com/drand/drand v1.2.1 - github.com/filecoin-project/go-address v0.0.5 - github.com/filecoin-project/go-data-transfer v1.10.1 - github.com/filecoin-project/go-fil-markets v1.12.0 - github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec - github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 - github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b + github.com/drand/drand v1.3.0 + github.com/filecoin-project/go-address v0.0.6 + github.com/filecoin-project/go-data-transfer v1.12.0 + github.com/filecoin-project/go-fil-markets v1.13.5 + github.com/filecoin-project/go-jsonrpc v0.1.5 + github.com/filecoin-project/go-state-types v0.1.1 + github.com/filecoin-project/go-storedcounter v0.1.0 github.com/filecoin-project/lotus v0.0.0-00010101000000-000000000000 github.com/filecoin-project/specs-actors v0.9.14 github.com/google/uuid v1.3.0 @@ -20,18 +20,18 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/influxdata/influxdb v1.9.4 // indirect github.com/ipfs/go-cid v0.1.0 - github.com/ipfs/go-datastore v0.4.6 - github.com/ipfs/go-ipfs-files v0.0.8 + github.com/ipfs/go-datastore v0.5.1 + github.com/ipfs/go-ipfs-files v0.0.9 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.3.0 - github.com/ipfs/go-merkledag v0.3.2 + github.com/ipfs/go-log/v2 v2.4.0 + github.com/ipfs/go-merkledag v0.5.1 github.com/ipfs/go-unixfs v0.2.6 - github.com/ipld/go-car v0.3.1-null-padded-files + github.com/ipld/go-car v0.3.3 github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c - github.com/libp2p/go-libp2p v0.15.0 - github.com/libp2p/go-libp2p-core v0.9.0 + github.com/libp2p/go-libp2p v0.17.0 + github.com/libp2p/go-libp2p-core v0.13.0 github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 - github.com/multiformats/go-multiaddr v0.4.0 + github.com/multiformats/go-multiaddr v0.4.1 github.com/testground/sdk-go v0.2.6 go.opencensus.io v0.23.0 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c diff --git a/testplans/lotus-soup/go.sum b/testplans/lotus-soup/go.sum index b6246d634..c16a429cc 100644 --- a/testplans/lotus-soup/go.sum +++ b/testplans/lotus-soup/go.sum @@ -37,9 +37,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA= -contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg= -contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= +contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod 
h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -78,8 +77,9 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -88,12 +88,13 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg= github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= -github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ= -github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/GeertJohan/go.rice v1.0.2 h1:PtRw+Tg3oa3HYwiDBZyvOJ8LdIyf6lAovJJtr7YOAYk= +github.com/GeertJohan/go.rice v1.0.2/go.mod h1:af5vUNlDNkCjOZeSGFgIJxDje9qdjsO6hshx0gTmZt4= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K172oDhSKU0dJ/miJramo9NITOMyZQ= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= -github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= @@ -113,8 +114,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= -github.com/StackExchange/wmi 
v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -146,7 +147,9 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -159,12 +162,15 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.32.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= +github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= @@ -172,12 +178,13 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod 
h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.2.0 h1:9Re3G2TWxkE06LdMWMpcY6KV81GLXMGiYpPYUPkFAws= +github.com/benbjohnson/clock v1.2.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -212,22 +219,23 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 h1:gfAMKE626QEuKG3si0pdTRcr/YEbBoxY+3GOH3gWvl4= -github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U= +github.com/buger/goterm v1.0.3 h1:7V/HeAQHrzPk/U4BvyH2g9u+xbUW9nr4yRPyG59W4fM= +github.com/buger/goterm v1.0.3/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= @@ -236,20 +244,17 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0 h1:Fv93L3KKckEcEHR3oApXVzyBTDA8WAm6VXhPE00N3f8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= @@ -269,8 +274,9 @@ github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= 
+github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= @@ -307,13 +313,12 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= -github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= -github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= -github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/badger/v2 v2.2007.3 h1:Sl9tQWz92WCbVSe8pj04Tkqlm2boW+KAxd+XSs58SQI= +github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -327,12 +332,13 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand v1.2.1 h1:KB7z+69YbnQ5z22AH/LMi0ObDR8DzYmrkS6vZXTR9jI= -github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g= +github.com/drand/drand v1.3.0 h1:k/w/PtHzmlU6OmfoAqgirWyrJ4FZH8ESlJrsKF20UkM= +github.com/drand/drand v1.3.0/go.mod h1:D6kAVlxufq1gi71YCGfzN455JrXF4Q272ZJEG975fzo= github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber v1.1.4 h1:YvKM03QWGvLrdTnYmxxP5iURAX+Gdb6qRDUOgg8i60Q= github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= +github.com/drand/kyber v1.1.7 h1:YnOshFoGYSOdhf4K8BiDw4XL/l6caL92vsodAsVQbJI= +github.com/drand/kyber v1.1.7/go.mod 
h1:UkHLsI4W6+jT5PvNxmc0cvQAgppjTUpX+XCsN9TXmRo= github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= github.com/drand/kyber-bls12381 v0.2.1 h1:/d5/YAdaCmHpYjF1NZevOEcKGaq6LBbyvkCTIdGqDjs= github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= @@ -344,12 +350,13 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= -github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-sysinfo v1.7.0 h1:4vVvcfi255+8+TyQ7TYUTEK3A+G8v5FLE+ZKYL1z1Dg= +github.com/elastic/go-sysinfo v1.7.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= -github.com/elastic/gosigar v0.12.0 h1:AsdhYCJlTudhfOYQyFNgx+fIVTfrDO0V1ST0vHgiapU= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.1 h1:T0aQ7n/n2ZA9W7DmAnj60v+qzqKERdBgJBO1CG2W6rc= +github.com/elastic/gosigar v0.14.1/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -358,6 +365,7 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etclabscore/go-jsonschema-walk v0.0.6/go.mod h1:VdfDY72AFAiUhy0ZXEaWSpveGjMT5JcDIm903NGqFwQ= @@ -367,14 +375,17 @@ github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/filecoin-project/dagstore v0.4.2/go.mod h1:WY5OoLfnwISCk6eASSF927KKPqLPIlTwmG1qHpA08KY= 
-github.com/filecoin-project/dagstore v0.4.3 h1:yeFl6+2BRY1gOVp/hrZuFa24s7LY0Qqkqx/Gh8lidZs= -github.com/filecoin-project/dagstore v0.4.3/go.mod h1:dm/91AO5UaDd3bABFjg/5fmRH99vvpS7g1mykqvz6KQ= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/filecoin-project/dagstore v0.4.3-0.20211211192320-72b849e131d2/go.mod h1:tlV8C11UljvFq3WWlMh2oMViEaVaPb6uT8eL/YQgDfk= +github.com/filecoin-project/dagstore v0.4.4 h1:luolWahhzp3ulRsapGKE7raoLE3n2cFkQUJjPyqUmF4= +github.com/filecoin-project/dagstore v0.4.4/go.mod h1:7BlOvaTJrFJ1Qemt5jHlLJ4VhDIuSIzGS0IwO/0AXPA= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= -github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM= github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.6 h1:DWQtj38ax+ogHwyH3VULRIoT8E6loyXqsk/p81xoY7M= +github.com/filecoin-project/go-address v0.0.6/go.mod h1:7B0/5DA13n6nHkB8bbGx1gWzG/dbTsZ0fgOJVGsM3TE= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= @@ -384,27 +395,26 @@ github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQj github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= -github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= -github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 h1:U9Z+76pHCKBmtdxFV7JFZJj7OVm12I6dEKwtMVbq5p0= -github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= -github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= +github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= +github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-commp-utils v0.1.3 h1:rTxbkNXZU7FLgdkBk8RsQIEOuPONHykEoX3xGk41Fkw= +github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9ANQrY3fDFoXdqyX04J+dWpK30= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.10.0/go.mod h1:uQtqy6vUAY5v70ZHdkF5mJ8CjVtjj/JA3aOoaqzWTVw= -github.com/filecoin-project/go-data-transfer v1.10.1 h1:YQNLwhizxkdfFxegAyrnn3l7WjgMjqDlqFzr18iWiYI= -github.com/filecoin-project/go-data-transfer v1.10.1/go.mod 
h1:CSDMCrPK2lVGodNB1wPEogjFvM9nVGyiL1GNbBRTSdw= -github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= -github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= +github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-data-transfer v1.12.0 h1:y44x35JvB93kezahMURKizIa/aizGTPSHqi5cbAfTEo= +github.com/filecoin-project/go-data-transfer v1.12.0/go.mod h1:tDrD2jLU2TpVhd+5B8iqBp0fQRV4lP80WZccKXugjYc= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff h1:2bG2ggVZ/rInd/YqUfRj4A5siGuYOPxxuD4I8nYLJF0= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= -github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.12.0 h1:RpU5bLaMADVrU4CgLxKMGHC2ZUocNV35uINxogQCf00= -github.com/filecoin-project/go-fil-markets v1.12.0/go.mod h1:XuuZFaFujI47nrgfQJiq7jWB+6rRya6nm7Sj6uXQ80U= +github.com/filecoin-project/go-fil-markets v1.13.5 h1:NLeF8rI5ZPOJNYEgA6NHrvnuh5QE/2dwuU/7wPW7zP0= +github.com/filecoin-project/go-fil-markets v1.13.5/go.mod h1:vXOHH3q2+zLk929W+lIq3etuDFTyJJ8nG2DwGHG2R1E= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -412,12 +422,12 @@ github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+ github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= -github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:rGI5I7fdU4viManxmDdbk5deZO7afe6L1Wc04dAmlOM= -github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= -github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= +github.com/filecoin-project/go-jsonrpc v0.1.5 h1:ckxqZ09ivBAVf5CSmxxrqqNHC7PJm3GYGtYKiNQ+vGk= +github.com/filecoin-project/go-jsonrpc v0.1.5/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod 
h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= -github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1 h1:0BogtftbcgyBx4lP2JWM00ZK7/pXmgnrDqKp9aLTgVs= github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= +github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= +github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-paramfetch v0.0.2 h1:a6W3Ij6CKhwHYYlx+5mqvBIyw4CabZH2ojdEaoAZ6/g= github.com/filecoin-project/go-paramfetch v0.0.2/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= @@ -426,39 +436,42 @@ github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e h1:XAgb6HmgXaGRklNjhZoNMSIYriKLqjWXIqYMotg6iSs= github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1 h1:LR260vya4p++atgf256W6yV3Lxl5mKrBFcEZePWQrdg= +github.com/filecoin-project/go-state-types v0.1.1/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw= github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/c3OROw/kXVNSTZk= -github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= +github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNdofHZoGPjfNaAo5Q= +github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= +github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus= +github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8= github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= -github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY= 
github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= -github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= -github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= -github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v2 v2.3.6 h1:UxnWTfQd7JsOae39/aHCK0m1IBjdcyymCJfqxuSkn+g= +github.com/filecoin-project/specs-actors/v2 v2.3.6/go.mod h1:DJMpxVRXvev9t8P0XWA26RmTzN+MHiL9IlItVLT0zUc= github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= -github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= github.com/filecoin-project/specs-actors/v5 v5.0.4 h1:OY7BdxJWlUfUFXWV/kpNBYGXNPasDIedf42T3sGx08s= github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4UvT/lTLInCJ3JwOWZbX8Ipwq4= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= +github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew= +github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec h1:KV9vE+Sl2Y3qKsrpba4HcE7wHwK7v6O5U/S0xHbje6A= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211118013026-3dce48197cec/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff h1:JO62nquOGhjoDf9+JkAcV+wsD5yhoyIKOMj70ZNdD3Q= +github.com/filecoin-project/specs-storage v0.1.1-0.20211213202648-f14267c929ff/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod 
h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -467,21 +480,21 @@ github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= -github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/gbrlsnchs/jwt/v3 v3.0.1 h1:lbUmgAKpxnClrKloyIwpxm4OuWeDl5wLk52G91ODPw4= +github.com/gbrlsnchs/jwt/v3 v3.0.1/go.mod h1:AncDcjXz18xetI3A6STfXq2w+LuTx8pQ8bGEwRN8zVM= github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= @@ -498,17 +511,21 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= +github.com/go-kit/log v0.2.0/go.mod 
h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -577,10 +594,10 @@ github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRf github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -607,8 +624,9 @@ github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/V github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 h1:s+PDl6lozQ+dEUtUtQnO7+A2iPG3sK1pI4liU+jxn90= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod 
h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -627,16 +645,20 @@ github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRs github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -668,8 +690,8 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= -github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ 
-685,8 +707,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -744,7 +767,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -759,16 +781,21 @@ github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4n github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= +github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.2.0/go.mod 
h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= @@ -796,12 +823,15 @@ github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= @@ -810,6 +840,10 @@ github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0 github.com/iancoleman/orderedmap v0.1.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= +github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 h1:9tcYMdi+7Rb1y0E9Del1DRHui7Ne3za5lLw6CjMJv/M= +github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94/go.mod h1:GYeBD1CF7AqnKZK+UCytLcY3G+UKo0ByXX/3xfdNyqQ= +github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6 h1:8UsGZ2rr2ksmEru6lToqnXgA8Mz1DP11X4zSJ159C3k= +github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6/go.mod h1:xQig96I1VNBDIWGCdTt54nHt6EeI639SmHycLYL7FkA= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -821,8 +855,9 @@ github.com/influxdata/influxdb v1.9.4 h1:hZMq5fd4enVnruYHd7qCHsqG7kWQ/msA6x+kCvG github.com/influxdata/influxdb v1.9.4/go.mod h1:dR0WCHqaHPpJLaqWnRSl/QHsbXJR+QpofbZXyTc8ccw= github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxdb1-client 
v0.0.0-20200515024757-02f0bf5dbca3 h1:k3/6a1Shi7GGCp9QpyYuXsMM6ncTOjCzOE9Fd6CDA+Q= github.com/influxdata/influxdb1-client v0.0.0-20200515024757-02f0bf5dbca3/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -839,19 +874,16 @@ github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyq github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.3.4 h1:AhJhRrG8xkxh6x87b4wWs+4U4y3DVB3doI8yFNqgQME= -github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= +github.com/ipfs/go-bitswap v0.5.1 h1:721YAEDBnLIrvcIMkCHCdqp34hA8jwL9yKMkyJpSpco= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.5 h1:euqZu96CCbToPyYVwVshu8ENURi8BhFd7FUFfTLi+fQ= -github.com/ipfs/go-blockservice v0.1.5/go.mod h1:yLk8lBJCBRWRqerqCSVi3cE/Dncdt3vGC/PJMVKhLTY= +github.com/ipfs/go-blockservice v0.2.1 h1:NJ4j/cwEfIg60rzAWcCIxRtOwbf6ZPK49MewNxObCPQ= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -861,7 +893,6 @@ github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67Fexh github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.0.8-0.20210716091050-de6c03deae1c/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cid v0.1.0 
h1:YN33LQulcRHjfom/i25yoOZR4Telp1Hr/2RU3d0PnC0= github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo= @@ -870,15 +901,15 @@ github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAK github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.4.6 h1:zU2cmweykxJ+ziXnA2cPtsLe8rdR/vrthOipLPuf6kc= -github.com/ipfs/go-datastore v0.4.6/go.mod h1:XSipLSc64rFKSFRFGo1ecQl+WhYce3K7frtpHkyPFUc= +github.com/ipfs/go-datastore v0.4.7-0.20211013204805-28a3721c2e66/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -886,39 +917,33 @@ github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaH github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger v0.2.6/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger v0.2.7 h1:ju5REfIm+v+wgVnQ19xGLYPHYHbYLR6qJfmMbCDSK1I= github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger2 v0.1.0/go.mod h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e h1:Xi1nil8K2lBOorBS6Ys7+hmUCzH8fr3U9ipdL/IrcEI= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= +github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-badger2 v0.1.2 h1:sQc2q1gaXrv8YFNeUtxil0neuyDf9hnVHfLsi7lpXfE= +github.com/ipfs/go-ds-badger2 v0.1.2/go.mod h1:3FtQmDv6fMubygEfU43bsFelYpIiXX/XEYA54l9eCwg= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod 
h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= -github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= -github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= -github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ds-measure v0.2.0 h1:sG4goQe0KDTccHMyT45CY1XyUbxe5VwTKpg2LjApYyQ= +github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9UvVh8V3JxE= +github.com/ipfs/go-filestore v1.1.0 h1:Pu4tLBi1bucu6/HU9llaOmb9yLFk/sgP+pW764zNDoE= +github.com/ipfs/go-filestore v1.1.0/go.mod h1:6e1/5Y6NvLuCRdmda/KA4GUhXJQ3Uat6vcWm2DJfxc8= github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= -github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= -github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.9.0/go.mod h1:J62ahWT9JbPsFL2UWsUM5rOu0lZJ0LOIH1chHdxGGcw= -github.com/ipfs/go-graphsync v0.9.1 h1:jo7ZaAZ3lal89RhKxKoRkPzIO8lmOY6KUWA1mDRZ2+U= -github.com/ipfs/go-graphsync v0.9.1/go.mod h1:J62ahWT9JbPsFL2UWsUM5rOu0lZJ0LOIH1chHdxGGcw= +github.com/ipfs/go-graphsync v0.11.0 h1:PiiD5CnoC3xEHMW8d6uBGqGcoTwiMB5d9CORIEyF6iA= +github.com/ipfs/go-graphsync v0.11.0/go.mod h1:wC+c8vGVjAHthsVIl8LKr37cUra2GOaMYcQNNmMxDqE= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= -github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.4 h1:DZdeya9Vu4ttvlGheQPGrj6kWehXnYZRFCp9EsZQ1hI= -github.com/ipfs/go-ipfs-blockstore v1.0.4/go.mod h1:uL7/gTJ8QIZ3MtA3dWf+s1a0U3fJy2fcEZAsovpRp+w= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.1.0/go.mod h1:5QDUApRqpgPcfGstCxYeMnjt/DYQtXXdJVCvxHHuWVk= +github.com/ipfs/go-ipfs-blockstore v1.1.1/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= +github.com/ipfs/go-ipfs-blockstore v1.1.2 h1:WCXoZcMYnvOTmlpX+RSSnhVN0uCmbWTeepTGX5lgiXw= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod 
h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= @@ -933,16 +958,20 @@ github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1I github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1 h1:mEiXWdbMN6C7vtDG21Fphx8TGCbZPpQnz/496w/PL4g= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-files v0.0.9 h1:OFyOfmuVDu9c5YtjSDORmwXzE6fmZikzZpzsnNkgFEg= +github.com/ipfs/go-ipfs-files v0.0.9/go.mod h1:aFv2uQ/qxWpL/6lidWvnSQmaVqCrf0TBGoUr+C1Fo84= github.com/ipfs/go-ipfs-http-client v0.0.6 h1:k2QllZyP7Fz5hMgsX5hvHfn1WPG9Ngdy5WknQ7JNhBM= github.com/ipfs/go-ipfs-http-client v0.0.6/go.mod h1:8e2dQbntMZKxLfny+tyXJ7bJHZFERp/2vyzZdvkeLMc= github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= @@ -951,21 +980,26 @@ github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7 github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= -github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= 
github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= +github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= +github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= +github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= +github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI= github.com/ipfs/go-ipns v0.1.2/go.mod h1:ioQ0j02o6jdIVW+bmi18f4k2gRf0AV3kZ9KeHYHICnQ= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= @@ -985,14 +1019,15 @@ github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHn github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0 h1:31Re/cPqFHpsRHgyVwjWADPoF0otB1WrjTy8ZFYwEZU= github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= +github.com/ipfs/go-log/v2 v2.4.0 h1:iR/2o9PGWanVJrBgIH5Ff8mPGOwpqLaPIAFqSnsdlzk= +github.com/ipfs/go-log/v2 v2.4.0/go.mod h1:nPZnh7Cj7lwS3LpRU5Mwr2ol1c2gXIEXuF6aywqrtmo= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.5.1 h1:tr17GPP5XtPhvPPiWtu20tSGZiZDuTaJRXBLcr79Umk= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod 
h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= @@ -1000,9 +1035,8 @@ github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho= github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= -github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= +github.com/ipfs/go-peertaskqueue v0.7.0 h1:VyO6G4sbzX80K58N60cCaHsSsypbUNs1GjO5seGNsQ0= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= @@ -1017,30 +1051,30 @@ github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdm github.com/ipfs/iptb-plugins v0.3.0 h1:C1rpq1o5lUZtaAOkLIox5akh6ba4uk/3RwWc6ttVxw0= github.com/ipfs/iptb-plugins v0.3.0/go.mod h1:5QtOvckeIw4bY86gSH4fgh3p3gCSMn3FmIKr4gaBncA= github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= -github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= -github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= -github.com/ipld/go-car v0.3.1-0.20210601190600-f512dac51e8e/go.mod h1:wUxBdwOLA9/0HZBi3fnTBzla0MuwlqgJLyrhOg1XaKI= -github.com/ipld/go-car v0.3.1-null-padded-files h1:FMD0Ce4tAM9P5aq7yklw2jnVK3ZuoJ4xK6vkL9VLmxs= -github.com/ipld/go-car v0.3.1-null-padded-files/go.mod h1:wUxBdwOLA9/0HZBi3fnTBzla0MuwlqgJLyrhOg1XaKI= -github.com/ipld/go-car/v2 v2.0.0-beta1.0.20210721090610-5a9d1b217d25/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.0.2/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 h1:6Z0beJSZNsRY+7udoqUl4gQ/tqtrPuRvDySrlsvbqZA= -github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= +github.com/ipld/go-car v0.3.3-0.20211210032800-e6f244225a16/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car v0.3.3 h1:D6y+jvg9h2ZSv7GLUMWUwg5VTLy1E7Ak+uQw5orOg3I= +github.com/ipld/go-car v0.3.3/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car/v2 v2.1.1-0.20211211000942-be2525f6bf2d/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.1.1 h1:saaKz4nC0AdfCGHLYKeXLGn8ivoPC54fyS55uyOLKwA= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= github.com/ipld/go-codec-dagpb v1.3.0 h1:czTcaoAuNNyIYWs6Qe01DJ+sEX7B+1Z0LcXjSatMGe8= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= github.com/ipld/go-ipld-prime 
v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= -github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= -github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.12.0 h1:JapyKWTsJgmhrPI7hfx4V798c/RClr85sXfBZnH1VIw= -github.com/ipld/go-ipld-prime v0.12.0/go.mod h1:hy8b93WleDMRKumOJnTIrr0MbbFbx9GD6Kzxa53Xppc= +github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= +github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= +github.com/ipld/go-ipld-prime v0.14.3-0.20211207234443-319145880958/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.14.3 h1:cGUmxSws2IHurn00/iLMDapeXsnf9+FyAtYVy8G/JsQ= +github.com/ipld/go-ipld-prime v0.14.3/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= -github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= +github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= +github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= @@ -1048,7 +1082,6 @@ github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= github.com/jbenet/go-random 
v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= @@ -1071,8 +1104,9 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1 h1:qBCV/RLV02TSfQa7tFmxTihnG+u+7JXByOkhlkR5rmQ= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -1086,6 +1120,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -1115,13 +1150,14 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.8/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/crc32 
v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= @@ -1138,8 +1174,9 @@ github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c/go.mod github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -1163,8 +1200,9 @@ github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40J github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-conn-security-multistream v0.3.0 h1:9UCIKlBL1hC9u7nkMXpD1nkc/T53PKMAn3/k9ivBAVc= +github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= @@ -1178,21 +1216,20 @@ github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68 github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= github.com/libp2p/go-libp2p v0.3.1/go.mod h1:e6bwxbdYH1HqWTz8faTChKGR0BjPc8p+6SyP8GTTR7Y= github.com/libp2p/go-libp2p v0.4.0/go.mod h1:9EsEIf9p2UDuwtPd0DwJsAl0qXVxgAnuDGRvHbfATfI= -github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= -github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= -github.com/libp2p/go-libp2p v0.14.0/go.mod h1:dsQrWLAoIn+GkHPN/U+yypizkHiB9tnv79Os+kSgQ4Q= +github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= 
github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM= -github.com/libp2p/go-libp2p v0.15.0 h1:jbMbdmtizfpvl1+oQuGJzfGhttAtuxUCavF3enwFncg= -github.com/libp2p/go-libp2p v0.15.0/go.mod h1:8Ljmwon0cZZYKrOCjFeLwQEK8bqR42dOheUZ1kSKhP0= -github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= +github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= +github.com/libp2p/go-libp2p v0.17.0 h1:8l4GV401OSd4dFRyHDtIT/mEzdh/aQGoFC8xshYgm5M= +github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= +github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E= +github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= @@ -1200,17 +1237,19 @@ github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQ github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o= +github.com/libp2p/go-libp2p-autonat v0.7.0 h1:rCP5s+A2dlhM1Xd66wurE0k7S7pPmM0D+FlqqSBXxks= +github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= -github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-blankhost v0.3.0 h1:kTnLArltMabZlzY63pgGDA4kkUcLkBFSM98zBssn/IY= +github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= @@ -1222,9 +1261,9 
@@ github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCy github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= -github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= -github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= +github.com/libp2p/go-libp2p-connmgr v0.3.0 h1:yerFXrYa0oxpuVsLlndwm/bLulouHYDcvFrY/4H4fx8= +github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= @@ -1254,8 +1293,12 @@ github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJB github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= -github.com/libp2p/go-libp2p-core v0.9.0 h1:t97Mv0LIBZlP2FXVRNKKVzHJCIjbIWGxYptGId4+htU= github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= +github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.13.0 h1:IFG/s8dN6JN2OTrXX9eq2wNU/Zlz2KLdwZUp5FplgXI= +github.com/libp2p/go-libp2p-core v0.13.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= @@ -1266,8 +1309,8 @@ github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfx github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-discovery v0.5.1 h1:CJylx+h2+4+s68GvrM4pGNyfNhOYviWBPtVv5PA7sfo= -github.com/libp2p/go-libp2p-discovery v0.5.1/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-discovery v0.6.0 h1:1XdPmhMJr8Tmj/yUfkJMIi8mgwWrLUsCB3bMxdT+DSo= +github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod 
h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= @@ -1275,8 +1318,8 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= -github.com/libp2p/go-libp2p-kad-dht v0.13.0 h1:qBNYzee8BVS6RkD8ukIAGRG6LmVz8+kkeponyI7W+yA= -github.com/libp2p/go-libp2p-kad-dht v0.13.0/go.mod h1:NkGf28RNhPrcsGYWJHm6EH8ULkiJ2qxsWmpE7VTL3LI= +github.com/libp2p/go-libp2p-kad-dht v0.15.0 h1:Ke+Oj78gX5UDXnA6HBdrgvi+fStJxgYTDa51U0TsCLo= +github.com/libp2p/go-libp2p-kad-dht v0.15.0/go.mod h1:rZtPxYu1TnHHz6n1RggdGrxUX/tA1C2/Wiw3ZMUDrU0= github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= @@ -1295,17 +1338,17 @@ github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aD github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-nat v0.1.0 h1:vigUi2MEN+fwghe5ijpScxtbbDz+L/6y8XwlzYOJgSY= +github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-noise v0.2.2 h1:MRt5XGfYziDXIUy2udtMWfPmzZqUDYoC1FZoKnqPzwk= -github.com/libp2p/go-libp2p-noise v0.2.2/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= +github.com/libp2p/go-libp2p-noise v0.3.0 h1:NCVH7evhVt9njbTQshzT7N1S3Q6fjj9M11FCgfH5+cA= +github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= @@ -1322,24 +1365,27 @@ github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuD github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= 
github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= -github.com/libp2p/go-libp2p-peerstore v0.2.9 h1:tVa7siDymmzOl3b3+SxPYpQUCnicmK13y6Re1PqWK+g= -github.com/libp2p/go-libp2p-peerstore v0.2.9/go.mod h1:zhBaLzxiWpNGQ3+uI17G/OIjmOD8GxKyFuHbrZbgs0w= +github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= +github.com/libp2p/go-libp2p-peerstore v0.6.0 h1:HJminhQSGISBIRb93N6WK3t6Fa8OOTnHd/VBjL4mY5A= +github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= -github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk= -github.com/libp2p/go-libp2p-pubsub v0.5.4 h1:rHl9/Xok4zX3zgi0pg0XnUj9Xj2OeXO8oTu85q2+YA8= -github.com/libp2p/go-libp2p-pubsub v0.5.4/go.mod h1:gVOzwebXVdSMDQBTfH8ACO5EJ4SQrvsHqCmYsCZpD0E= +github.com/libp2p/go-libp2p-pubsub v0.6.0 h1:98+RXuEWW17U6cAijK1yaTf6mw/B+n5yPA421z+dlo0= +github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg= github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 h1:2lH7rMlvDPSvXeOR+g7FE6aqiEwxtpxWKQL8uigk5fQ= github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6/go.mod h1:8ZodgKS4qRLayfw9FDKDd9DX4C16/GMofDxSldG8QPI= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-quic-transport v0.11.2 h1:p1YQDZRHH4Cv2LPtHubqlQ9ggz4CKng/REZuXZbZMhM= github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ= +github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= +github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-quic-transport v0.15.2 h1:wHBEceRy+1/8Ec8dAIyr+/P7L2YefIGprPVy5LrMM+k= +github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -1364,10 +1410,11 @@ github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod 
h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= -github.com/libp2p/go-libp2p-swarm v0.5.3 h1:hsYaD/y6+kZff1o1Mc56NcuwSg80lIphTS/zDk3mO4M= github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= +github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= +github.com/libp2p/go-libp2p-swarm v0.9.0 h1:LdWjHDVjPMYt3NCG2EHcQiIP8XzA8BHhHz8ZLAYol2Y= +github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1377,22 +1424,26 @@ github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= -github.com/libp2p/go-libp2p-testing v0.4.2 h1:IOiA5mMigi+eEjf4J+B7fepDhsjtsoWA9QbsCqbNp5U= github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.6.0 h1:tV/wz6mS1VoAYA/5DGTiyzw9TJ+eXMCMvzU5VPLJSgg= +github.com/libp2p/go-libp2p-testing v0.6.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-tls v0.2.0 h1:N8i5wPiHudA+02sfW85R2nUbybPm7agjAywZc6pd3xA= -github.com/libp2p/go-libp2p-tls v0.2.0/go.mod h1:twrp2Ci4lE2GYspA1AnlYm+boYjqVruxDKJJj7s6xrc= +github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.3.1 h1:lsE2zYte+rZCEOHF72J1Fg3XK3dGQyKvI6i5ehJfEp0= +github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 h1:SHt3g0FslnqIkEWF25YOB8UCOCTpGAVvHRWQYJ+veiI= 
github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= +github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0 h1:GfMCU+2aGGEm1zW3UcOz6wYSn8tXQalFfVfcww99i5A= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo= github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= @@ -1404,10 +1455,10 @@ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhL github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= -github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= -github.com/libp2p/go-libp2p-yamux v0.5.3/go.mod h1:Vy3TMonBAfTMXHWopsMc8iX/XGRYrRlpUaMzaeuHV/s= -github.com/libp2p/go-libp2p-yamux v0.5.4 h1:/UOPtT/6DHPtr3TtKXBHa6g0Le0szYuI33Xc/Xpd7fQ= github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= +github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= +github.com/libp2p/go-libp2p-yamux v0.7.0 h1:bVXHbTj/XH4uBBsPrg26BlDABk5WYRlssY73P0SjhPc= +github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -1424,12 +1475,14 @@ github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-msgio v0.1.0 h1:8Q7g/528ivAlfXTFWvWhVjTE8XG8sDTkRUKPYh9+5Q8= +github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod 
h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= @@ -1442,13 +1495,15 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport v0.1.0 h1:0ooKOx2iwyIkf339WCZ2HN3ujTDbkK0PjC7JVoP1AiM= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-reuseport-transport v0.0.5 h1:lJzi+vSYbyJj2faPKLxNGWEIBcaV/uJmyvsUxXy2mLw= github.com/libp2p/go-reuseport-transport v0.0.5/go.mod h1:TC62hhPc8qs5c/RoXDZG6YmjK+/YWUPC0yYmeUecbjc= +github.com/libp2p/go-reuseport-transport v0.1.0 h1:C3PHeHjmnz8m6f0uydObj02tMEoi7CyD1zuN7xQT8gc= +github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= @@ -1463,11 +1518,11 @@ github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19 github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= +github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= -github.com/libp2p/go-tcp-transport v0.2.8 h1:aLjX+Nkz+kIz3uA56WtlGKRSAnKDvnqKmv1qF4EyyE4= -github.com/libp2p/go-tcp-transport v0.2.8/go.mod h1:64rSfVidkYPLqbzpcN2IwHY4pmgirp67h++hZ/rcndQ= +github.com/libp2p/go-tcp-transport v0.4.0 h1:VDyg4j6en3OuXf90gfDQh5Sy9KowO9udnd0OU8PP6zg= +github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= @@ -1485,28 +1540,30 @@ github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.5/go.mod 
h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/go-yamux/v2 v2.1.1/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/go-yamux/v2 v2.2.0 h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU= github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/zeroconf/v2 v2.0.0/go.mod h1:J85R/d9joD8u8F9aHM8pBXygtG9W02enEwS+wWeL6yo= +github.com/libp2p/go-yamux/v2 v2.3.0 h1:luRV68GS1vqqr6EFUjtu1kr51d+IbW0gSowu8emYWAI= +github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs= +github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.21.2 h1:8LqqL7nBQFDUINadW0fHV/xSaCQJgmJC0Gv+qUnjd78= github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q= +github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.24.0 h1:ToR7SIIEdrgOhgVTHvPgdVRJfgVy+N0wQAagH7L4d5g= +github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= +github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1525,12 +1582,12 @@ github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZb github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod 
h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-15 v0.1.5 h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk= github.com/marten-seemann/qtls-go1-15 v0.1.5/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-16 v0.1.4 h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco= github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= -github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1 h1:/rpmWuGvceLwwWuaKPdjpR4JJEUH0tq64/I3hvzaNLM= github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-17 v0.1.0 h1:P9ggrs5xtwiqXv/FHNwntmuLMNq3KaSIG93AtAZ48xk= +github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= @@ -1540,8 +1597,9 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -1551,8 +1609,9 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -1577,7 +1636,6 @@ github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= 
-github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= @@ -1590,6 +1648,8 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4S github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= @@ -1598,6 +1658,7 @@ github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1608,10 +1669,12 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= @@ -1620,8 +1683,9 @@ github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjW github.com/mr-tron/base58 v1.2.0 
h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE= +github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= @@ -1635,8 +1699,9 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.4.0 h1:hL/K4ZJhJ5PTw3nwylq9lGU5yArzcAroZmex1ghSEkQ= github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= +github.com/multiformats/go-multiaddr v0.4.1 h1:Pq37uLx3hsyNlTDir7FZyU8+cFCTqd5y1KiM2IzOutI= +github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= @@ -1660,10 +1725,9 @@ github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4= -github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.2.1-0.20210714093213-b2b5bd6fe68b/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.3.0 h1:tstDwfIjiHbnIjeM5Lp+pMrSeN+LCMsEwOrkPmWm03A= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61 h1:ZrUuMKNgJ52qHPoQ+bx0h0uBfcWmN7Px+4uKSZeesiI= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1671,13 +1735,13 @@ github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa github.com/multiformats/go-multihash 
v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.15 h1:hWOPdrNqDjwHDx82vsYGSDZNyktOJJ2dzZJzFkOV1jM= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= +github.com/multiformats/go-multihash v0.1.0 h1:CgAgwqk3//SVEw3T+6DqI4mWMyRuDwZtOWcJT0q9+EA= +github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= @@ -1692,10 +1756,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= @@ -1703,8 +1773,8 @@ github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= -github.com/nkovacs/streamquote 
v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg= -github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nkovacs/streamquote v1.0.0 h1:PmVIV08Zlx2lZK5fFZlMZ04eHcDTIFJCv/5/0twVUow= +github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -1758,6 +1828,7 @@ github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1766,6 +1837,7 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -1789,6 +1861,7 @@ github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXx github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1801,7 +1874,6 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= 
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= @@ -1825,6 +1897,7 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= @@ -1838,17 +1911,18 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prometheus v0.0.0-20200609090129-a6600f564e3c/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY= +github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= -github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= -github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= +github.com/raulk/go-watchdog v1.2.0 h1:konN75pw2BMmZ+AfuAm5rtFsWcJpKF3m02rKituuXNo= +github.com/raulk/go-watchdog v1.2.0/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1862,6 +1936,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= @@ -1957,7 +2033,9 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1993,6 +2071,7 @@ github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixU github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.28.0+incompatible h1:G4QSBfvPKvg5ZM2j9MrJFdfI5iSljY/WnJqOGFao6HI= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= @@ -2033,7 +2112,6 @@ github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIf github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= @@ -2045,6 +2123,7 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:f github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod 
h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8 h1:TEv7MId88TyIqIUL4hbf9otOookIolMxlEbN0ro671Y= github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= @@ -2086,6 +2165,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= @@ -2103,6 +2183,10 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -2117,13 +2201,23 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel/bridge/opencensus v0.25.0/go.mod h1:dkZDdaNwLlIutxK2Kc2m3jwW2M1ISaNf8/rOYVwuVHs= +go.opentelemetry.io/otel/exporters/jaeger v1.2.0/go.mod h1:KJLFbEMKTNPIfOxcg/WikIozEoKcPgJRz3Ce1vLlM8E= +go.opentelemetry.io/otel/internal/metric v0.25.0/go.mod 
h1:Nhuw26QSX7d6n4duoqAFi5KOQR4AuzyMcl5eXOgwxtc= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v0.25.0/go.mod h1:E884FSpQfnJOMMUaq+05IWlJ4rjZpk2s/F1Ju+TEEm8= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk/export/metric v0.25.0/go.mod h1:Ej7NOa+WpN49EIcr1HMUYRvxXXCCnQCg2+ovdt2z8Pk= +go.opentelemetry.io/otel/sdk/metric v0.25.0/go.mod h1:G4xzj4LvC6xDDSsVXpvRVclQCbofGGg4ZU2VKKtDRfg= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -2138,8 +2232,9 @@ go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY= go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -2155,9 +2250,11 @@ go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= @@ -2187,14 +2284,15 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2206,12 +2304,16 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e h1:VvfwVmMH40bpMeizC9/K7ipM5Qjucuu16RWfneFPyhQ= golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b h1:QAqMVf3pSa6eeTsuklijukjXBlj7Es2QQplab+/RbQ4= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2227,7 +2329,6 @@ golang.org/x/exp 
v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013 h1:Jp57DBw4K7mimZNA3F9f7CndVcUt4kJjmyJf2rzJHoI= @@ -2245,8 +2346,9 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= @@ -2257,7 +2359,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2311,10 +2412,8 @@ golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -2329,8 +2428,12 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2367,6 +2470,7 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2399,6 +2503,7 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2454,7 +2559,10 @@ 
golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2462,9 +2570,12 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211209171907-798191bca915 h1:P+8mCzuEpyszAT6T42q0sxU+eveBAF/cJ2Kp0x6/8+0= +golang.org/x/sys v0.0.0-20211209171907-798191bca915/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= @@ -2486,6 +2597,7 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2518,6 +2630,7 @@ golang.org/x/tools 
v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2559,15 +2672,15 @@ golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210225150353-54dc8c5edb56/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2584,7 +2697,6 @@ google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+ google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -2645,12 +2757,13 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto 
v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -2678,6 +2791,7 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2754,6 +2868,9 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= @@ -2774,6 +2891,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= sigs.k8s.io/yaml 
v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/testplans/lotus-soup/rfwp/chain_state.go b/testplans/lotus-soup/rfwp/chain_state.go index d91acdff9..38b8b504e 100644 --- a/testplans/lotus-soup/rfwp/chain_state.go +++ b/testplans/lotus-soup/rfwp/chain_state.go @@ -31,7 +31,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - tstats "github.com/filecoin-project/lotus/tools/stats" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" ) func UpdateChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { @@ -40,7 +40,7 @@ func UpdateChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { ctx := context.Background() - tipsetsCh, err := tstats.GetTips(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) + tipsetsCh, err := tsync.BufferedTipsetChannel(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) if err != nil { return err } diff --git a/testplans/lotus-soup/rfwp/html_chain_state.go b/testplans/lotus-soup/rfwp/html_chain_state.go index 7a3d56be4..3c840facd 100644 --- a/testplans/lotus-soup/rfwp/html_chain_state.go +++ b/testplans/lotus-soup/rfwp/html_chain_state.go @@ -11,7 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/cli" - tstats "github.com/filecoin-project/lotus/tools/stats" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" "github.com/ipfs/go-cid" ) @@ -22,8 +22,9 @@ func FetchChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { ctx := context.Background() api := m.FullApi - tipsetsCh, err := tstats.GetTips(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) + tipsetsCh, err := tsync.BufferedTipsetChannel(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) if err != nil { + return err } diff --git a/testplans/lotus-soup/testkit/deals.go b/testplans/lotus-soup/testkit/deals.go index f0910537d..703e6888a 100644 --- a/testplans/lotus-soup/testkit/deals.go +++ b/testplans/lotus-soup/testkit/deals.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" - tstats "github.com/filecoin-project/lotus/tools/stats" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" ) func StartDeal(ctx context.Context, minerActorAddr address.Address, client api.FullNode, fcid cid.Cid, fastRetrieval bool) *cid.Cid { @@ -46,7 +46,7 @@ func WaitDealSealed(t *TestEnvironment, ctx context.Context, client api.FullNode cctx, cancel := context.WithCancel(ctx) defer cancel() - tipsetsCh, err := tstats.GetTips(cctx, &v0api.WrapperV1Full{FullNode: client}, abi.ChainEpoch(height), headlag) + tipsetsCh, err := tsync.BufferedTipsetChannel(cctx, &v0api.WrapperV1Full{FullNode: client}, abi.ChainEpoch(height), headlag) if err != nil { panic(err) } diff --git a/testplans/lotus-soup/testkit/node.go b/testplans/lotus-soup/testkit/node.go index e70f58e38..9506c4bf4 100644 --- 
a/testplans/lotus-soup/testkit/node.go +++ b/testplans/lotus-soup/testkit/node.go @@ -8,8 +8,8 @@ import ( "sort" "time" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/metrics" @@ -17,7 +17,11 @@ import ( "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/modules/dtypes" modtest "github.com/filecoin-project/lotus/node/modules/testing" - tstats "github.com/filecoin-project/lotus/tools/stats" + + tinflux "github.com/filecoin-project/lotus/tools/stats/influx" + tipldstore "github.com/filecoin-project/lotus/tools/stats/ipldstore" + tpoints "github.com/filecoin-project/lotus/tools/stats/points" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" influxdb "github.com/kpacha/opencensus-influxdb" ma "github.com/multiformats/go-multiaddr" @@ -234,7 +238,7 @@ func collectStats(t *TestEnvironment, ctx context.Context, api api.FullNode) err influxPass := "" influxDb := "testground" - influx, err := tstats.InfluxClient(influxAddr, influxUser, influxPass) + influxClient, err := tinflux.NewClient(influxAddr, influxUser, influxPass) if err != nil { t.RecordMessage(err.Error()) return err @@ -246,7 +250,38 @@ func collectStats(t *TestEnvironment, ctx context.Context, api api.FullNode) err go func() { time.Sleep(15 * time.Second) t.RecordMessage("calling tstats.Collect") - tstats.Collect(context.Background(), &v0api.WrapperV1Full{FullNode: api}, influx, influxDb, height, headlag) + + store, err := tipldstore.NewApiIpldStore(ctx, api, 1024) + if err != nil { + t.RecordMessage(err.Error()) + return + } + + collector, err := tpoints.NewChainPointCollector(ctx, store, api) + if err != nil { + t.RecordMessage(err.Error()) + return + } + + tipsets, err := tsync.BufferedTipsetChannel(ctx, api, abi.ChainEpoch(height), headlag) + if err != nil { + t.RecordMessage(err.Error()) + return + } + + wq := tinflux.NewWriteQueue(ctx, influxClient) + defer wq.Close() + + for tipset := range tipsets { + if nb, err := collector.Collect(ctx, tipset); err != nil { + t.RecordMessage(err.Error()) + return + } else { + nb.SetDatabase(influxDb) + wq.AddBatch(nb) + } + } + }() return nil diff --git a/testplans/lotus-soup/testkit/retrieval.go b/testplans/lotus-soup/testkit/retrieval.go index de3dee6be..67e8d1654 100644 --- a/testplans/lotus-soup/testkit/retrieval.go +++ b/testplans/lotus-soup/testkit/retrieval.go @@ -11,6 +11,7 @@ import ( "time" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/ipfs/go-cid" files "github.com/ipfs/go-ipfs-files" ipld "github.com/ipfs/go-ipld-format" @@ -51,7 +52,7 @@ func RetrieveData(t *TestEnvironment, ctx context.Context, client api.FullNode, IsCAR: carExport, } t1 = time.Now() - err = client.ClientRetrieve(ctx, offers[0].Order(caddr), ref) + err = (&v0api.WrapperV1Full{FullNode: client}).ClientRetrieve(ctx, v0api.OfferOrder(offers[0], caddr), ref) if err != nil { return err } @@ -77,7 +78,7 @@ func RetrieveData(t *TestEnvironment, ctx context.Context, client api.FullNode, func ExtractCarData(ctx context.Context, rdata []byte, rpath string) []byte { bserv := dstest.Bserv() - ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata)) + ch, err := car.LoadCar(ctx, bserv.Blockstore(), bytes.NewReader(rdata)) if err != nil { panic(err) } diff --git 
a/testplans/lotus-soup/testkit/role_miner.go b/testplans/lotus-soup/testkit/role_miner.go index fc821cd4d..7204c71fe 100644 --- a/testplans/lotus-soup/testkit/role_miner.go +++ b/testplans/lotus-soup/testkit/role_miner.go @@ -182,7 +182,7 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { return nil, err } - err = ds.Put(datastore.NewKey("miner-address"), minerAddr.Bytes()) + err = ds.Put(context.Background(), datastore.NewKey("miner-address"), minerAddr.Bytes()) if err != nil { return nil, err } diff --git a/testplans/lotus-soup/testkit/role_pubsub_tracer.go b/testplans/lotus-soup/testkit/role_pubsub_tracer.go index 5b13e6b81..401a9824d 100644 --- a/testplans/lotus-soup/testkit/role_pubsub_tracer.go +++ b/testplans/lotus-soup/testkit/role_pubsub_tracer.go @@ -30,7 +30,7 @@ func PreparePubsubTracer(t *TestEnvironment) (*PubsubTracer, error) { tracedIP := t.NetClient.MustGetDataNetworkIP().String() tracedAddr := fmt.Sprintf("/ip4/%s/tcp/4001", tracedIP) - host, err := libp2p.New(ctx, + host, err := libp2p.New( libp2p.Identity(privk), libp2p.ListenAddrStrings(tracedAddr), ) diff --git a/tools/packer/repo/config.toml b/tools/packer/repo/config.toml index 900dad218..380d5a28f 100644 --- a/tools/packer/repo/config.toml +++ b/tools/packer/repo/config.toml @@ -21,6 +21,7 @@ ListenAddresses = ["/ip4/0.0.0.0/tcp/5678", "/ip6/::/tcp/5678"] # IpfsMAddr = "" # IpfsUseForRetrieval = false # SimultaneousTransfersForStorage = 20 +# SimultaneousTransfersForStoragePerClient = 0 # SimultaneousTransfersForRetrieval = 20 # [Metrics] diff --git a/tools/stats/collect.go b/tools/stats/collect.go deleted file mode 100644 index e33ec994b..000000000 --- a/tools/stats/collect.go +++ /dev/null @@ -1,63 +0,0 @@ -package stats - -import ( - "context" - "time" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api/v0api" - client "github.com/influxdata/influxdb1-client/v2" -) - -func Collect(ctx context.Context, api v0api.FullNode, influx client.Client, database string, height int64, headlag int) { - tipsetsCh, err := GetTips(ctx, api, abi.ChainEpoch(height), headlag) - if err != nil { - log.Fatal(err) - } - - wq := NewInfluxWriteQueue(ctx, influx) - defer wq.Close() - - for tipset := range tipsetsCh { - log.Infow("Collect stats", "height", tipset.Height()) - pl := NewPointList() - height := tipset.Height() - - if err := RecordTipsetPoints(ctx, api, pl, tipset); err != nil { - log.Warnw("Failed to record tipset", "height", height, "error", err) - continue - } - - if err := RecordTipsetMessagesPoints(ctx, api, pl, tipset); err != nil { - log.Warnw("Failed to record messages", "height", height, "error", err) - continue - } - - if err := RecordTipsetStatePoints(ctx, api, pl, tipset); err != nil { - log.Warnw("Failed to record state", "height", height, "error", err) - continue - } - - // Instead of having to pass around a bunch of generic stuff we want for each point - // we will just add them at the end. 
- - tsTimestamp := time.Unix(int64(tipset.MinTimestamp()), int64(0)) - - nb, err := InfluxNewBatch() - if err != nil { - log.Fatal(err) - } - - for _, pt := range pl.Points() { - pt.SetTime(tsTimestamp) - - nb.AddPoint(NewPointFrom(pt)) - } - - nb.SetDatabase(database) - - log.Infow("Adding points", "count", len(nb.Points()), "height", tipset.Height()) - - wq.AddBatch(nb) - } -} diff --git a/tools/stats/head_buffer.go b/tools/stats/head_buffer.go deleted file mode 100644 index 0a7c63e6e..000000000 --- a/tools/stats/head_buffer.go +++ /dev/null @@ -1,47 +0,0 @@ -package stats - -import ( - "container/list" - - "github.com/filecoin-project/lotus/api" -) - -type headBuffer struct { - buffer *list.List - size int -} - -func newHeadBuffer(size int) *headBuffer { - buffer := list.New() - buffer.Init() - - return &headBuffer{ - buffer: buffer, - size: size, - } -} - -func (h *headBuffer) push(hc *api.HeadChange) (rethc *api.HeadChange) { - if h.buffer.Len() == h.size { - var ok bool - - el := h.buffer.Front() - rethc, ok = el.Value.(*api.HeadChange) - if !ok { - panic("Value from list is not the correct type") - } - - h.buffer.Remove(el) - } - - h.buffer.PushBack(hc) - - return -} - -func (h *headBuffer) pop() { - el := h.buffer.Back() - if el != nil { - h.buffer.Remove(el) - } -} diff --git a/tools/stats/head_buffer_test.go b/tools/stats/head_buffer_test.go deleted file mode 100644 index 4059f730e..000000000 --- a/tools/stats/head_buffer_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package stats - -import ( - "testing" - - "github.com/filecoin-project/lotus/api" - "github.com/stretchr/testify/require" -) - -func TestHeadBuffer(t *testing.T) { - - t.Run("Straight push through", func(t *testing.T) { - hb := newHeadBuffer(5) - require.Nil(t, hb.push(&api.HeadChange{Type: "1"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "2"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "3"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "4"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "5"})) - - hc := hb.push(&api.HeadChange{Type: "6"}) - require.Equal(t, hc.Type, "1") - }) - - t.Run("Reverts", func(t *testing.T) { - hb := newHeadBuffer(5) - require.Nil(t, hb.push(&api.HeadChange{Type: "1"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "2"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "3"})) - hb.pop() - require.Nil(t, hb.push(&api.HeadChange{Type: "3a"})) - hb.pop() - require.Nil(t, hb.push(&api.HeadChange{Type: "3b"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "4"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "5"})) - - hc := hb.push(&api.HeadChange{Type: "6"}) - require.Equal(t, hc.Type, "1") - hc = hb.push(&api.HeadChange{Type: "7"}) - require.Equal(t, hc.Type, "2") - hc = hb.push(&api.HeadChange{Type: "8"}) - require.Equal(t, hc.Type, "3b") - }) -} diff --git a/tools/stats/headbuffer/head_buffer.go b/tools/stats/headbuffer/head_buffer.go new file mode 100644 index 000000000..5f668ab6e --- /dev/null +++ b/tools/stats/headbuffer/head_buffer.go @@ -0,0 +1,56 @@ +package headbuffer + +import ( + "container/list" + + "github.com/filecoin-project/lotus/api" +) + +type HeadChangeStackBuffer struct { + buffer *list.List + size int +} + +// NewHeadChangeStackBuffer buffer HeadChange events to avoid having to +// deal with revert changes. 
Initialized size should be the average reorg +// size + 1 +func NewHeadChangeStackBuffer(size int) *HeadChangeStackBuffer { + buffer := list.New() + buffer.Init() + + return &HeadChangeStackBuffer{ + buffer: buffer, + size: size, + } +} + +// Push adds a HeadChange to stack buffer. If the length of +// the stack buffer grows larger than the initizlized size, the +// oldest HeadChange is returned. +func (h *HeadChangeStackBuffer) Push(hc *api.HeadChange) (rethc *api.HeadChange) { + if h.buffer.Len() >= h.size { + var ok bool + + el := h.buffer.Front() + rethc, ok = el.Value.(*api.HeadChange) + if !ok { + // This shouldn't be possible, this method is typed and is the only place data + // pushed to the buffer. + panic("A cosmic ray made me do it") + } + + h.buffer.Remove(el) + } + + h.buffer.PushBack(hc) + + return +} + +// Pop removes the last added HeadChange +func (h *HeadChangeStackBuffer) Pop() { + el := h.buffer.Back() + if el != nil { + h.buffer.Remove(el) + } +} diff --git a/tools/stats/headbuffer/head_buffer_test.go b/tools/stats/headbuffer/head_buffer_test.go new file mode 100644 index 000000000..8a748c714 --- /dev/null +++ b/tools/stats/headbuffer/head_buffer_test.go @@ -0,0 +1,42 @@ +package headbuffer + +import ( + "testing" + + "github.com/filecoin-project/lotus/api" + "github.com/stretchr/testify/require" +) + +func TestHeadBuffer(t *testing.T) { + t.Run("Straight Push through", func(t *testing.T) { + hb := NewHeadChangeStackBuffer(5) + require.Nil(t, hb.Push(&api.HeadChange{Type: "1"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "2"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "3"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "4"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "5"})) + + hc := hb.Push(&api.HeadChange{Type: "6"}) + require.Equal(t, hc.Type, "1") + }) + + t.Run("Reverts", func(t *testing.T) { + hb := NewHeadChangeStackBuffer(5) + require.Nil(t, hb.Push(&api.HeadChange{Type: "1"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "2"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "3"})) + hb.Pop() + require.Nil(t, hb.Push(&api.HeadChange{Type: "3a"})) + hb.Pop() + require.Nil(t, hb.Push(&api.HeadChange{Type: "3b"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "4"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "5"})) + + hc := hb.Push(&api.HeadChange{Type: "6"}) + require.Equal(t, hc.Type, "1") + hc = hb.Push(&api.HeadChange{Type: "7"}) + require.Equal(t, hc.Type, "2") + hc = hb.Push(&api.HeadChange{Type: "8"}) + require.Equal(t, hc.Type, "3b") + }) +} diff --git a/tools/stats/influx/influx.go b/tools/stats/influx/influx.go new file mode 100644 index 000000000..65fb4c0b9 --- /dev/null +++ b/tools/stats/influx/influx.go @@ -0,0 +1,133 @@ +package influx + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/filecoin-project/lotus/build" + + _ "github.com/influxdata/influxdb1-client" + models "github.com/influxdata/influxdb1-client/models" + client "github.com/influxdata/influxdb1-client/v2" +) + +type PointList struct { + points []models.Point +} + +func NewPointList() *PointList { + return &PointList{} +} + +func (pl *PointList) AddPoint(p models.Point) { + pl.points = append(pl.points, p) +} + +func (pl *PointList) Points() []models.Point { + return pl.points +} + +type WriteQueue struct { + ch chan client.BatchPoints +} + +func NewWriteQueue(ctx context.Context, influx client.Client) *WriteQueue { + ch := make(chan client.BatchPoints, 128) + + maxRetries := 10 + + go func() { + main: + for { + select { + case 
<-ctx.Done(): + return + case batch := <-ch: + for i := 0; i < maxRetries; i++ { + if err := influx.Write(batch); err != nil { + log.Warnw("Failed to write batch", "error", err) + build.Clock.Sleep(3 * time.Second) + continue + } + + continue main + } + + log.Error("dropping batch due to failure to write") + } + } + }() + + return &WriteQueue{ + ch: ch, + } +} + +func (i *WriteQueue) AddBatch(bp client.BatchPoints) { + i.ch <- bp +} + +func (i *WriteQueue) Close() { + close(i.ch) +} + +func NewClient(addr, user, pass string) (client.Client, error) { + return client.NewHTTPClient(client.HTTPConfig{ + Addr: addr, + Username: user, + Password: pass, + }) +} + +func NewBatch() (client.BatchPoints, error) { + return client.NewBatchPoints(client.BatchPointsConfig{}) +} + +func NewPoint(name string, value interface{}) models.Point { + pt, _ := models.NewPoint(name, models.Tags{}, + map[string]interface{}{"value": value}, build.Clock.Now().UTC()) + return pt +} + +func NewPointFrom(p models.Point) *client.Point { + return client.NewPointFrom(p) +} + +func ResetDatabase(influx client.Client, database string) error { + log.Debug("resetting database") + q := client.NewQuery(fmt.Sprintf(`DROP DATABASE "%s"; CREATE DATABASE "%s";`, database, database), "", "") + _, err := influx.Query(q) + if err != nil { + return err + } + log.Infow("database reset", "database", database) + return nil +} + +func GetLastRecordedHeight(influx client.Client, database string) (int64, error) { + log.Debug("retrieving last record height") + q := client.NewQuery(`SELECT "value" FROM "chain.height" ORDER BY time DESC LIMIT 1`, database, "") + res, err := influx.Query(q) + if err != nil { + return 0, err + } + + if len(res.Results) == 0 { + return 0, fmt.Errorf("No results found for last recorded height") + } + + if len(res.Results[0].Series) == 0 { + return 0, fmt.Errorf("No results found for last recorded height") + } + + height, err := (res.Results[0].Series[0].Values[0][1].(json.Number)).Int64() + if err != nil { + return 0, err + } + + log.Infow("last record height", "height", height) + + return height, nil +} diff --git a/tools/stats/influx/log.go b/tools/stats/influx/log.go new file mode 100644 index 000000000..b3637d6b0 --- /dev/null +++ b/tools/stats/influx/log.go @@ -0,0 +1,7 @@ +package influx + +import ( + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("stats/influx") diff --git a/tools/stats/ipldstore/ipldstore.go b/tools/stats/ipldstore/ipldstore.go new file mode 100644 index 000000000..9adc599fd --- /dev/null +++ b/tools/stats/ipldstore/ipldstore.go @@ -0,0 +1,92 @@ +package ipldstore + +import ( + "bytes" + "context" + "fmt" + + "github.com/filecoin-project/lotus/tools/stats/metrics" + + lru "github.com/hashicorp/golang-lru" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/stats" +) + +type ApiIpldStore struct { + ctx context.Context + api apiIpldStoreApi + cache *lru.TwoQueueCache + cacheSize int +} + +type apiIpldStoreApi interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) +} + +func NewApiIpldStore(ctx context.Context, api apiIpldStoreApi, cacheSize int) (*ApiIpldStore, error) { + store := &ApiIpldStore{ + ctx: ctx, + api: api, + cacheSize: cacheSize, + } + + cache, err := lru.New2Q(store.cacheSize) + if err != nil { + return nil, err + } + + store.cache = cache + + return store, nil +} + +func (ht *ApiIpldStore) Context() context.Context { + return ht.ctx +} + +func (ht *ApiIpldStore) read(ctx context.Context, c cid.Cid) ([]byte, 
error) { + stats.Record(ctx, metrics.IpldStoreCacheMiss.M(1)) + done := metrics.Timer(ctx, metrics.IpldStoreReadDuration) + defer done() + return ht.api.ChainReadObj(ctx, c) +} + +func (ht *ApiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { + done := metrics.Timer(ctx, metrics.IpldStoreGetDuration) + defer done() + defer func() { + stats.Record(ctx, metrics.IpldStoreCacheSize.M(int64(ht.cacheSize))) + stats.Record(ctx, metrics.IpldStoreCacheLength.M(int64(ht.cache.Len()))) + }() + + var raw []byte + + if a, ok := ht.cache.Get(c); ok { + stats.Record(ctx, metrics.IpldStoreCacheHit.M(1)) + raw = a.([]byte) + } else { + bs, err := ht.read(ctx, c) + if err != nil { + return err + } + + raw = bs + } + + cu, ok := out.(cbg.CBORUnmarshaler) + if ok { + if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil { + return err + } + + ht.cache.Add(c, raw) + return nil + } + + return fmt.Errorf("Object does not implement CBORUnmarshaler") +} + +func (ht *ApiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { + return cid.Undef, fmt.Errorf("Put is not implemented on ApiIpldStore") +} diff --git a/tools/stats/metrics.go b/tools/stats/metrics.go deleted file mode 100644 index ca3f26336..000000000 --- a/tools/stats/metrics.go +++ /dev/null @@ -1,418 +0,0 @@ -package stats - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "math" - "math/big" - "strings" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/power" - "github.com/filecoin-project/lotus/chain/actors/builtin/reward" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" - "golang.org/x/xerrors" - - cbg "github.com/whyrusleeping/cbor-gen" - - _ "github.com/influxdata/influxdb1-client" - models "github.com/influxdata/influxdb1-client/models" - client "github.com/influxdata/influxdb1-client/v2" - - logging "github.com/ipfs/go-log/v2" -) - -var log = logging.Logger("stats") - -type PointList struct { - points []models.Point -} - -func NewPointList() *PointList { - return &PointList{} -} - -func (pl *PointList) AddPoint(p models.Point) { - pl.points = append(pl.points, p) -} - -func (pl *PointList) Points() []models.Point { - return pl.points -} - -type InfluxWriteQueue struct { - ch chan client.BatchPoints -} - -func NewInfluxWriteQueue(ctx context.Context, influx client.Client) *InfluxWriteQueue { - ch := make(chan client.BatchPoints, 128) - - maxRetries := 10 - - go func() { - main: - for { - select { - case <-ctx.Done(): - return - case batch := <-ch: - for i := 0; i < maxRetries; i++ { - if err := influx.Write(batch); err != nil { - log.Warnw("Failed to write batch", "error", err) - build.Clock.Sleep(15 * time.Second) - continue - } - - continue main - } - - log.Error("Dropping batch due to failure to write") - } - } - }() - - return &InfluxWriteQueue{ - ch: ch, - } -} - -func (i *InfluxWriteQueue) AddBatch(bp client.BatchPoints) { - i.ch <- bp -} - -func (i *InfluxWriteQueue) Close() { - close(i.ch) -} - -func InfluxClient(addr, user, pass string) (client.Client, error) { - return client.NewHTTPClient(client.HTTPConfig{ - Addr: addr, - Username: user, - Password: pass, - }) -} - -func InfluxNewBatch() (client.BatchPoints, error) { - return client.NewBatchPoints(client.BatchPointsConfig{}) -} - -func NewPoint(name string, value 
interface{}) models.Point { - pt, _ := models.NewPoint(name, models.Tags{}, - map[string]interface{}{"value": value}, build.Clock.Now().UTC()) - return pt -} - -func NewPointFrom(p models.Point) *client.Point { - return client.NewPointFrom(p) -} - -func RecordTipsetPoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { - cids := []string{} - for _, cid := range tipset.Cids() { - cids = append(cids, cid.String()) - } - - p := NewPoint("chain.height", int64(tipset.Height())) - p.AddTag("tipset", strings.Join(cids, " ")) - pl.AddPoint(p) - - p = NewPoint("chain.block_count", len(cids)) - pl.AddPoint(p) - - tsTime := time.Unix(int64(tipset.MinTimestamp()), int64(0)) - p = NewPoint("chain.blocktime", tsTime.Unix()) - pl.AddPoint(p) - - totalGasLimit := int64(0) - totalUniqGasLimit := int64(0) - seen := make(map[cid.Cid]struct{}) - for _, blockheader := range tipset.Blocks() { - bs, err := blockheader.Serialize() - if err != nil { - return err - } - p := NewPoint("chain.election", blockheader.ElectionProof.WinCount) - p.AddTag("miner", blockheader.Miner.String()) - pl.AddPoint(p) - - p = NewPoint("chain.blockheader_size", len(bs)) - pl.AddPoint(p) - - msgs, err := api.ChainGetBlockMessages(ctx, blockheader.Cid()) - if err != nil { - return xerrors.Errorf("ChainGetBlockMessages failed: %w", msgs) - } - for _, m := range msgs.BlsMessages { - c := m.Cid() - totalGasLimit += m.GasLimit - if _, ok := seen[c]; !ok { - totalUniqGasLimit += m.GasLimit - seen[c] = struct{}{} - } - } - for _, m := range msgs.SecpkMessages { - c := m.Cid() - totalGasLimit += m.Message.GasLimit - if _, ok := seen[c]; !ok { - totalUniqGasLimit += m.Message.GasLimit - seen[c] = struct{}{} - } - } - } - p = NewPoint("chain.gas_limit_total", totalGasLimit) - pl.AddPoint(p) - p = NewPoint("chain.gas_limit_uniq_total", totalUniqGasLimit) - pl.AddPoint(p) - - { - baseFeeIn := tipset.Blocks()[0].ParentBaseFee - newBaseFee := store.ComputeNextBaseFee(baseFeeIn, totalUniqGasLimit, len(tipset.Blocks()), tipset.Height()) - - baseFeeRat := new(big.Rat).SetFrac(newBaseFee.Int, new(big.Int).SetUint64(build.FilecoinPrecision)) - baseFeeFloat, _ := baseFeeRat.Float64() - p = NewPoint("chain.basefee", baseFeeFloat) - pl.AddPoint(p) - - baseFeeChange := new(big.Rat).SetFrac(newBaseFee.Int, baseFeeIn.Int) - baseFeeChangeF, _ := baseFeeChange.Float64() - p = NewPoint("chain.basefee_change_log", math.Log(baseFeeChangeF)/math.Log(1.125)) - pl.AddPoint(p) - } - { - blks := int64(len(cids)) - p = NewPoint("chain.gas_fill_ratio", float64(totalGasLimit)/float64(blks*build.BlockGasTarget)) - pl.AddPoint(p) - p = NewPoint("chain.gas_capacity_ratio", float64(totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) - pl.AddPoint(p) - p = NewPoint("chain.gas_waste_ratio", float64(totalGasLimit-totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) - pl.AddPoint(p) - } - - return nil -} - -type ApiIpldStore struct { - ctx context.Context - api apiIpldStoreApi -} - -type apiIpldStoreApi interface { - ChainReadObj(context.Context, cid.Cid) ([]byte, error) -} - -func NewApiIpldStore(ctx context.Context, api apiIpldStoreApi) *ApiIpldStore { - return &ApiIpldStore{ctx, api} -} - -func (ht *ApiIpldStore) Context() context.Context { - return ht.ctx -} - -func (ht *ApiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { - raw, err := ht.api.ChainReadObj(ctx, c) - if err != nil { - return err - } - - cu, ok := out.(cbg.CBORUnmarshaler) - if ok { - if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil { - 
return err - } - return nil - } - - return fmt.Errorf("Object does not implement CBORUnmarshaler") -} - -func (ht *ApiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { - return cid.Undef, fmt.Errorf("Put is not implemented on ApiIpldStore") -} - -func RecordTipsetStatePoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { - attoFil := types.NewInt(build.FilecoinPrecision).Int - - //TODO: StatePledgeCollateral API is not implemented and is commented out - re-enable this block once the API is implemented again. - //pc, err := api.StatePledgeCollateral(ctx, tipset.Key()) - //if err != nil { - //return err - //} - - //pcFil := new(big.Rat).SetFrac(pc.Int, attoFil) - //pcFilFloat, _ := pcFil.Float64() - //p := NewPoint("chain.pledge_collateral", pcFilFloat) - //pl.AddPoint(p) - - netBal, err := api.WalletBalance(ctx, reward.Address) - if err != nil { - return err - } - - netBalFil := new(big.Rat).SetFrac(netBal.Int, attoFil) - netBalFilFloat, _ := netBalFil.Float64() - p := NewPoint("network.balance", netBalFilFloat) - pl.AddPoint(p) - - totalPower, err := api.StateMinerPower(ctx, address.Address{}, tipset.Key()) - if err != nil { - return err - } - - // We divide the power into gibibytes because 2^63 bytes is 8 exbibytes which is smaller than the Filecoin Mainnet. - // Dividing by a gibibyte gives us more room to work with. This will allow the dashboard to report network and miner - // sizes up to 8192 yobibytes. - gibi := types.NewInt(1024 * 1024 * 1024) - p = NewPoint("chain.power", types.BigDiv(totalPower.TotalPower.QualityAdjPower, gibi).Int64()) - pl.AddPoint(p) - - powerActor, err := api.StateGetActor(ctx, power.Address, tipset.Key()) - if err != nil { - return err - } - - powerActorState, err := power.Load(&ApiIpldStore{ctx, api}, powerActor) - if err != nil { - return err - } - - return powerActorState.ForEachClaim(func(addr address.Address, claim power.Claim) error { - // BigCmp returns 0 if values are equal - if types.BigCmp(claim.QualityAdjPower, types.NewInt(0)) == 0 { - return nil - } - - p = NewPoint("chain.miner_power", types.BigDiv(claim.QualityAdjPower, gibi).Int64()) - p.AddTag("miner", addr.String()) - pl.AddPoint(p) - - return nil - }) -} - -type msgTag struct { - actor string - method uint64 - exitcode uint8 -} - -func RecordTipsetMessagesPoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { - cids := tipset.Cids() - if len(cids) == 0 { - return fmt.Errorf("no cids in tipset") - } - - msgs, err := api.ChainGetParentMessages(ctx, cids[0]) - if err != nil { - return err - } - - recp, err := api.ChainGetParentReceipts(ctx, cids[0]) - if err != nil { - return err - } - - msgn := make(map[msgTag][]cid.Cid) - - totalGasUsed := int64(0) - for _, r := range recp { - totalGasUsed += r.GasUsed - } - p := NewPoint("chain.gas_used_total", totalGasUsed) - pl.AddPoint(p) - - for i, msg := range msgs { - // FIXME: use float so this doesn't overflow - // FIXME: this doesn't work as time points get overridden - p := NewPoint("chain.message_gaspremium", msg.Message.GasPremium.Int64()) - pl.AddPoint(p) - p = NewPoint("chain.message_gasfeecap", msg.Message.GasFeeCap.Int64()) - pl.AddPoint(p) - - bs, err := msg.Message.Serialize() - if err != nil { - return err - } - - p = NewPoint("chain.message_size", len(bs)) - pl.AddPoint(p) - - actor, err := api.StateGetActor(ctx, msg.Message.To, tipset.Key()) - if err != nil { - return err - } - - dm, err := multihash.Decode(actor.Code.Hash()) - if err != nil 
{ - continue - } - tag := msgTag{ - actor: string(dm.Digest), - method: uint64(msg.Message.Method), - exitcode: uint8(recp[i].ExitCode), - } - - found := false - for _, c := range msgn[tag] { - if c.Equals(msg.Cid) { - found = true - break - } - } - if !found { - msgn[tag] = append(msgn[tag], msg.Cid) - } - } - - for t, m := range msgn { - p := NewPoint("chain.message_count", len(m)) - p.AddTag("actor", t.actor) - p.AddTag("method", fmt.Sprintf("%d", t.method)) - p.AddTag("exitcode", fmt.Sprintf("%d", t.exitcode)) - pl.AddPoint(p) - - } - - return nil -} - -func ResetDatabase(influx client.Client, database string) error { - log.Info("Resetting database") - q := client.NewQuery(fmt.Sprintf(`DROP DATABASE "%s"; CREATE DATABASE "%s";`, database, database), "", "") - _, err := influx.Query(q) - return err -} - -func GetLastRecordedHeight(influx client.Client, database string) (int64, error) { - log.Info("Retrieving last record height") - q := client.NewQuery(`SELECT "value" FROM "chain.height" ORDER BY time DESC LIMIT 1`, database, "") - res, err := influx.Query(q) - if err != nil { - return 0, err - } - - if len(res.Results) == 0 { - return 0, fmt.Errorf("No results found for last recorded height") - } - - if len(res.Results[0].Series) == 0 { - return 0, fmt.Errorf("No results found for last recorded height") - } - - height, err := (res.Results[0].Series[0].Values[0][1].(json.Number)).Int64() - if err != nil { - return 0, err - } - - log.Infow("Last record height", "height", height) - - return height, nil -} diff --git a/tools/stats/metrics/metrics.go b/tools/stats/metrics/metrics.go new file mode 100644 index 000000000..e5178def1 --- /dev/null +++ b/tools/stats/metrics/metrics.go @@ -0,0 +1,110 @@ +package metrics + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + + "github.com/filecoin-project/lotus/metrics" +) + +var Timer = metrics.Timer +var SinceInMilliseconds = metrics.SinceInMilliseconds + +// Distribution +var ( + defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 32, 64, 128, 256, 500, 1000, 2000, 3000, 5000, 10000, 20000, 30000, 40000, 50000, 60000) +) + +// Global Tags +var () + +// Measures +var ( + TipsetCollectionHeight = stats.Int64("tipset_collection/height", "Current Height of the node", stats.UnitDimensionless) + TipsetCollectionHeightExpected = stats.Int64("tipset_collection/height_expected", "Current Height of the node", stats.UnitDimensionless) + TipsetCollectionPoints = stats.Int64("tipset_collection/points", "Counter for total number of points collected", stats.UnitDimensionless) + TipsetCollectionDuration = stats.Float64("tipset_collection/total_ms", "Duration of tipset point collection", stats.UnitMilliseconds) + TipsetCollectionBlockHeaderDuration = stats.Float64("tipset_collection/block_header_ms", "Duration of block header point collection", stats.UnitMilliseconds) + TipsetCollectionMessageDuration = stats.Float64("tipset_collection/message_ms", "Duration of message point collection", stats.UnitMilliseconds) + TipsetCollectionStaterootDuration = stats.Float64("tipset_collection/stateroot_ms", "Duration of stateroot point collection", stats.UnitMilliseconds) + IpldStoreCacheSize = stats.Int64("ipld_store/cache_size", "Initialized size of the object read cache", stats.UnitDimensionless) + IpldStoreCacheLength = stats.Int64("ipld_store/cache_length", "Current length of object read cache", stats.UnitDimensionless) + IpldStoreCacheHit = stats.Int64("ipld_store/cache_hit", "Counter 
for total cache hits", stats.UnitDimensionless) + IpldStoreCacheMiss = stats.Int64("ipld_store/cache_miss", "Counter for total cache misses", stats.UnitDimensionless) + IpldStoreReadDuration = stats.Float64("ipld_store/read_ms", "Duration of object read request to lotus", stats.UnitMilliseconds) + IpldStoreGetDuration = stats.Float64("ipld_store/get_ms", "Duration of object get from store", stats.UnitMilliseconds) + WriteQueueSize = stats.Int64("write_queue/length", "Current length of the write queue", stats.UnitDimensionless) +) + +// Views +var ( + TipsetCollectionHeightView = &view.View{ + Measure: TipsetCollectionHeight, + Aggregation: view.LastValue(), + } + TipsetCollectionHeightExpectedView = &view.View{ + Measure: TipsetCollectionHeightExpected, + Aggregation: view.LastValue(), + } + TipsetCollectionPointsView = &view.View{ + Measure: TipsetCollectionPoints, + Aggregation: view.Sum(), + } + TipsetCollectionDurationView = &view.View{ + Measure: TipsetCollectionDuration, + Aggregation: defaultMillisecondsDistribution, + } + TipsetCollectionBlockHeaderDurationView = &view.View{ + Measure: TipsetCollectionBlockHeaderDuration, + Aggregation: defaultMillisecondsDistribution, + } + TipsetCollectionMessageDurationView = &view.View{ + Measure: TipsetCollectionMessageDuration, + Aggregation: defaultMillisecondsDistribution, + } + TipsetCollectionStaterootDurationView = &view.View{ + Measure: TipsetCollectionStaterootDuration, + Aggregation: defaultMillisecondsDistribution, + } + IpldStoreCacheSizeView = &view.View{ + Measure: IpldStoreCacheSize, + Aggregation: view.LastValue(), + } + IpldStoreCacheLengthView = &view.View{ + Measure: IpldStoreCacheLength, + Aggregation: view.LastValue(), + } + IpldStoreCacheHitView = &view.View{ + Measure: IpldStoreCacheHit, + Aggregation: view.Count(), + } + IpldStoreCacheMissView = &view.View{ + Measure: IpldStoreCacheMiss, + Aggregation: view.Count(), + } + IpldStoreReadDurationView = &view.View{ + Measure: IpldStoreReadDuration, + Aggregation: defaultMillisecondsDistribution, + } + IpldStoreGetDurationView = &view.View{ + Measure: IpldStoreGetDuration, + Aggregation: defaultMillisecondsDistribution, + } +) + +// DefaultViews is an array of OpenCensus views for metric gathering purposes +var DefaultViews = []*view.View{ + TipsetCollectionHeightView, + TipsetCollectionHeightExpectedView, + TipsetCollectionPointsView, + TipsetCollectionDurationView, + TipsetCollectionBlockHeaderDurationView, + TipsetCollectionMessageDurationView, + TipsetCollectionStaterootDurationView, + IpldStoreCacheSizeView, + IpldStoreCacheLengthView, + IpldStoreCacheHitView, + IpldStoreCacheMissView, + IpldStoreReadDurationView, + IpldStoreGetDurationView, +} diff --git a/tools/stats/points/collect.go b/tools/stats/points/collect.go new file mode 100644 index 000000000..a7c37fcd9 --- /dev/null +++ b/tools/stats/points/collect.go @@ -0,0 +1,363 @@ +package points + +import ( + "context" + "fmt" + "math" + "math/big" + "strings" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/tools/stats/influx" + "github.com/filecoin-project/lotus/tools/stats/metrics" + + lru 
"github.com/hashicorp/golang-lru" + client "github.com/influxdata/influxdb1-client/v2" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" + "go.opencensus.io/stats" + "golang.org/x/xerrors" +) + +type LotusApi interface { + WalletBalance(context.Context, address.Address) (types.BigInt, error) + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) + ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) + ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*api.BlockMessages, error) +} + +type ChainPointCollector struct { + ctx context.Context + api LotusApi + store adt.Store + actorDigestCache *lru.TwoQueueCache +} + +func NewChainPointCollector(ctx context.Context, store adt.Store, api LotusApi) (*ChainPointCollector, error) { + actorDigestCache, err := lru.New2Q(2 << 15) + if err != nil { + return nil, err + } + + collector := &ChainPointCollector{ + ctx: ctx, + store: store, + actorDigestCache: actorDigestCache, + api: api, + } + + return collector, nil +} + +func (c *ChainPointCollector) actorDigest(ctx context.Context, addr address.Address, tipset *types.TipSet) (string, error) { + if code, ok := c.actorDigestCache.Get(addr); ok { + return code.(string), nil + } + + actor, err := c.api.StateGetActor(ctx, addr, tipset.Key()) + if err != nil { + return "", err + } + + dm, err := multihash.Decode(actor.Code.Hash()) + if err != nil { + return "", err + } + + digest := string(dm.Digest) + c.actorDigestCache.Add(addr, digest) + + return digest, nil +} + +func (c *ChainPointCollector) Collect(ctx context.Context, tipset *types.TipSet) (client.BatchPoints, error) { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionDuration) + defer func() { + log.Infow("record tipset", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + pl := influx.NewPointList() + height := tipset.Height() + + log.Debugw("collecting tipset points", "height", tipset.Height()) + stats.Record(ctx, metrics.TipsetCollectionHeight.M(int64(height))) + + if err := c.collectBlockheaderPoints(ctx, pl, tipset); err != nil { + log.Errorw("failed to record tipset", "height", height, "error", err, "tipset", tipset.Key()) + } + + if err := c.collectMessagePoints(ctx, pl, tipset); err != nil { + log.Errorw("failed to record messages", "height", height, "error", err, "tipset", tipset.Key()) + } + + if err := c.collectStaterootPoints(ctx, pl, tipset); err != nil { + log.Errorw("failed to record state", "height", height, "error", err, "tipset", tipset.Key()) + } + + tsTimestamp := time.Unix(int64(tipset.MinTimestamp()), int64(0)) + + nb, err := influx.NewBatch() + if err != nil { + return nil, err + } + + for _, pt := range pl.Points() { + pt.SetTime(tsTimestamp) + nb.AddPoint(influx.NewPointFrom(pt)) + } + + log.Infow("collected tipset points", "count", len(nb.Points()), "height", tipset.Height()) + + stats.Record(ctx, metrics.TipsetCollectionPoints.M(int64(len(nb.Points())))) + + return nb, nil +} + +func (c *ChainPointCollector) collectBlockheaderPoints(ctx context.Context, pl *influx.PointList, tipset *types.TipSet) error { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionBlockHeaderDuration) + defer func() { + log.Infow("collect blockheader points", "elapsed", 
time.Now().Sub(start).Seconds()) + done() + }() + + cids := []string{} + for _, cid := range tipset.Cids() { + cids = append(cids, cid.String()) + } + + p := influx.NewPoint("chain.height", int64(tipset.Height())) + p.AddTag("tipset", strings.Join(cids, " ")) + pl.AddPoint(p) + + p = influx.NewPoint("chain.block_count", len(cids)) + pl.AddPoint(p) + + tsTime := time.Unix(int64(tipset.MinTimestamp()), int64(0)) + p = influx.NewPoint("chain.blocktime", tsTime.Unix()) + pl.AddPoint(p) + + totalGasLimit := int64(0) + totalUniqGasLimit := int64(0) + seen := make(map[cid.Cid]struct{}) + for _, blockheader := range tipset.Blocks() { + bs, err := blockheader.Serialize() + if err != nil { + return err + } + p := influx.NewPoint("chain.election", blockheader.ElectionProof.WinCount) + p.AddTag("miner", blockheader.Miner.String()) + pl.AddPoint(p) + + p = influx.NewPoint("chain.blockheader_size", len(bs)) + pl.AddPoint(p) + + msgs, err := c.api.ChainGetBlockMessages(ctx, blockheader.Cid()) + if err != nil { + return xerrors.Errorf("ChainGetBlockMessages failed: %w", err) + } + for _, m := range msgs.BlsMessages { + c := m.Cid() + totalGasLimit += m.GasLimit + if _, ok := seen[c]; !ok { + totalUniqGasLimit += m.GasLimit + seen[c] = struct{}{} + } + } + for _, m := range msgs.SecpkMessages { + c := m.Cid() + totalGasLimit += m.Message.GasLimit + if _, ok := seen[c]; !ok { + totalUniqGasLimit += m.Message.GasLimit + seen[c] = struct{}{} + } + } + } + p = influx.NewPoint("chain.gas_limit_total", totalGasLimit) + pl.AddPoint(p) + p = influx.NewPoint("chain.gas_limit_uniq_total", totalUniqGasLimit) + pl.AddPoint(p) + + { + baseFeeIn := tipset.Blocks()[0].ParentBaseFee + newBaseFee := store.ComputeNextBaseFee(baseFeeIn, totalUniqGasLimit, len(tipset.Blocks()), tipset.Height()) + + baseFeeRat := new(big.Rat).SetFrac(newBaseFee.Int, new(big.Int).SetUint64(build.FilecoinPrecision)) + baseFeeFloat, _ := baseFeeRat.Float64() + p = influx.NewPoint("chain.basefee", baseFeeFloat) + pl.AddPoint(p) + + baseFeeChange := new(big.Rat).SetFrac(newBaseFee.Int, baseFeeIn.Int) + baseFeeChangeF, _ := baseFeeChange.Float64() + p = influx.NewPoint("chain.basefee_change_log", math.Log(baseFeeChangeF)/math.Log(1.125)) + pl.AddPoint(p) + } + { + blks := int64(len(cids)) + p = influx.NewPoint("chain.gas_fill_ratio", float64(totalGasLimit)/float64(blks*build.BlockGasTarget)) + pl.AddPoint(p) + p = influx.NewPoint("chain.gas_capacity_ratio", float64(totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) + pl.AddPoint(p) + p = influx.NewPoint("chain.gas_waste_ratio", float64(totalGasLimit-totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) + pl.AddPoint(p) + } + + return nil +} + +func (c *ChainPointCollector) collectStaterootPoints(ctx context.Context, pl *influx.PointList, tipset *types.TipSet) error { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionStaterootDuration) + defer func() { + log.Infow("collect stateroot points", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + attoFil := types.NewInt(build.FilecoinPrecision).Int + + netBal, err := c.api.WalletBalance(ctx, reward.Address) + if err != nil { + return err + } + + netBalFil := new(big.Rat).SetFrac(netBal.Int, attoFil) + netBalFilFloat, _ := netBalFil.Float64() + p := influx.NewPoint("network.balance", netBalFilFloat) + pl.AddPoint(p) + + totalPower, err := c.api.StateMinerPower(ctx, address.Address{}, tipset.Key()) + if err != nil { + return err + } + + // We divide the power into gibibytes because 2^63 bytes is 8 exbibytes which 
is smaller than the Filecoin Mainnet. + // Dividing by a gibibyte gives us more room to work with. This will allow the dashboard to report network and miner + // sizes up to 8192 yobibytes. + gibi := types.NewInt(1024 * 1024 * 1024) + p = influx.NewPoint("chain.power", types.BigDiv(totalPower.TotalPower.QualityAdjPower, gibi).Int64()) + pl.AddPoint(p) + + powerActor, err := c.api.StateGetActor(ctx, power.Address, tipset.Key()) + if err != nil { + return err + } + + powerActorState, err := power.Load(c.store, powerActor) + if err != nil { + return err + } + + return powerActorState.ForEachClaim(func(addr address.Address, claim power.Claim) error { + // BigCmp returns 0 if values are equal + if types.BigCmp(claim.QualityAdjPower, types.NewInt(0)) == 0 { + return nil + } + + p = influx.NewPoint("chain.miner_power", types.BigDiv(claim.QualityAdjPower, gibi).Int64()) + p.AddTag("miner", addr.String()) + pl.AddPoint(p) + + return nil + }) +} + +type msgTag struct { + actor string + method uint64 + exitcode uint8 +} + +func (c *ChainPointCollector) collectMessagePoints(ctx context.Context, pl *influx.PointList, tipset *types.TipSet) error { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionMessageDuration) + defer func() { + log.Infow("collect message points", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + cids := tipset.Cids() + if len(cids) == 0 { + return fmt.Errorf("no cids in tipset") + } + + msgs, err := c.api.ChainGetParentMessages(ctx, cids[0]) + if err != nil { + return err + } + + recp, err := c.api.ChainGetParentReceipts(ctx, cids[0]) + if err != nil { + return err + } + + msgn := make(map[msgTag][]cid.Cid) + + totalGasUsed := int64(0) + for _, r := range recp { + totalGasUsed += r.GasUsed + } + p := influx.NewPoint("chain.gas_used_total", totalGasUsed) + pl.AddPoint(p) + + for i, msg := range msgs { + digest, err := c.actorDigest(ctx, msg.Message.To, tipset) + if err != nil { + continue + } + + // FIXME: use float so this doesn't overflow + // FIXME: this doesn't work as time points get overridden + p := influx.NewPoint("chain.message_gaspremium", msg.Message.GasPremium.Int64()) + pl.AddPoint(p) + p = influx.NewPoint("chain.message_gasfeecap", msg.Message.GasFeeCap.Int64()) + pl.AddPoint(p) + + bs, err := msg.Message.Serialize() + if err != nil { + return err + } + + p = influx.NewPoint("chain.message_size", len(bs)) + pl.AddPoint(p) + + tag := msgTag{ + actor: digest, + method: uint64(msg.Message.Method), + exitcode: uint8(recp[i].ExitCode), + } + + found := false + for _, c := range msgn[tag] { + if c.Equals(msg.Cid) { + found = true + break + } + } + if !found { + msgn[tag] = append(msgn[tag], msg.Cid) + } + } + + for t, m := range msgn { + p := influx.NewPoint("chain.message_count", len(m)) + p.AddTag("actor", t.actor) + p.AddTag("method", fmt.Sprintf("%d", t.method)) + p.AddTag("exitcode", fmt.Sprintf("%d", t.exitcode)) + pl.AddPoint(p) + + } + + return nil +} diff --git a/tools/stats/points/log.go b/tools/stats/points/log.go new file mode 100644 index 000000000..e0cb795c0 --- /dev/null +++ b/tools/stats/points/log.go @@ -0,0 +1,7 @@ +package points + +import ( + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("stats/points") diff --git a/tools/stats/rpc.go b/tools/stats/rpc.go deleted file mode 100644 index 4e503cb39..000000000 --- a/tools/stats/rpc.go +++ /dev/null @@ -1,228 +0,0 @@ -package stats - -import ( - "context" - "net/http" - "time" - - "github.com/filecoin-project/go-jsonrpc" - 
"github.com/filecoin-project/go-state-types/abi" - manet "github.com/multiformats/go-multiaddr/net" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/repo" -) - -func getAPI(path string) (string, http.Header, error) { - r, err := repo.NewFS(path) - if err != nil { - return "", nil, err - } - - ma, err := r.APIEndpoint() - if err != nil { - return "", nil, xerrors.Errorf("failed to get api endpoint: %w", err) - } - _, addr, err := manet.DialArgs(ma) - if err != nil { - return "", nil, err - } - var headers http.Header - token, err := r.APIToken() - if err != nil { - log.Warnw("Couldn't load CLI token, capabilities may be limited", "error", err) - } else { - headers = http.Header{} - headers.Add("Authorization", "Bearer "+string(token)) - } - - return "ws://" + addr + "/rpc/v0", headers, nil -} - -func WaitForSyncComplete(ctx context.Context, napi v0api.FullNode) error { -sync_complete: - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-build.Clock.After(5 * time.Second): - state, err := napi.SyncState(ctx) - if err != nil { - return err - } - - for i, w := range state.ActiveSyncs { - if w.Target == nil { - continue - } - - if w.Stage == api.StageSyncErrored { - log.Errorw( - "Syncing", - "worker", i, - "base", w.Base.Key(), - "target", w.Target.Key(), - "target_height", w.Target.Height(), - "height", w.Height, - "error", w.Message, - "stage", w.Stage.String(), - ) - } else { - log.Infow( - "Syncing", - "worker", i, - "base", w.Base.Key(), - "target", w.Target.Key(), - "target_height", w.Target.Height(), - "height", w.Height, - "stage", w.Stage.String(), - ) - } - - if w.Stage == api.StageSyncComplete { - break sync_complete - } - } - } - } - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-build.Clock.After(5 * time.Second): - head, err := napi.ChainHead(ctx) - if err != nil { - return err - } - - timestampDelta := build.Clock.Now().Unix() - int64(head.MinTimestamp()) - - log.Infow( - "Waiting for reasonable head height", - "height", head.Height(), - "timestamp_delta", timestampDelta, - ) - - // If we get within 20 blocks of the current exected block height we - // consider sync complete. 
Block propagation is not always great but we still - // want to be recording stats as soon as we can - if timestampDelta < int64(build.BlockDelaySecs)*20 { - return nil - } - } - } -} - -func GetTips(ctx context.Context, api v0api.FullNode, lastHeight abi.ChainEpoch, headlag int) (<-chan *types.TipSet, error) { - chmain := make(chan *types.TipSet) - - hb := newHeadBuffer(headlag) - - notif, err := api.ChainNotify(ctx) - if err != nil { - return nil, err - } - - go func() { - defer close(chmain) - - ticker := time.NewTicker(30 * time.Second) - defer ticker.Stop() - - for { - select { - case changes, ok := <-notif: - if !ok { - return - } - for _, change := range changes { - log.Infow("Head event", "height", change.Val.Height(), "type", change.Type) - - switch change.Type { - case store.HCCurrent: - tipsets, err := loadTipsets(ctx, api, change.Val, lastHeight) - if err != nil { - log.Info(err) - return - } - - for _, tipset := range tipsets { - chmain <- tipset - } - case store.HCApply: - if out := hb.push(change); out != nil { - chmain <- out.Val - } - case store.HCRevert: - hb.pop() - } - } - case <-ticker.C: - log.Info("Running health check") - - cctx, cancel := context.WithTimeout(ctx, 5*time.Second) - - if _, err := api.ID(cctx); err != nil { - log.Error("Health check failed") - cancel() - return - } - - cancel() - - log.Info("Node online") - case <-ctx.Done(): - return - } - } - }() - - return chmain, nil -} - -func loadTipsets(ctx context.Context, api v0api.FullNode, curr *types.TipSet, lowestHeight abi.ChainEpoch) ([]*types.TipSet, error) { - tipsets := []*types.TipSet{} - for { - if curr.Height() == 0 { - break - } - - if curr.Height() <= lowestHeight { - break - } - - log.Infow("Walking back", "height", curr.Height()) - tipsets = append(tipsets, curr) - - tsk := curr.Parents() - prev, err := api.ChainGetTipSet(ctx, tsk) - if err != nil { - return tipsets, err - } - - curr = prev - } - - for i, j := 0, len(tipsets)-1; i < j; i, j = i+1, j-1 { - tipsets[i], tipsets[j] = tipsets[j], tipsets[i] - } - - return tipsets, nil -} - -func GetFullNodeAPI(ctx context.Context, repo string) (v0api.FullNode, jsonrpc.ClientCloser, error) { - addr, headers, err := getAPI(repo) - if err != nil { - return nil, nil, err - } - - return client.NewFullNodeRPCV0(ctx, addr, headers) -} diff --git a/tools/stats/sync/log.go b/tools/stats/sync/log.go new file mode 100644 index 000000000..1c2233cc8 --- /dev/null +++ b/tools/stats/sync/log.go @@ -0,0 +1,7 @@ +package sync + +import ( + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("stats/sync") diff --git a/tools/stats/sync/sync.go b/tools/stats/sync/sync.go new file mode 100644 index 000000000..c8db1c543 --- /dev/null +++ b/tools/stats/sync/sync.go @@ -0,0 +1,192 @@ +package sync + +import ( + "context" + "time" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/tools/stats/headbuffer" +) + +type SyncWaitApi interface { + SyncState(context.Context) (*api.SyncState, error) + ChainHead(context.Context) (*types.TipSet, error) +} + +// SyncWait returns when ChainHead is within 20 epochs of the expected height +func SyncWait(ctx context.Context, napi SyncWaitApi) error { + for { + state, err := napi.SyncState(ctx) + if err != nil { + return err + } + + if len(state.ActiveSyncs) == 0 { + build.Clock.Sleep(time.Second) + 
continue + } + + head, err := napi.ChainHead(ctx) + if err != nil { + return err + } + + working := -1 + for i, ss := range state.ActiveSyncs { + switch ss.Stage { + case api.StageSyncComplete: + default: + working = i + case api.StageIdle: + // not complete, not actively working + } + } + + if working == -1 { + working = len(state.ActiveSyncs) - 1 + } + + ss := state.ActiveSyncs[working] + + if ss.Base == nil || ss.Target == nil { + log.Infow( + "syncing", + "height", ss.Height, + "stage", ss.Stage.String(), + ) + } else { + log.Infow( + "syncing", + "base", ss.Base.Key(), + "target", ss.Target.Key(), + "target_height", ss.Target.Height(), + "height", ss.Height, + "stage", ss.Stage.String(), + ) + } + + if build.Clock.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs)*30 { + break + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-build.Clock.After(time.Duration(int64(build.BlockDelaySecs) * int64(time.Second))): + } + } + + return nil +} + +type BufferedTipsetChannelApi interface { + ChainNotify(context.Context) (<-chan []*api.HeadChange, error) + Version(context.Context) (api.APIVersion, error) + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + +// BufferedTipsetChannel returns an unbuffered channel of tipsets. Buffering occurs internally to handle revert +// ChainNotify changes. The returned channel can output tipsets at the same height twice if a reorg larger than the +// provided `size` occurs. +func BufferedTipsetChannel(ctx context.Context, api BufferedTipsetChannelApi, lastHeight abi.ChainEpoch, size int) (<-chan *types.TipSet, error) { + chmain := make(chan *types.TipSet) + + hb := headbuffer.NewHeadChangeStackBuffer(size) + + notif, err := api.ChainNotify(ctx) + if err != nil { + return nil, err + } + + go func() { + defer close(chmain) + + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + for { + select { + case changes, ok := <-notif: + if !ok { + return + } + for _, change := range changes { + log.Debugw("head event", "height", change.Val.Height(), "type", change.Type) + + switch change.Type { + case store.HCCurrent: + tipsets, err := loadTipsets(ctx, api, change.Val, lastHeight) + if err != nil { + log.Info(err) + return + } + + for _, tipset := range tipsets { + chmain <- tipset + } + case store.HCApply: + if out := hb.Push(change); out != nil { + chmain <- out.Val + } + case store.HCRevert: + hb.Pop() + } + } + case <-ticker.C: + log.Debug("running health check") + + cctx, cancel := context.WithTimeout(ctx, 5*time.Second) + + if _, err := api.Version(cctx); err != nil { + log.Error("health check failed") + cancel() + return + } + + cancel() + + log.Debug("node online") + case <-ctx.Done(): + return + } + } + }() + + return chmain, nil +} + +func loadTipsets(ctx context.Context, api BufferedTipsetChannelApi, curr *types.TipSet, lowestHeight abi.ChainEpoch) ([]*types.TipSet, error) { + log.Infow("loading tipsets", "to_height", lowestHeight, "from_height", curr.Height()) + tipsets := []*types.TipSet{} + for { + if curr.Height() == 0 { + break + } + + if curr.Height() <= lowestHeight { + break + } + + log.Debugw("walking back", "height", curr.Height()) + tipsets = append(tipsets, curr) + + tsk := curr.Parents() + prev, err := api.ChainGetTipSet(ctx, tsk) + if err != nil { + return tipsets, err + } + + curr = prev + } + + for i, j := 0, len(tipsets)-1; i < j; i, j = i+1, j-1 { + tipsets[i], tipsets[j] = tipsets[j], tipsets[i] + } + + return tipsets, nil +}
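
For reference, a minimal sketch of how the new tools/stats packages introduced above might be wired together. Only SyncWait, BufferedTipsetChannel, NewChainPointCollector and Collect come from this patch; the v0api.FullNode client, the adt.Store argument, the InfluxDB HTTP client setup and the collectStats wrapper itself are illustrative assumptions, not part of this change.

package main

import (
	"context"
	"log"

	"github.com/filecoin-project/go-state-types/abi"
	client "github.com/influxdata/influxdb1-client/v2"

	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/tools/stats/points"
	"github.com/filecoin-project/lotus/tools/stats/sync"
)

// collectStats is a hypothetical driver loop: wait for sync, then collect one
// batch of points per tipset and write it to InfluxDB.
func collectStats(ctx context.Context, napi v0api.FullNode, store adt.Store, influxAddr string, lastHeight abi.ChainEpoch, headLag int) error {
	// Block until the node reports a reasonably recent head.
	if err := sync.SyncWait(ctx, napi); err != nil {
		return err
	}

	// Assumed InfluxDB sink; Collect returns a client.BatchPoints, so any
	// influxdb1-client Client can write it.
	db, err := client.NewHTTPClient(client.HTTPConfig{Addr: influxAddr})
	if err != nil {
		return err
	}
	defer db.Close() //nolint:errcheck

	collector, err := points.NewChainPointCollector(ctx, store, napi)
	if err != nil {
		return err
	}

	// headLag is the internal head buffer size; reorgs deeper than this may
	// emit the same height twice.
	tipsets, err := sync.BufferedTipsetChannel(ctx, napi, lastHeight, headLag)
	if err != nil {
		return err
	}

	for tipset := range tipsets {
		batch, err := collector.Collect(ctx, tipset)
		if err != nil {
			log.Printf("collecting points at height %d: %s", tipset.Height(), err)
			continue
		}
		if err := db.Write(batch); err != nil {
			log.Printf("writing points to influx: %s", err)
		}
	}

	return nil
}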