diff --git a/.circleci/config.yml b/.circleci/config.yml index a4a88a090..53611d565 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -895,6 +895,11 @@ workflows: suite: itest-sector_terminate target: "./itests/sector_terminate_test.go" + - test: + name: test-itest-self_sent_txn + suite: itest-self_sent_txn + target: "./itests/self_sent_txn_test.go" + - test: name: test-itest-tape suite: itest-tape @@ -945,7 +950,7 @@ workflows: codecov-upload: false suite: conformance-bleeding-edge target: "./conformance" - vectors-branch: master + vectors-branch: specs-actors-v7 - trigger-testplans: filters: branches: diff --git a/.circleci/template.yml b/.circleci/template.yml index 666ad39f5..ef6818c6d 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -785,7 +785,7 @@ workflows: codecov-upload: false suite: conformance-bleeding-edge target: "./conformance" - vectors-branch: master + vectors-branch: specs-actors-v7 - trigger-testplans: filters: branches: diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..c6273f056 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,21 @@ +## Related Issues + + +## Proposed Changes + + + +## Additional Info + + +## Checklist + +Before you mark the PR ready for review, please make sure that: +- [ ] All commits have a clear commit message. +- [ ] The PR title is in the form of of `: : ` + - example: ` fix: mempool: Introduce a cache for valid signatures` + - `PR type`: _fix_, _feat_, _INTERFACE BREAKING CHANGE_, _CONSENSUS BREAKING_, _build_, _chore_, _ci_, _docs_, _misc_,_perf_, _refactor_, _revert_, _style_, _test_ + - `area`: _api_, _chain_, _state_, _vm_, _data transfer_, _market_, _mempool_, _message_, _block production_, _multisig_, _networking_, _paychan_, _proving_, _sealing_, _wallet_ +- [ ] This PR has tests for new functionality or change in behaviour +- [ ] If new user-facing features are introduced, clear usage guidelines and / or documentation updates should be included in https://lotus.filecoin.io or [Discussion Tutorials.](https://github.com/filecoin-project/lotus/discussions/categories/tutorials) +- [ ] CI is green diff --git a/CHANGELOG.md b/CHANGELOG.md index c2905389c..a420421de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,71 +1,105 @@ # Lotus changelog -# v1.13.2-rc7 / 2022-01-05 +# v1.13.2 / 2022-01-09 -This is the 7th RC for lotus v1.13.2. This will be a highly recommended release with sealing pipeline fixes, worker -management and scheduler enhancement, retrieval improvements and so on. More detailed changelog will be updated later. +Lotus v1.13.2 is a *highly recommended* feature release with remarkable retrieval improvements, new features like +worker management, schedule enhancements and so on. -- github.com/filecoin-project/lotus: - - stores: Reduce log spam during retrievals - - fix lint - - Fix mock ReadPiece - - fr32: Reduce MTTresh from 32M to 512k per core - - piecereader: Avoid allocating 1024MB slices per read - - piecereader: Avoid redundant roundtrips when seeking - - piecereader: Move closer to storage - - build: release: v1.13.2-rc3 ([filecoin-project/lotus#7752](https://github.com/filecoin-project/lotus/pull/7752)) - - build: release: v1.13.2-rc2 ([filecoin-project/lotus#7745](https://github.com/filecoin-project/lotus/pull/7745)) - - v1.13.2-rc1 ([filecoin-project/lotus#7718](https://github.com/filecoin-project/lotus/pull/7718)) - - Address Scheduler enhancements (#7703) review ([filecoin-project/lotus#7714](https://github.com/filecoin-project/lotus/pull/7714)) - - Scheduler enhancements ([filecoin-project/lotus#7703](https://github.com/filecoin-project/lotus/pull/7703)) - - ffiwrapper: Validate PC2 by calling C1 with random seeds ([filecoin-project/lotus#7710](https://github.com/filecoin-project/lotus/pull/7710)) - - fix logic error ([filecoin-project/lotus#7709](https://github.com/filecoin-project/lotus/pull/7709)) - - Update go-graphsync v0.10.6 ([filecoin-project/lotus#7708](https://github.com/filecoin-project/lotus/pull/7708)) - - Add verbose mode to lotus-miner pieces list-cids ([filecoin-project/lotus#7699](https://github.com/filecoin-project/lotus/pull/7699)) - - retrieval: Only output matching nodes, MatchPath dagspec ([filecoin-project/lotus#7706](https://github.com/filecoin-project/lotus/pull/7706)) - - disable mplex stream muxer ([filecoin-project/lotus#7689](https://github.com/filecoin-project/lotus/pull/7689)) - - Cleanup partial retrieval codepaths ( zero functional changes ) ([filecoin-project/lotus#7688](https://github.com/filecoin-project/lotus/pull/7688)) - - Make small retrieval 200x faster ([filecoin-project/lotus#7693](https://github.com/filecoin-project/lotus/pull/7693)) - - Releases back to master ([filecoin-project/lotus#7698](https://github.com/filecoin-project/lotus/pull/7698)) - - Add RLE dump code ([filecoin-project/lotus#7691](https://github.com/filecoin-project/lotus/pull/7691)) - - Update archive script ([filecoin-project/lotus#7690](https://github.com/filecoin-project/lotus/pull/7690)) - - storage: Use 1M buffers for Tar transfers ([filecoin-project/lotus#7681](https://github.com/filecoin-project/lotus/pull/7681)) - - Chore/dm level tests plus merkle proof cars ([filecoin-project/lotus#7673](https://github.com/filecoin-project/lotus/pull/7673)) - - Partial retrieval ux improvements ([filecoin-project/lotus#7610](https://github.com/filecoin-project/lotus/pull/7610)) - - Sector storage groups ([filecoin-project/lotus#7453](https://github.com/filecoin-project/lotus/pull/7453)) - - docsgen-cli: Handle commands with no description correctly ([filecoin-project/lotus#7659](https://github.com/filecoin-project/lotus/pull/7659)) - - shed: simple wallet balancer util ([filecoin-project/lotus#7414](https://github.com/filecoin-project/lotus/pull/7414)) - - remote store: Remove debug printf ([filecoin-project/lotus#7664](https://github.com/filecoin-project/lotus/pull/7664)) - - Bump ws from 5.2.2 to 5.2.3 in /lotuspond/front ([filecoin-project/lotus#7660](https://github.com/filecoin-project/lotus/pull/7660)) - - Bump color-string from 1.5.3 to 1.6.0 in /lotuspond/front ([filecoin-project/lotus#7658](https://github.com/filecoin-project/lotus/pull/7658)) - - Bump postcss from 7.0.17 to 7.0.39 in /lotuspond/front ([filecoin-project/lotus#7657](https://github.com/filecoin-project/lotus/pull/7657)) - - Bump path-parse from 1.0.6 to 1.0.7 in /lotuspond/front ([filecoin-project/lotus#7656](https://github.com/filecoin-project/lotus/pull/7656)) - - Bump tmpl from 1.0.4 to 1.0.5 in /lotuspond/front ([filecoin-project/lotus#7655](https://github.com/filecoin-project/lotus/pull/7655)) - - Bump url-parse from 1.4.7 to 1.5.3 in /lotuspond/front ([filecoin-project/lotus#7654](https://github.com/filecoin-project/lotus/pull/7654)) - - Add caches to lotus-stats and splitcode ([filecoin-project/lotus#7329](https://github.com/filecoin-project/lotus/pull/7329)) - - add additional methods to lotus gateway ([filecoin-project/lotus#7644](https://github.com/filecoin-project/lotus/pull/7644)) - - checkCommit should return SectorCommitFailed ([filecoin-project/lotus#7555](https://github.com/filecoin-project/lotus/pull/7555)) - - Wdpost worker: Reduce challenge confidence to 1 epoch ([filecoin-project/lotus#7572](https://github.com/filecoin-project/lotus/pull/7572)) - - remove api and jaeger env from docker file ([filecoin-project/lotus#7624](https://github.com/filecoin-project/lotus/pull/7624)) - - CLI: Add a lotus multisig cancel command ([filecoin-project/lotus#7645](https://github.com/filecoin-project/lotus/pull/7645)) - - remove jaeger envvars ([filecoin-project/lotus#7631](https://github.com/filecoin-project/lotus/pull/7631)) - - lotus-shed msg: Decode submessages/msig proposals ([filecoin-project/lotus#7639](https://github.com/filecoin-project/lotus/pull/7639)) - - add log for restart windows post scheduler ([filecoin-project/lotus#7613](https://github.com/filecoin-project/lotus/pull/7613)) - - Shed: Add a util to list miner faults ([filecoin-project/lotus#7605](https://github.com/filecoin-project/lotus/pull/7605)) - - add timeout flag to wait-api command ([filecoin-project/lotus#7592](https://github.com/filecoin-project/lotus/pull/7592)) - - Shed: Add a util to create miners more easily ([filecoin-project/lotus#7595](https://github.com/filecoin-project/lotus/pull/7595)) - - Update go-state-types ([filecoin-project/lotus#7591](https://github.com/filecoin-project/lotus/pull/7591)) - - add missing NodeType tag ([filecoin-project/lotus#7559](https://github.com/filecoin-project/lotus/pull/7559)) - - update go-libp2p-pubsub to v0.5.6 ([filecoin-project/lotus#7581](https://github.com/filecoin-project/lotus/pull/7581)) - - bump the master version to v1.13.2-dev ([filecoin-project/lotus#7568](https://github.com/filecoin-project/lotus/pull/7568)) -- github.com/filecoin-project/go-fil-markets (v1.13.3 -> v1.13.4): - failed to fetch repo +## Highlights +- πŸš€πŸš€πŸš€Improve retrieval deal experience + - Testing result with MinerX.3 shows the retrieval deal success rate has increased dramatically with faster transfer + speed, you can join or follow along furthur performance testings [here](https://github.com/filecoin-project/lotus/discussions/7874). We recommend application developers to integrate with the new + retrieval APIs to provide a better client experience. + - 🌟🌟🌟 Reduce retrieval Time-To-First-Byte over 100x ([#7693](https://github.com/filecoin-project/lotus/pull/7693)) + - This change makes most free, small retrievals sub-second + - 🌟🌟🌟 Partial retrieval ux improvements ([#7610](https://github.com/filecoin-project/lotus/pull/7610)) + - New retrieval commands for clients: + - `lotus client ls`: retrieve and list desired object links + - `lotus client cat`: retrieve and print the data from the network + - 🌟🌟 The monolith `ClientRetrieve` method was broken into: + - `ClientRetrieve` which retrieves data into the local repo (or into an IPFS node if ipfs integration is enabled) + - `ClientRetrieveWait` which will wait for the retrieval to complete + - `ClientExport` which will export data from the local node + - Note: this change only applies to v1 API. v0 API remains unchanged. + - 🌟 Support for full ipld selectors was added (for example making it possible to only retrieve list of directories in a deal, without fetching any file data) + - To learn more, see [here](https://github.com/filecoin-project/lotus/blob/0523c946f984b22b3f5de8cc3003cc791389527e/api/types.go#L230-L264) +- πŸš€πŸš€ Sealing scheduler enhancements ([#7703](https://github.com/filecoin-project/lotus/pull/7703), + [#7269](https://github.com/filecoin-project/lotus/pull/7269)), [#7714](https://github.com/filecoin-project/lotus/pull/7714) + - Workers are now aware of cgroup memory limits + - Multiple tasks which use a GPU can be scheduled on a single worker + - Workers can override default resource table through env vars + - Default value list: https://gist.github.com/magik6k/c0e1c7cd73c1241a9acabc30bf469a43 +- πŸš€πŸš€ Sector storage groups ([#7453](https://github.com/filecoin-project/lotus/pull/7453)) + - Storage groups allow for better control of data flow between workers, for example, it makes it possible to define that data from PC1 on a given worker has to have it's PC2 step executed on the same worker + - To set it up, follow the instructions under the `Sector Storage Group` section [here](https://lotus.filecoin.io/docs/storage-providers/seal-workers/#lotus-worker-co-location) + +## New Features +- Add RLE dump code ([#7691](https://github.com/filecoin-project/lotus/pull/7691)) +- Shed: Add a util to list miner faults ([#7605](https://github.com/filecoin-project/lotus/pull/7605)) +- lotus-shed msg: Decode submessages/msig proposals ([#7639](https://github.com/filecoin-project/lotus/pull/7639)) +- CLI: Add a lotus multisig cancel command ([#7645](https://github.com/filecoin-project/lotus/pull/7645)) +- shed: simple wallet balancer util ([#7414](https://github.com/filecoin-project/lotus/pull/7414)) + - balancing token balance between multiple accounts + +## Improvements +- Add verbose mode to `lotus-miner pieces list-cids` ([#7699](https://github.com/filecoin-project/lotus/pull/7699)) +- retrieval: Only output matching nodes, MatchPath dagspec ([#7706](https://github.com/filecoin-project/lotus/pull/7706)) +- Cleanup partial retrieval codepaths ( zero functional changes ) ([#7688](https://github.com/filecoin-project/lotus/pull/7688)) +- storage: Use 1M buffers for Tar transfers ([#7681](https://github.com/filecoin-project/lotus/pull/7681)) +- Chore/dm level tests plus merkle proof cars ([#7673](https://github.com/filecoin-project/lotus/pull/7673)) +- Shed: Add a util to create miners more easily ([#7595](https://github.com/filecoin-project/lotus/pull/7595)) +- add timeout flag to wait-api command ([#7592](https://github.com/filecoin-project/lotus/pull/7592)) +- add log for restart windows post scheduler ([#7613](https://github.com/filecoin-project/lotus/pull/7613)) +- remove jaeger envvars ([#7631](https://github.com/filecoin-project/lotus/pull/7631)) +- remove api and jaeger env from docker file ([#7624](https://github.com/filecoin-project/lotus/pull/7624)) +- Wdpost worker: Reduce challenge confidence to 1 epoch ([#7572](https://github.com/filecoin-project/lotus/pull/7572)) +- add additional methods to lotus gateway ([#7644](https://github.com/filecoin-project/lotus/pull/7644)) +- Add caches to lotus-stats and splitcode ([#7329](https://github.com/filecoin-project/lotus/pull/7329)) +- remote store: Remove debug printf ([#7664](https://github.com/filecoin-project/lotus/pull/7664)) +- docsgen-cli: Handle commands with no description correctly ([#7659](https://github.com/filecoin-project/lotus/pull/7659)) + +## Bug Fixes +- fix docker logic error ([#7709](https://github.com/filecoin-project/lotus/pull/7709)) +- add missing NodeType tag ([#7559](https://github.com/filecoin-project/lotus/pull/7559)) +- checkCommit should return SectorCommitFailed ([#7555](https://github.com/filecoin-project/lotus/pull/7555)) +- ffiwrapper: Validate PC2 by calling C1 with random seeds ([#7710](https://github.com/filecoin-project/lotus/pull/7710)) + +## Dependency Updates +- Update go-graphsync v0.10.6 ([#7708](https://github.com/filecoin-project/lotus/pull/7708)) +- update go-libp2p-pubsub to v0.5.6 ([#7581](https://github.com/filecoin-project/lotus/pull/7581)) +- Update go-state-types ([#7591](https://github.com/filecoin-project/lotus/pull/7591)) +- disable mplex stream muxer ([#7689](https://github.com/filecoin-project/lotus/pull/7689)) +- Bump ws from 5.2.2 to 5.2.3 in /lotuspond/front ([#7660](https://github.com/filecoin-project/lotus/pull/7660)) +- Bump color-string from 1.5.3 to 1.6.0 in /lotuspond/front ([#7658](https://github.com/filecoin-project/lotus/pull/7658)) +- Bump postcss from 7.0.17 to 7.0.39 in /lotuspond/front ([#7657](https://github.com/filecoin-project/lotus/pull/7657)) +- Bump path-parse from 1.0.6 to 1.0.7 in /lotuspond/front ([#7656](https://github.com/filecoin-project/lotus/pull/7656)) +- Bump tmpl from 1.0.4 to 1.0.5 in /lotuspond/front ([#7655](https://github.com/filecoin-project/lotus/pull/7655)) +- Bump url-parse from 1.4.7 to 1.5.3 in /lotuspond/front ([#7654](https://github.com/filecoin-project/lotus/pull/7654)) - github.com/filecoin-project/go-state-types (v0.1.1-0.20210915140513-d354ccf10379 -> v0.1.1): - - Add v14 - - Add PoRep -> UpdateProof mapping - - Intoduce update proofs enums - - Update golangci-lint for comatibility with Go 1.17 - - Update execution image to maybe update debian version + +## Others +- Update archive script ([#7690](https://github.com/filecoin-project/lotus/pull/7690)) + +## Contributors + +| Contributor | Commits | Lines Β± | Files Changed | +|-------------|---------|---------|---------------| +| @magik6k | 89 | +5200/-1818 | 232 | +| Travis Person | 5 | +1473/-953 | 38 | +| @arajasek | 6 | +550/-38 | 19 | +| @clinta | 4 | +393/-123 | 26 | +| @ribasushi | 3 | +334/-68 | 7 | +| @jennijuju| 13 | +197/-120 | 67 | +| @Kubuxu | 10 | +153/-30 | 10 | +| @coryschwartz | 6 | +18/-26 | 6 | +| Marten Seemann | 2 | +6/-34 | 5 | +| @vyzo | 1 | +3/-3 | 2 | +| @hannahhoward | 1 | +3/-3 | 2 | +| @zenground0 | 2 | +2/-2 | 2 | +| @yaohcn | 2 | +2/-2 | 2 | +| @jennijuju | 1 | +1/-1 | 1 | +| @hunjixin | 1 | +1/-0 | 1 | + # v1.13.1 / 2021-11-26 diff --git a/api/api_full.go b/api/api_full.go index 06aaff99c..cf58a3cc6 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -900,6 +900,7 @@ type QueryOffer struct { Size uint64 MinPrice types.BigInt UnsealPrice types.BigInt + PricePerByte abi.TokenAmount PaymentInterval uint64 PaymentIntervalIncrease uint64 Miner address.Address @@ -1083,7 +1084,7 @@ type CirculatingSupply struct { type MiningBaseInfo struct { MinerPower types.BigInt NetworkPower types.BigInt - Sectors []builtin.SectorInfo + Sectors []builtin.ExtendedSectorInfo WorkerKey address.Address SectorSize abi.SectorSize PrevBeaconEntry types.BeaconEntry diff --git a/api/api_storage.go b/api/api_storage.go index 8cca2aa5b..c032a8e1b 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/go-address" datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-state-types/abi" + abinetwork "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" "github.com/filecoin-project/specs-storage/storage" @@ -99,8 +100,8 @@ type StorageMiner interface { // Returns null if message wasn't sent SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin // SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message - SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin - SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin + SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin + SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin // SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. // Returns null if message wasn't sent SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin @@ -111,6 +112,7 @@ type StorageMiner interface { SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin // SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin + SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin // WorkerConnect tells the node to connect to workers RPC WorkerConnect(context.Context, string) error //perm:admin retry:true @@ -118,17 +120,21 @@ type StorageMiner interface { WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin //storiface.WorkerReturn - ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true - ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true - ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true - ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true - ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true - ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true - ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true + ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true + ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true + ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true + ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true + ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error //perm:admin retry:true + ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storage.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true + ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true + ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true + ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true // SealingSchedDiag dumps internal sealing scheduler state SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin @@ -145,6 +151,7 @@ type StorageMiner interface { StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin + StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) //perm:admin StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin @@ -160,6 +167,8 @@ type StorageMiner interface { MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) //perm:read MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write + // MarketDataTransferDiagnostics generates debugging information about current data transfers over graphsync + MarketDataTransferDiagnostics(ctx context.Context, p peer.ID) (*TransferDiagnostics, error) //perm:write // MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write // MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer @@ -246,7 +255,7 @@ type StorageMiner interface { CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin - ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) //perm:read + ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read } var _ storiface.WorkerReturn = *new(StorageMiner) diff --git a/api/api_worker.go b/api/api_worker.go index 4553c30e0..68d8e7baf 100644 --- a/api/api_worker.go +++ b/api/api_worker.go @@ -39,6 +39,10 @@ type Worker interface { SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin + ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin + ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin + ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin + GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) //perm:admin ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 5478e5ea6..571599935 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -1,6 +1,7 @@ package docgen import ( + "encoding/json" "fmt" "go/ast" "go/parser" @@ -15,6 +16,7 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/google/uuid" "github.com/ipfs/go-cid" + "github.com/ipfs/go-graphsync" "github.com/libp2p/go-libp2p-core/metrics" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" @@ -120,6 +122,7 @@ func init() { addExample(api.FullAPIVersion1) addExample(api.PCHInbound) addExample(time.Minute) + addExample(graphsync.RequestID(4)) addExample(datatransfer.TransferID(3)) addExample(datatransfer.Ongoing) addExample(storeIDExample) @@ -250,10 +253,18 @@ func init() { addExample(map[abi.SectorNumber]string{ 123: "can't acquire read lock", }) + addExample(json.RawMessage(`"json raw message"`)) addExample(map[api.SectorState]int{ api.SectorState(sealing.Proving): 120, }) addExample([]abi.SectorNumber{123, 124}) + addExample([]storiface.SectorLock{ + { + Sector: abi.SectorID{Number: 123, Miner: 1000}, + Write: [storiface.FileTypes]uint{0, 0, 1}, + Read: [storiface.FileTypes]uint{2, 3, 0}, + }, + }) // worker specific addExample(storiface.AcquireMove) @@ -339,7 +350,7 @@ func ExampleValue(method string, t, parent reflect.Type) interface{} { switch t.Kind() { case reflect.Slice: out := reflect.New(t).Elem() - reflect.Append(out, reflect.ValueOf(ExampleValue(method, t.Elem(), t))) + out = reflect.Append(out, reflect.ValueOf(ExampleValue(method, t.Elem(), t))) return out.Interface() case reflect.Chan: return ExampleValue(method, t.Elem(), nil) diff --git a/api/proxy_gen.go b/api/proxy_gen.go index feb08531f..0a644e585 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" + abinetwork "github.com/filecoin-project/go-state-types/network" apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -620,7 +621,7 @@ type StorageMinerStruct struct { CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"` - ComputeProof func(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) `perm:"read"` + ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"` CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` @@ -668,6 +669,8 @@ type StorageMinerStruct struct { MarketCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` + MarketDataTransferDiagnostics func(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) `perm:"write"` + MarketDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"` MarketGetAsk func(p0 context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"` @@ -716,12 +719,20 @@ type StorageMinerStruct struct { ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnGenerateSectorKeyFromData func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnMoveStorage func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnProveReplicaUpdate1 func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error `perm:"admin"` + + ReturnProveReplicaUpdate2 func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error `perm:"admin"` + ReturnReadPiece func(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error `perm:"admin"` ReturnReleaseUnsealed func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error `perm:"admin"` + ReturnSealCommit1 func(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error `perm:"admin"` ReturnSealCommit2 func(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error `perm:"admin"` @@ -748,7 +759,9 @@ type StorageMinerStruct struct { SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"` - SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` + SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber, p2 bool) error `perm:"admin"` + + SectorMatchPendingPiecesToOpenSectors func(p0 context.Context) error `perm:"admin"` SectorPreCommitFlush func(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) `perm:"admin"` @@ -794,6 +807,8 @@ type StorageMinerStruct struct { StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) `perm:"admin"` + StorageGetLocks func(p0 context.Context) (storiface.SectorLocks, error) `perm:"admin"` + StorageInfo func(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) `perm:"admin"` StorageList func(p0 context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"` @@ -853,6 +868,8 @@ type WorkerStruct struct { FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` + GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"` + Info func(p0 context.Context) (storiface.WorkerInfo, error) `perm:"admin"` MoveStorage func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"` @@ -861,10 +878,16 @@ type WorkerStruct struct { ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"` + ProveReplicaUpdate1 func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) `perm:"admin"` + + ProveReplicaUpdate2 func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) `perm:"admin"` + ReleaseUnsealed func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` Remove func(p0 context.Context, p1 abi.SectorID) error `perm:"admin"` + ReplicaUpdate func(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"` + SealCommit1 func(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) `perm:"admin"` SealCommit2 func(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) `perm:"admin"` @@ -3690,14 +3713,14 @@ func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPo return *new(map[abi.SectorNumber]string), ErrNotSupported } -func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) { +func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) { if s.Internal.ComputeProof == nil { return *new([]builtin.PoStProof), ErrNotSupported } - return s.Internal.ComputeProof(p0, p1, p2) + return s.Internal.ComputeProof(p0, p1, p2, p3, p4) } -func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) { +func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) { return *new([]builtin.PoStProof), ErrNotSupported } @@ -3954,6 +3977,17 @@ func (s *StorageMinerStub) MarketCancelDataTransfer(p0 context.Context, p1 datat return ErrNotSupported } +func (s *StorageMinerStruct) MarketDataTransferDiagnostics(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) { + if s.Internal.MarketDataTransferDiagnostics == nil { + return nil, ErrNotSupported + } + return s.Internal.MarketDataTransferDiagnostics(p0, p1) +} + +func (s *StorageMinerStub) MarketDataTransferDiagnostics(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) { + return nil, ErrNotSupported +} + func (s *StorageMinerStruct) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { if s.Internal.MarketDataTransferUpdates == nil { return nil, ErrNotSupported @@ -4218,6 +4252,17 @@ func (s *StorageMinerStub) ReturnFinalizeSector(p0 context.Context, p1 storiface return ErrNotSupported } +func (s *StorageMinerStruct) ReturnGenerateSectorKeyFromData(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + if s.Internal.ReturnGenerateSectorKeyFromData == nil { + return ErrNotSupported + } + return s.Internal.ReturnGenerateSectorKeyFromData(p0, p1, p2) +} + +func (s *StorageMinerStub) ReturnGenerateSectorKeyFromData(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { if s.Internal.ReturnMoveStorage == nil { return ErrNotSupported @@ -4229,6 +4274,28 @@ func (s *StorageMinerStub) ReturnMoveStorage(p0 context.Context, p1 storiface.Ca return ErrNotSupported } +func (s *StorageMinerStruct) ReturnProveReplicaUpdate1(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error { + if s.Internal.ReturnProveReplicaUpdate1 == nil { + return ErrNotSupported + } + return s.Internal.ReturnProveReplicaUpdate1(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnProveReplicaUpdate1(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error { + return ErrNotSupported +} + +func (s *StorageMinerStruct) ReturnProveReplicaUpdate2(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error { + if s.Internal.ReturnProveReplicaUpdate2 == nil { + return ErrNotSupported + } + return s.Internal.ReturnProveReplicaUpdate2(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnProveReplicaUpdate2(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error { if s.Internal.ReturnReadPiece == nil { return ErrNotSupported @@ -4251,6 +4318,17 @@ func (s *StorageMinerStub) ReturnReleaseUnsealed(p0 context.Context, p1 storifac return ErrNotSupported } +func (s *StorageMinerStruct) ReturnReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error { + if s.Internal.ReturnReplicaUpdate == nil { + return ErrNotSupported + } + return s.Internal.ReturnReplicaUpdate(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error { if s.Internal.ReturnSealCommit1 == nil { return ErrNotSupported @@ -4394,14 +4472,25 @@ func (s *StorageMinerStub) SectorGetSealDelay(p0 context.Context) (time.Duration return *new(time.Duration), ErrNotSupported } -func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error { +func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber, p2 bool) error { if s.Internal.SectorMarkForUpgrade == nil { return ErrNotSupported } - return s.Internal.SectorMarkForUpgrade(p0, p1) + return s.Internal.SectorMarkForUpgrade(p0, p1, p2) } -func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error { +func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber, p2 bool) error { + return ErrNotSupported +} + +func (s *StorageMinerStruct) SectorMatchPendingPiecesToOpenSectors(p0 context.Context) error { + if s.Internal.SectorMatchPendingPiecesToOpenSectors == nil { + return ErrNotSupported + } + return s.Internal.SectorMatchPendingPiecesToOpenSectors(p0) +} + +func (s *StorageMinerStub) SectorMatchPendingPiecesToOpenSectors(p0 context.Context) error { return ErrNotSupported } @@ -4647,6 +4736,17 @@ func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID return *new([]stores.SectorStorageInfo), ErrNotSupported } +func (s *StorageMinerStruct) StorageGetLocks(p0 context.Context) (storiface.SectorLocks, error) { + if s.Internal.StorageGetLocks == nil { + return *new(storiface.SectorLocks), ErrNotSupported + } + return s.Internal.StorageGetLocks(p0) +} + +func (s *StorageMinerStub) StorageGetLocks(p0 context.Context) (storiface.SectorLocks, error) { + return *new(storiface.SectorLocks), ErrNotSupported +} + func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) { if s.Internal.StorageInfo == nil { return *new(stores.StorageInfo), ErrNotSupported @@ -4878,6 +4978,17 @@ func (s *WorkerStub) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 return *new(storiface.CallID), ErrNotSupported } +func (s *WorkerStruct) GenerateSectorKeyFromData(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) { + if s.Internal.GenerateSectorKeyFromData == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.GenerateSectorKeyFromData(p0, p1, p2) +} + +func (s *WorkerStub) GenerateSectorKeyFromData(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + func (s *WorkerStruct) Info(p0 context.Context) (storiface.WorkerInfo, error) { if s.Internal.Info == nil { return *new(storiface.WorkerInfo), ErrNotSupported @@ -4922,6 +5033,28 @@ func (s *WorkerStub) ProcessSession(p0 context.Context) (uuid.UUID, error) { return *new(uuid.UUID), ErrNotSupported } +func (s *WorkerStruct) ProveReplicaUpdate1(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) { + if s.Internal.ProveReplicaUpdate1 == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.ProveReplicaUpdate1(p0, p1, p2, p3, p4) +} + +func (s *WorkerStub) ProveReplicaUpdate1(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + +func (s *WorkerStruct) ProveReplicaUpdate2(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) { + if s.Internal.ProveReplicaUpdate2 == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.ProveReplicaUpdate2(p0, p1, p2, p3, p4, p5) +} + +func (s *WorkerStub) ProveReplicaUpdate2(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + func (s *WorkerStruct) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { if s.Internal.ReleaseUnsealed == nil { return *new(storiface.CallID), ErrNotSupported @@ -4944,6 +5077,17 @@ func (s *WorkerStub) Remove(p0 context.Context, p1 abi.SectorID) error { return ErrNotSupported } +func (s *WorkerStruct) ReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) { + if s.Internal.ReplicaUpdate == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.ReplicaUpdate(p0, p1, p2) +} + +func (s *WorkerStub) ReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + func (s *WorkerStruct) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) { if s.Internal.SealCommit1 == nil { return *new(storiface.CallID), ErrNotSupported diff --git a/api/types.go b/api/types.go index 0ecda0405..81345306d 100644 --- a/api/types.go +++ b/api/types.go @@ -10,6 +10,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" + "github.com/ipfs/go-graphsync" "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" @@ -53,6 +54,30 @@ type MessageSendSpec struct { MaxFee abi.TokenAmount } +// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync +type GraphSyncDataTransfer struct { + // GraphSync request id for this transfer + RequestID graphsync.RequestID + // Graphsync state for this transfer + RequestState string + // If a channel ID is present, indicates whether this is the current graphsync request for this channel + // (could have changed in a restart) + IsCurrentChannelRequest bool + // Data transfer channel ID for this transfer + ChannelID *datatransfer.ChannelID + // Data transfer state for this transfer + ChannelState *DataTransferChannel + // Diagnostic information about this request -- and unexpected inconsistencies in + // request state + Diagnostics []string +} + +// TransferDiagnostics give current information about transfers going over graphsync that may be helpful for debugging +type TransferDiagnostics struct { + ReceivingTransfers []*GraphSyncDataTransfer + SendingTransfers []*GraphSyncDataTransfer +} + type DataTransferChannel struct { TransferID datatransfer.TransferID Status datatransfer.Status diff --git a/api/v0api/gateway.go b/api/v0api/gateway.go index 18a5ec7d6..e3ba56899 100644 --- a/api/v0api/gateway.go +++ b/api/v0api/gateway.go @@ -8,7 +8,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" + abinetwork "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -57,7 +57,7 @@ type Gateway interface { StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateNetworkVersion(context.Context, types.TipSetKey) (abinetwork.Version, error) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index af0687fe5..49ebad428 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -13,7 +13,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" + abinetwork "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -451,7 +451,7 @@ type GatewayStruct struct { StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) `` - StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (network.Version, error) `` + StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) `` StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) `` @@ -2703,15 +2703,15 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A return nil, ErrNotSupported } -func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) { +func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) { if s.Internal.StateNetworkVersion == nil { - return *new(network.Version), ErrNotSupported + return *new(abinetwork.Version), ErrNotSupported } return s.Internal.StateNetworkVersion(p0, p1) } -func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) { - return *new(network.Version), ErrNotSupported +func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) { + return *new(abinetwork.Version), ErrNotSupported } func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) { diff --git a/api/version.go b/api/version.go index 93148f28d..228dcbd10 100644 --- a/api/version.go +++ b/api/version.go @@ -54,10 +54,10 @@ func VersionForType(nodeType NodeType) (Version, error) { // semver versions of the rpc api exposed var ( - FullAPIVersion0 = newVer(1, 4, 0) - FullAPIVersion1 = newVer(2, 1, 0) + FullAPIVersion0 = newVer(1, 5, 0) + FullAPIVersion1 = newVer(2, 2, 0) - MinerAPIVersion0 = newVer(1, 2, 0) + MinerAPIVersion0 = newVer(1, 3, 0) WorkerAPIVersion0 = newVer(1, 5, 0) ) diff --git a/blockstore/api.go b/blockstore/api.go index 6715b4766..dc4c03452 100644 --- a/blockstore/api.go +++ b/blockstore/api.go @@ -25,35 +25,35 @@ func NewAPIBlockstore(cio ChainIO) Blockstore { return Adapt(bs) // return an adapted blockstore. } -func (a *apiBlockstore) DeleteBlock(cid.Cid) error { +func (a *apiBlockstore) DeleteBlock(context.Context, cid.Cid) error { return xerrors.New("not supported") } -func (a *apiBlockstore) Has(c cid.Cid) (bool, error) { - return a.api.ChainHasObj(context.TODO(), c) +func (a *apiBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + return a.api.ChainHasObj(ctx, c) } -func (a *apiBlockstore) Get(c cid.Cid) (blocks.Block, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) +func (a *apiBlockstore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + bb, err := a.api.ChainReadObj(ctx, c) if err != nil { return nil, err } return blocks.NewBlockWithCid(bb, c) } -func (a *apiBlockstore) GetSize(c cid.Cid) (int, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) +func (a *apiBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + bb, err := a.api.ChainReadObj(ctx, c) if err != nil { return 0, err } return len(bb), nil } -func (a *apiBlockstore) Put(blocks.Block) error { +func (a *apiBlockstore) Put(context.Context, blocks.Block) error { return xerrors.New("not supported") } -func (a *apiBlockstore) PutMany([]blocks.Block) error { +func (a *apiBlockstore) PutMany(context.Context, []blocks.Block) error { return xerrors.New("not supported") } diff --git a/blockstore/badger/blockstore.go b/blockstore/badger/blockstore.go index a0b51d8df..270e5b820 100644 --- a/blockstore/badger/blockstore.go +++ b/blockstore/badger/blockstore.go @@ -525,7 +525,7 @@ func (b *Blockstore) Size() (int64, error) { // View implements blockstore.Viewer, which leverages zero-copy read-only // access to values. -func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { +func (b *Blockstore) View(ctx context.Context, cid cid.Cid, fn func([]byte) error) error { if err := b.access(); err != nil { return err } @@ -552,7 +552,7 @@ func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { } // Has implements Blockstore.Has. -func (b *Blockstore) Has(cid cid.Cid) (bool, error) { +func (b *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { if err := b.access(); err != nil { return false, err } @@ -582,7 +582,7 @@ func (b *Blockstore) Has(cid cid.Cid) (bool, error) { } // Get implements Blockstore.Get. -func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { +func (b *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { if !cid.Defined() { return nil, blockstore.ErrNotFound } @@ -619,7 +619,7 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { } // GetSize implements Blockstore.GetSize. -func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { +func (b *Blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { if err := b.access(); err != nil { return 0, err } @@ -652,7 +652,7 @@ func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { } // Put implements Blockstore.Put. -func (b *Blockstore) Put(block blocks.Block) error { +func (b *Blockstore) Put(ctx context.Context, block blocks.Block) error { if err := b.access(); err != nil { return err } @@ -691,7 +691,7 @@ func (b *Blockstore) Put(block blocks.Block) error { } // PutMany implements Blockstore.PutMany. -func (b *Blockstore) PutMany(blocks []blocks.Block) error { +func (b *Blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { if err := b.access(); err != nil { return err } @@ -755,7 +755,7 @@ func (b *Blockstore) PutMany(blocks []blocks.Block) error { } // DeleteBlock implements Blockstore.DeleteBlock. -func (b *Blockstore) DeleteBlock(cid cid.Cid) error { +func (b *Blockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { if err := b.access(); err != nil { return err } @@ -774,7 +774,7 @@ func (b *Blockstore) DeleteBlock(cid cid.Cid) error { }) } -func (b *Blockstore) DeleteMany(cids []cid.Cid) error { +func (b *Blockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { if err := b.access(); err != nil { return err } diff --git a/blockstore/badger/blockstore_test.go b/blockstore/badger/blockstore_test.go index d8ef5241b..4619d4ec3 100644 --- a/blockstore/badger/blockstore_test.go +++ b/blockstore/badger/blockstore_test.go @@ -2,6 +2,7 @@ package badgerbs import ( "bytes" + "context" "fmt" "io/ioutil" "os" @@ -98,6 +99,7 @@ func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, } func testMove(t *testing.T, optsF func(string) Options) { + ctx := context.Background() basePath, err := ioutil.TempDir("", "") if err != nil { t.Fatal(err) @@ -122,7 +124,7 @@ func testMove(t *testing.T, optsF func(string) Options) { // add some blocks for i := 0; i < 10; i++ { blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) - err := db.Put(blk) + err := db.Put(ctx, blk) if err != nil { t.Fatal(err) } @@ -132,7 +134,7 @@ func testMove(t *testing.T, optsF func(string) Options) { // delete some of them for i := 5; i < 10; i++ { c := have[i].Cid() - err := db.DeleteBlock(c) + err := db.DeleteBlock(ctx, c) if err != nil { t.Fatal(err) } @@ -145,7 +147,7 @@ func testMove(t *testing.T, optsF func(string) Options) { g.Go(func() error { for i := 10; i < 1000; i++ { blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) - err := db.Put(blk) + err := db.Put(ctx, blk) if err != nil { return err } @@ -165,7 +167,7 @@ func testMove(t *testing.T, optsF func(string) Options) { // now check that we have all the blocks in have and none in the deleted lists checkBlocks := func() { for _, blk := range have { - has, err := db.Has(blk.Cid()) + has, err := db.Has(ctx, blk.Cid()) if err != nil { t.Fatal(err) } @@ -174,7 +176,7 @@ func testMove(t *testing.T, optsF func(string) Options) { t.Fatal("missing block") } - blk2, err := db.Get(blk.Cid()) + blk2, err := db.Get(ctx, blk.Cid()) if err != nil { t.Fatal(err) } @@ -185,7 +187,7 @@ func testMove(t *testing.T, optsF func(string) Options) { } for _, c := range deleted { - has, err := db.Has(c) + has, err := db.Has(ctx, c) if err != nil { t.Fatal(err) } diff --git a/blockstore/badger/blockstore_test_suite.go b/blockstore/badger/blockstore_test_suite.go index 93be82ac8..167d1b2ab 100644 --- a/blockstore/badger/blockstore_test_suite.go +++ b/blockstore/badger/blockstore_test_suite.go @@ -44,28 +44,31 @@ func (s *Suite) RunTests(t *testing.T, prefix string) { } func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() } c := cid.NewCidV0(u.Hash([]byte("stuff"))) - bl, err := bs.Get(c) + bl, err := bs.Get(ctx, c) require.Nil(t, bl) require.Equal(t, blockstore.ErrNotFound, err) } func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() } - _, err := bs.Get(cid.Undef) + _, err := bs.Get(ctx, cid.Undef) require.Equal(t, blockstore.ErrNotFound, err) } func (s *Suite) TestPutThenGetBlock(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -73,15 +76,16 @@ func (s *Suite) TestPutThenGetBlock(t *testing.T) { orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) - fetched, err := bs.Get(orig.Cid()) + fetched, err := bs.Get(ctx, orig.Cid()) require.NoError(t, err) require.Equal(t, orig.RawData(), fetched.RawData()) } func (s *Suite) TestHas(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -89,19 +93,20 @@ func (s *Suite) TestHas(t *testing.T) { orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) - ok, err := bs.Has(orig.Cid()) + ok, err := bs.Has(ctx, orig.Cid()) require.NoError(t, err) require.True(t, ok) - ok, err = bs.Has(blocks.NewBlock([]byte("another thing")).Cid()) + ok, err = bs.Has(ctx, blocks.NewBlock([]byte("another thing")).Cid()) require.NoError(t, err) require.False(t, ok) } func (s *Suite) TestCidv0v1(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -109,15 +114,17 @@ func (s *Suite) TestCidv0v1(t *testing.T) { orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) - fetched, err := bs.Get(cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash())) + fetched, err := bs.Get(ctx, cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash())) require.NoError(t, err) require.Equal(t, orig.RawData(), fetched.RawData()) } func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) { + ctx := context.Background() + bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -127,21 +134,21 @@ func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) { missingBlock := blocks.NewBlock([]byte("missingBlock")) emptyBlock := blocks.NewBlock([]byte{}) - err := bs.Put(block) + err := bs.Put(ctx, block) require.NoError(t, err) - blockSize, err := bs.GetSize(block.Cid()) + blockSize, err := bs.GetSize(ctx, block.Cid()) require.NoError(t, err) require.Len(t, block.RawData(), blockSize) - err = bs.Put(emptyBlock) + err = bs.Put(ctx, emptyBlock) require.NoError(t, err) - emptySize, err := bs.GetSize(emptyBlock.Cid()) + emptySize, err := bs.GetSize(ctx, emptyBlock.Cid()) require.NoError(t, err) require.Zero(t, emptySize) - missingSize, err := bs.GetSize(missingBlock.Cid()) + missingSize, err := bs.GetSize(ctx, missingBlock.Cid()) require.Equal(t, blockstore.ErrNotFound, err) require.Equal(t, -1, missingSize) } @@ -203,6 +210,7 @@ func (s *Suite) TestDoubleClose(t *testing.T) { } func (s *Suite) TestReopenPutGet(t *testing.T) { + ctx := context.Background() bs, path := s.NewBlockstore(t) c, ok := bs.(io.Closer) if !ok { @@ -210,7 +218,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) { } orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) err = c.Close() @@ -219,7 +227,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) { bs, err = s.OpenBlockstore(t, path) require.NoError(t, err) - fetched, err := bs.Get(orig.Cid()) + fetched, err := bs.Get(ctx, orig.Cid()) require.NoError(t, err) require.Equal(t, orig.RawData(), fetched.RawData()) @@ -228,6 +236,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) { } func (s *Suite) TestPutMany(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -238,15 +247,15 @@ func (s *Suite) TestPutMany(t *testing.T) { blocks.NewBlock([]byte("foo2")), blocks.NewBlock([]byte("foo3")), } - err := bs.PutMany(blks) + err := bs.PutMany(ctx, blks) require.NoError(t, err) for _, blk := range blks { - fetched, err := bs.Get(blk.Cid()) + fetched, err := bs.Get(ctx, blk.Cid()) require.NoError(t, err) require.Equal(t, blk.RawData(), fetched.RawData()) - ok, err := bs.Has(blk.Cid()) + ok, err := bs.Has(ctx, blk.Cid()) require.NoError(t, err) require.True(t, ok) } @@ -259,6 +268,7 @@ func (s *Suite) TestPutMany(t *testing.T) { } func (s *Suite) TestDelete(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -269,10 +279,10 @@ func (s *Suite) TestDelete(t *testing.T) { blocks.NewBlock([]byte("foo2")), blocks.NewBlock([]byte("foo3")), } - err := bs.PutMany(blks) + err := bs.PutMany(ctx, blks) require.NoError(t, err) - err = bs.DeleteBlock(blks[1].Cid()) + err = bs.DeleteBlock(ctx, blks[1].Cid()) require.NoError(t, err) ch, err := bs.AllKeysChan(context.Background()) @@ -285,17 +295,17 @@ func (s *Suite) TestDelete(t *testing.T) { cid.NewCidV1(cid.Raw, blks[2].Cid().Hash()), }) - has, err := bs.Has(blks[1].Cid()) + has, err := bs.Has(ctx, blks[1].Cid()) require.NoError(t, err) require.False(t, has) - } func insertBlocks(t *testing.T, bs blockstore.BasicBlockstore, count int) []cid.Cid { + ctx := context.Background() keys := make([]cid.Cid, count) for i := 0; i < count; i++ { block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) - err := bs.Put(block) + err := bs.Put(ctx, block) require.NoError(t, err) // NewBlock assigns a CIDv0; we convert it to CIDv1 because that's what // the store returns. diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go index 8ede31eb9..409c100cf 100644 --- a/blockstore/blockstore.go +++ b/blockstore/blockstore.go @@ -1,6 +1,8 @@ package blockstore import ( + "context" + cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" @@ -27,7 +29,7 @@ type BasicBlockstore = blockstore.Blockstore type Viewer = blockstore.Viewer type BatchDeleter interface { - DeleteMany(cids []cid.Cid) error + DeleteMany(ctx context.Context, cids []cid.Cid) error } // BlockstoreIterator is a trait for efficient iteration @@ -93,17 +95,17 @@ type adaptedBlockstore struct { var _ Blockstore = (*adaptedBlockstore)(nil) -func (a *adaptedBlockstore) View(cid cid.Cid, callback func([]byte) error) error { - blk, err := a.Get(cid) +func (a *adaptedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error { + blk, err := a.Get(ctx, cid) if err != nil { return err } return callback(blk.RawData()) } -func (a *adaptedBlockstore) DeleteMany(cids []cid.Cid) error { +func (a *adaptedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { for _, cid := range cids { - err := a.DeleteBlock(cid) + err := a.DeleteBlock(ctx, cid) if err != nil { return err } diff --git a/blockstore/buffered.go b/blockstore/buffered.go index 5d3d38f78..8e23b5362 100644 --- a/blockstore/buffered.go +++ b/blockstore/buffered.go @@ -88,34 +88,34 @@ func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, return out, nil } -func (bs *BufferedBlockstore) DeleteBlock(c cid.Cid) error { - if err := bs.read.DeleteBlock(c); err != nil { +func (bs *BufferedBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error { + if err := bs.read.DeleteBlock(ctx, c); err != nil { return err } - return bs.write.DeleteBlock(c) + return bs.write.DeleteBlock(ctx, c) } -func (bs *BufferedBlockstore) DeleteMany(cids []cid.Cid) error { - if err := bs.read.DeleteMany(cids); err != nil { +func (bs *BufferedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { + if err := bs.read.DeleteMany(ctx, cids); err != nil { return err } - return bs.write.DeleteMany(cids) + return bs.write.DeleteMany(ctx, cids) } -func (bs *BufferedBlockstore) View(c cid.Cid, callback func([]byte) error) error { +func (bs *BufferedBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error { // both stores are viewable. - if err := bs.write.View(c, callback); err == ErrNotFound { + if err := bs.write.View(ctx, c, callback); err == ErrNotFound { // not found in write blockstore; fall through. } else { return err // propagate errors, or nil, i.e. found. } - return bs.read.View(c, callback) + return bs.read.View(ctx, c, callback) } -func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) { - if out, err := bs.write.Get(c); err != nil { +func (bs *BufferedBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block, error) { + if out, err := bs.write.Get(ctx, c); err != nil { if err != ErrNotFound { return nil, err } @@ -123,20 +123,20 @@ func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) { return out, nil } - return bs.read.Get(c) + return bs.read.Get(ctx, c) } -func (bs *BufferedBlockstore) GetSize(c cid.Cid) (int, error) { - s, err := bs.read.GetSize(c) +func (bs *BufferedBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + s, err := bs.read.GetSize(ctx, c) if err == ErrNotFound || s == 0 { - return bs.write.GetSize(c) + return bs.write.GetSize(ctx, c) } return s, err } -func (bs *BufferedBlockstore) Put(blk block.Block) error { - has, err := bs.read.Has(blk.Cid()) // TODO: consider dropping this check +func (bs *BufferedBlockstore) Put(ctx context.Context, blk block.Block) error { + has, err := bs.read.Has(ctx, blk.Cid()) // TODO: consider dropping this check if err != nil { return err } @@ -145,11 +145,11 @@ func (bs *BufferedBlockstore) Put(blk block.Block) error { return nil } - return bs.write.Put(blk) + return bs.write.Put(ctx, blk) } -func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) { - has, err := bs.write.Has(c) +func (bs *BufferedBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + has, err := bs.write.Has(ctx, c) if err != nil { return false, err } @@ -157,7 +157,7 @@ func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) { return true, nil } - return bs.read.Has(c) + return bs.read.Has(ctx, c) } func (bs *BufferedBlockstore) HashOnRead(hor bool) { @@ -165,8 +165,8 @@ func (bs *BufferedBlockstore) HashOnRead(hor bool) { bs.write.HashOnRead(hor) } -func (bs *BufferedBlockstore) PutMany(blks []block.Block) error { - return bs.write.PutMany(blks) +func (bs *BufferedBlockstore) PutMany(ctx context.Context, blks []block.Block) error { + return bs.write.PutMany(ctx, blks) } func (bs *BufferedBlockstore) Read() Blockstore { diff --git a/blockstore/discard.go b/blockstore/discard.go index afd0651bc..575c752d4 100644 --- a/blockstore/discard.go +++ b/blockstore/discard.go @@ -18,39 +18,39 @@ func NewDiscardStore(bs Blockstore) Blockstore { return &discardstore{bs: bs} } -func (b *discardstore) Has(cid cid.Cid) (bool, error) { - return b.bs.Has(cid) +func (b *discardstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + return b.bs.Has(ctx, cid) } func (b *discardstore) HashOnRead(hor bool) { b.bs.HashOnRead(hor) } -func (b *discardstore) Get(cid cid.Cid) (blocks.Block, error) { - return b.bs.Get(cid) +func (b *discardstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + return b.bs.Get(ctx, cid) } -func (b *discardstore) GetSize(cid cid.Cid) (int, error) { - return b.bs.GetSize(cid) +func (b *discardstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + return b.bs.GetSize(ctx, cid) } -func (b *discardstore) View(cid cid.Cid, f func([]byte) error) error { - return b.bs.View(cid, f) +func (b *discardstore) View(ctx context.Context, cid cid.Cid, f func([]byte) error) error { + return b.bs.View(ctx, cid, f) } -func (b *discardstore) Put(blk blocks.Block) error { +func (b *discardstore) Put(ctx context.Context, blk blocks.Block) error { return nil } -func (b *discardstore) PutMany(blks []blocks.Block) error { +func (b *discardstore) PutMany(ctx context.Context, blks []blocks.Block) error { return nil } -func (b *discardstore) DeleteBlock(cid cid.Cid) error { +func (b *discardstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { return nil } -func (b *discardstore) DeleteMany(cids []cid.Cid) error { +func (b *discardstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { return nil } diff --git a/blockstore/fallback.go b/blockstore/fallback.go index 5f220f941..3d0acd36d 100644 --- a/blockstore/fallback.go +++ b/blockstore/fallback.go @@ -71,14 +71,14 @@ func (fbs *FallbackStore) getFallback(c cid.Cid) (blocks.Block, error) { // chain bitswap puts blocks in temp blockstore which is cleaned up // every few min (to drop any messages we fetched but don't want) // in this case we want to keep this block around - if err := fbs.Put(b); err != nil { + if err := fbs.Put(ctx, b); err != nil { return nil, xerrors.Errorf("persisting fallback-fetched block: %w", err) } return b, nil } -func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) { - b, err := fbs.Blockstore.Get(c) +func (fbs *FallbackStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + b, err := fbs.Blockstore.Get(ctx, c) switch err { case nil: return b, nil @@ -89,8 +89,8 @@ func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) { } } -func (fbs *FallbackStore) GetSize(c cid.Cid) (int, error) { - sz, err := fbs.Blockstore.GetSize(c) +func (fbs *FallbackStore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + sz, err := fbs.Blockstore.GetSize(ctx, c) switch err { case nil: return sz, nil diff --git a/blockstore/idstore.go b/blockstore/idstore.go index e6148ff04..c6281998a 100644 --- a/blockstore/idstore.go +++ b/blockstore/idstore.go @@ -38,7 +38,7 @@ func decodeCid(cid cid.Cid) (inline bool, data []byte, err error) { return false, nil, err } -func (b *idstore) Has(cid cid.Cid) (bool, error) { +func (b *idstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { inline, _, err := decodeCid(cid) if err != nil { return false, xerrors.Errorf("error decoding Cid: %w", err) @@ -48,10 +48,10 @@ func (b *idstore) Has(cid cid.Cid) (bool, error) { return true, nil } - return b.bs.Has(cid) + return b.bs.Has(ctx, cid) } -func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) { +func (b *idstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { inline, data, err := decodeCid(cid) if err != nil { return nil, xerrors.Errorf("error decoding Cid: %w", err) @@ -61,10 +61,10 @@ func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } - return b.bs.Get(cid) + return b.bs.Get(ctx, cid) } -func (b *idstore) GetSize(cid cid.Cid) (int, error) { +func (b *idstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { inline, data, err := decodeCid(cid) if err != nil { return 0, xerrors.Errorf("error decoding Cid: %w", err) @@ -74,10 +74,10 @@ func (b *idstore) GetSize(cid cid.Cid) (int, error) { return len(data), err } - return b.bs.GetSize(cid) + return b.bs.GetSize(ctx, cid) } -func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error { +func (b *idstore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error { inline, data, err := decodeCid(cid) if err != nil { return xerrors.Errorf("error decoding Cid: %w", err) @@ -87,10 +87,10 @@ func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error { return cb(data) } - return b.bs.View(cid, cb) + return b.bs.View(ctx, cid, cb) } -func (b *idstore) Put(blk blocks.Block) error { +func (b *idstore) Put(ctx context.Context, blk blocks.Block) error { inline, _, err := decodeCid(blk.Cid()) if err != nil { return xerrors.Errorf("error decoding Cid: %w", err) @@ -100,10 +100,10 @@ func (b *idstore) Put(blk blocks.Block) error { return nil } - return b.bs.Put(blk) + return b.bs.Put(ctx, blk) } -func (b *idstore) PutMany(blks []blocks.Block) error { +func (b *idstore) PutMany(ctx context.Context, blks []blocks.Block) error { toPut := make([]blocks.Block, 0, len(blks)) for _, blk := range blks { inline, _, err := decodeCid(blk.Cid()) @@ -118,13 +118,13 @@ func (b *idstore) PutMany(blks []blocks.Block) error { } if len(toPut) > 0 { - return b.bs.PutMany(toPut) + return b.bs.PutMany(ctx, toPut) } return nil } -func (b *idstore) DeleteBlock(cid cid.Cid) error { +func (b *idstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { inline, _, err := decodeCid(cid) if err != nil { return xerrors.Errorf("error decoding Cid: %w", err) @@ -134,10 +134,10 @@ func (b *idstore) DeleteBlock(cid cid.Cid) error { return nil } - return b.bs.DeleteBlock(cid) + return b.bs.DeleteBlock(ctx, cid) } -func (b *idstore) DeleteMany(cids []cid.Cid) error { +func (b *idstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { toDelete := make([]cid.Cid, 0, len(cids)) for _, cid := range cids { inline, _, err := decodeCid(cid) @@ -152,7 +152,7 @@ func (b *idstore) DeleteMany(cids []cid.Cid) error { } if len(toDelete) > 0 { - return b.bs.DeleteMany(toDelete) + return b.bs.DeleteMany(ctx, toDelete) } return nil diff --git a/blockstore/ipfs.go b/blockstore/ipfs.go index 51b4bd951..787c71d7d 100644 --- a/blockstore/ipfs.go +++ b/blockstore/ipfs.go @@ -79,12 +79,12 @@ func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onl return Adapt(bs), nil } -func (i *IPFSBlockstore) DeleteBlock(cid cid.Cid) error { +func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { return xerrors.Errorf("not supported") } -func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) { - _, err := i.offlineAPI.Block().Stat(i.ctx, path.IpldPath(cid)) +func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + _, err := i.offlineAPI.Block().Stat(ctx, path.IpldPath(cid)) if err != nil { // The underlying client is running in Offline mode. // Stat() will fail with an err if the block isn't in the @@ -99,8 +99,8 @@ func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) { return true, nil } -func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) { - rd, err := i.api.Block().Get(i.ctx, path.IpldPath(cid)) +func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + rd, err := i.api.Block().Get(ctx, path.IpldPath(cid)) if err != nil { return nil, xerrors.Errorf("getting ipfs block: %w", err) } @@ -113,8 +113,8 @@ func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } -func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) { - st, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid)) +func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + st, err := i.api.Block().Stat(ctx, path.IpldPath(cid)) if err != nil { return 0, xerrors.Errorf("getting ipfs block: %w", err) } @@ -122,23 +122,23 @@ func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) { return st.Size(), nil } -func (i *IPFSBlockstore) Put(block blocks.Block) error { +func (i *IPFSBlockstore) Put(ctx context.Context, block blocks.Block) error { mhd, err := multihash.Decode(block.Cid().Hash()) if err != nil { return err } - _, err = i.api.Block().Put(i.ctx, bytes.NewReader(block.RawData()), + _, err = i.api.Block().Put(ctx, bytes.NewReader(block.RawData()), options.Block.Hash(mhd.Code, mhd.Length), options.Block.Format(cid.CodecToStr[block.Cid().Type()])) return err } -func (i *IPFSBlockstore) PutMany(blocks []blocks.Block) error { +func (i *IPFSBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { // TODO: could be done in parallel for _, block := range blocks { - if err := i.Put(block); err != nil { + if err := i.Put(ctx, block); err != nil { return err } } diff --git a/blockstore/mem.go b/blockstore/mem.go index 8ea69d46a..d6b14f002 100644 --- a/blockstore/mem.go +++ b/blockstore/mem.go @@ -15,24 +15,24 @@ func NewMemory() MemBlockstore { // MemBlockstore is a terminal blockstore that keeps blocks in memory. type MemBlockstore map[cid.Cid]blocks.Block -func (m MemBlockstore) DeleteBlock(k cid.Cid) error { +func (m MemBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { delete(m, k) return nil } -func (m MemBlockstore) DeleteMany(ks []cid.Cid) error { +func (m MemBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error { for _, k := range ks { delete(m, k) } return nil } -func (m MemBlockstore) Has(k cid.Cid) (bool, error) { +func (m MemBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { _, ok := m[k] return ok, nil } -func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error { +func (m MemBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { b, ok := m[k] if !ok { return ErrNotFound @@ -40,7 +40,7 @@ func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error { return callback(b.RawData()) } -func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) { +func (m MemBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { b, ok := m[k] if !ok { return nil, ErrNotFound @@ -49,7 +49,7 @@ func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) { } // GetSize returns the CIDs mapped BlockSize -func (m MemBlockstore) GetSize(k cid.Cid) (int, error) { +func (m MemBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { b, ok := m[k] if !ok { return 0, ErrNotFound @@ -58,7 +58,7 @@ func (m MemBlockstore) GetSize(k cid.Cid) (int, error) { } // Put puts a given block to the underlying datastore -func (m MemBlockstore) Put(b blocks.Block) error { +func (m MemBlockstore) Put(ctx context.Context, b blocks.Block) error { // Convert to a basic block for safety, but try to reuse the existing // block if it's already a basic block. k := b.Cid() @@ -76,9 +76,9 @@ func (m MemBlockstore) Put(b blocks.Block) error { // PutMany puts a slice of blocks at the same time using batching // capabilities of the underlying datastore whenever possible. -func (m MemBlockstore) PutMany(bs []blocks.Block) error { +func (m MemBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { for _, b := range bs { - _ = m.Put(b) // can't fail + _ = m.Put(ctx, b) // can't fail } return nil } diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 0e34fe952..f6715ea33 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -18,6 +18,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/metrics" @@ -47,6 +49,9 @@ var ( enableDebugLog = false // set this to true if you want to track origin stack traces in the write log enableDebugLogWriteTraces = false + + // upgradeBoundary is the boundary before and after an upgrade where we suppress compaction + upgradeBoundary = build.Finality ) func init() { @@ -98,6 +103,12 @@ type ChainAccessor interface { SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) } +// upgradeRange is a precomputed epoch range during which we shouldn't compact so as to not +// interfere with an upgrade +type upgradeRange struct { + start, end abi.ChainEpoch +} + // hotstore is the interface that must be satisfied by the hot blockstore; it is an extension // of the Blockstore interface with the traits we need for compaction. type hotstore interface { @@ -125,6 +136,8 @@ type SplitStore struct { cold bstore.Blockstore hot hotstore + upgrades []upgradeRange + markSetEnv MarkSetEnv markSetSize int64 @@ -203,17 +216,17 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co } // Blockstore interface -func (s *SplitStore) DeleteBlock(_ cid.Cid) error { +func (s *SplitStore) DeleteBlock(_ context.Context, _ cid.Cid) error { // afaict we don't seem to be using this method, so it's not implemented return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint } -func (s *SplitStore) DeleteMany(_ []cid.Cid) error { +func (s *SplitStore) DeleteMany(_ context.Context, _ []cid.Cid) error { // afaict we don't seem to be using this method, so it's not implemented return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint } -func (s *SplitStore) Has(cid cid.Cid) (bool, error) { +func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) { if isIdentiyCid(cid) { return true, nil } @@ -221,7 +234,7 @@ func (s *SplitStore) Has(cid cid.Cid) (bool, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() - has, err := s.hot.Has(cid) + has, err := s.hot.Has(ctx, cid) if err != nil { return has, err @@ -232,10 +245,10 @@ func (s *SplitStore) Has(cid cid.Cid) (bool, error) { return true, nil } - return s.cold.Has(cid) + return s.cold.Has(ctx, cid) } -func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { +func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { if isIdentiyCid(cid) { data, err := decodeIdentityCid(cid) if err != nil { @@ -248,7 +261,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() - blk, err := s.hot.Get(cid) + blk, err := s.hot.Get(ctx, cid) switch err { case nil: @@ -260,7 +273,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { s.debug.LogReadMiss(cid) } - blk, err = s.cold.Get(cid) + blk, err = s.cold.Get(ctx, cid) if err == nil { stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) @@ -272,7 +285,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { } } -func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { +func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { if isIdentiyCid(cid) { data, err := decodeIdentityCid(cid) if err != nil { @@ -285,7 +298,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() - size, err := s.hot.GetSize(cid) + size, err := s.hot.GetSize(ctx, cid) switch err { case nil: @@ -297,7 +310,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { s.debug.LogReadMiss(cid) } - size, err = s.cold.GetSize(cid) + size, err = s.cold.GetSize(ctx, cid) if err == nil { stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } @@ -308,7 +321,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { } } -func (s *SplitStore) Put(blk blocks.Block) error { +func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error { if isIdentiyCid(blk.Cid()) { return nil } @@ -316,7 +329,7 @@ func (s *SplitStore) Put(blk blocks.Block) error { s.txnLk.RLock() defer s.txnLk.RUnlock() - err := s.hot.Put(blk) + err := s.hot.Put(ctx, blk) if err != nil { return err } @@ -327,7 +340,7 @@ func (s *SplitStore) Put(blk blocks.Block) error { return nil } -func (s *SplitStore) PutMany(blks []blocks.Block) error { +func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error { // filter identites idcids := 0 for _, blk := range blks { @@ -361,7 +374,7 @@ func (s *SplitStore) PutMany(blks []blocks.Block) error { s.txnLk.RLock() defer s.txnLk.RUnlock() - err := s.hot.PutMany(blks) + err := s.hot.PutMany(ctx, blks) if err != nil { return err } @@ -417,7 +430,7 @@ func (s *SplitStore) HashOnRead(enabled bool) { s.cold.HashOnRead(enabled) } -func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error { +func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error { if isIdentiyCid(cid) { data, err := decodeIdentityCid(cid) if err != nil { @@ -438,14 +451,14 @@ func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error { s.protectView(cid) defer s.viewDone() - err := s.hot.View(cid, cb) + err := s.hot.View(ctx, cid, cb) switch err { case bstore.ErrNotFound: if s.isWarm() { s.debug.LogReadMiss(cid) } - err = s.cold.View(cid, cb) + err = s.cold.View(ctx, cid, cb) if err == nil { stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } @@ -463,16 +476,33 @@ func (s *SplitStore) isWarm() bool { } // State tracking -func (s *SplitStore) Start(chain ChainAccessor) error { +func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error { s.chain = chain curTs := chain.GetHeaviestTipSet() + // precompute the upgrade boundaries + s.upgrades = make([]upgradeRange, 0, len(us)) + for _, upgrade := range us { + boundary := upgrade.Height + for _, pre := range upgrade.PreMigrations { + preMigrationBoundary := upgrade.Height - pre.StartWithin + if preMigrationBoundary < boundary { + boundary = preMigrationBoundary + } + } + + upgradeStart := boundary - upgradeBoundary + upgradeEnd := upgrade.Height + upgradeBoundary + + s.upgrades = append(s.upgrades, upgradeRange{start: upgradeStart, end: upgradeEnd}) + } + // should we warmup warmup := false // load base epoch from metadata ds // if none, then use current epoch because it's a fresh start - bs, err := s.ds.Get(baseEpochKey) + bs, err := s.ds.Get(s.ctx, baseEpochKey) switch err { case nil: s.baseEpoch = bytesToEpoch(bs) @@ -493,7 +523,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load warmup epoch from metadata ds - bs, err = s.ds.Get(warmupEpochKey) + bs, err = s.ds.Get(s.ctx, warmupEpochKey) switch err { case nil: s.warmupEpoch = bytesToEpoch(bs) @@ -506,7 +536,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load markSetSize from metadata ds to provide a size hint for marksets - bs, err = s.ds.Get(markSetSizeKey) + bs, err = s.ds.Get(s.ctx, markSetSizeKey) switch err { case nil: s.markSetSize = bytesToInt64(bs) @@ -517,7 +547,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load compactionIndex from metadata ds to provide a hint as to when to perform moving gc - bs, err = s.ds.Get(compactionIndexKey) + bs, err = s.ds.Get(s.ctx, compactionIndexKey) switch err { case nil: s.compactionIndex = bytesToInt64(bs) @@ -579,5 +609,5 @@ func (s *SplitStore) checkClosing() error { func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error { s.baseEpoch = epoch - return s.ds.Put(baseEpochKey, epochToBytes(epoch)) + return s.ds.Put(s.ctx, baseEpochKey, epochToBytes(epoch)) } diff --git a/blockstore/splitstore/splitstore_check.go b/blockstore/splitstore/splitstore_check.go index 8907abf9e..c83ed7b28 100644 --- a/blockstore/splitstore/splitstore_check.go +++ b/blockstore/splitstore/splitstore_check.go @@ -96,7 +96,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { return errStopWalk } - has, err := s.hot.Has(c) + has, err := s.hot.Has(s.ctx, c) if err != nil { return xerrors.Errorf("error checking hotstore: %w", err) } @@ -105,7 +105,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { return nil } - has, err = s.cold.Has(c) + has, err = s.cold.Has(s.ctx, c) if err != nil { return xerrors.Errorf("error checking coldstore: %w", err) } diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 4ff38a5fb..13ab90ac0 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -99,6 +99,12 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { return nil } + if s.isNearUpgrade(epoch) { + // we are near an upgrade epoch, suppress compaction + atomic.StoreInt32(&s.compacting, 0) + return nil + } + if epoch-s.baseEpoch > CompactionThreshold { // it's time to compact -- prepare the transaction and go! s.beginTxnProtect() @@ -121,6 +127,16 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { return nil } +func (s *SplitStore) isNearUpgrade(epoch abi.ChainEpoch) bool { + for _, upgrade := range s.upgrades { + if epoch >= upgrade.start && epoch <= upgrade.end { + return true + } + } + + return false +} + // transactionally protect incoming tipsets func (s *SplitStore) protectTipSets(apply []*types.TipSet) { s.txnLk.RLock() @@ -561,13 +577,13 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return xerrors.Errorf("error saving base epoch: %w", err) } - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize)) if err != nil { return xerrors.Errorf("error saving mark set size: %w", err) } s.compactionIndex++ - err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex)) if err != nil { return xerrors.Errorf("error saving compaction index: %w", err) } @@ -819,10 +835,10 @@ func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error { return cb(data) } - err := s.hot.View(c, cb) + err := s.hot.View(s.ctx, c, cb) switch err { case bstore.ErrNotFound: - return s.cold.View(c, cb) + return s.cold.View(s.ctx, c, cb) default: return err @@ -834,13 +850,13 @@ func (s *SplitStore) has(c cid.Cid) (bool, error) { return true, nil } - has, err := s.hot.Has(c) + has, err := s.hot.Has(s.ctx, c) if has || err != nil { return has, err } - return s.cold.Has(c) + return s.cold.Has(s.ctx, c) } func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { @@ -851,7 +867,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { return err } - blk, err := s.hot.Get(c) + blk, err := s.hot.Get(s.ctx, c) if err != nil { if err == bstore.ErrNotFound { log.Warnf("hotstore missing block %s", c) @@ -863,7 +879,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { batch = append(batch, blk) if len(batch) == batchSize { - err = s.cold.PutMany(batch) + err = s.cold.PutMany(s.ctx, batch) if err != nil { return xerrors.Errorf("error putting batch to coldstore: %w", err) } @@ -872,7 +888,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { } if len(batch) > 0 { - err := s.cold.PutMany(batch) + err := s.cold.PutMany(s.ctx, batch) if err != nil { return xerrors.Errorf("error putting batch to coldstore: %w", err) } @@ -1042,7 +1058,7 @@ func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error { deadCids = append(deadCids, c) } - err := s.hot.DeleteMany(deadCids) + err := s.hot.DeleteMany(s.ctx, deadCids) if err != nil { return xerrors.Errorf("error purging cold objects: %w", err) } diff --git a/blockstore/splitstore/splitstore_expose.go b/blockstore/splitstore/splitstore_expose.go index 1065e460c..6f838229d 100644 --- a/blockstore/splitstore/splitstore_expose.go +++ b/blockstore/splitstore/splitstore_expose.go @@ -20,28 +20,28 @@ func (s *SplitStore) Expose() bstore.Blockstore { return &exposedSplitStore{s: s} } -func (es *exposedSplitStore) DeleteBlock(_ cid.Cid) error { +func (es *exposedSplitStore) DeleteBlock(_ context.Context, _ cid.Cid) error { return errors.New("DeleteBlock: operation not supported") } -func (es *exposedSplitStore) DeleteMany(_ []cid.Cid) error { +func (es *exposedSplitStore) DeleteMany(_ context.Context, _ []cid.Cid) error { return errors.New("DeleteMany: operation not supported") } -func (es *exposedSplitStore) Has(c cid.Cid) (bool, error) { +func (es *exposedSplitStore) Has(ctx context.Context, c cid.Cid) (bool, error) { if isIdentiyCid(c) { return true, nil } - has, err := es.s.hot.Has(c) + has, err := es.s.hot.Has(ctx, c) if has || err != nil { return has, err } - return es.s.cold.Has(c) + return es.s.cold.Has(ctx, c) } -func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) { +func (es *exposedSplitStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { if isIdentiyCid(c) { data, err := decodeIdentityCid(c) if err != nil { @@ -51,16 +51,16 @@ func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, c) } - blk, err := es.s.hot.Get(c) + blk, err := es.s.hot.Get(ctx, c) switch err { case bstore.ErrNotFound: - return es.s.cold.Get(c) + return es.s.cold.Get(ctx, c) default: return blk, err } } -func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) { +func (es *exposedSplitStore) GetSize(ctx context.Context, c cid.Cid) (int, error) { if isIdentiyCid(c) { data, err := decodeIdentityCid(c) if err != nil { @@ -70,21 +70,21 @@ func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) { return len(data), nil } - size, err := es.s.hot.GetSize(c) + size, err := es.s.hot.GetSize(ctx, c) switch err { case bstore.ErrNotFound: - return es.s.cold.GetSize(c) + return es.s.cold.GetSize(ctx, c) default: return size, err } } -func (es *exposedSplitStore) Put(blk blocks.Block) error { - return es.s.Put(blk) +func (es *exposedSplitStore) Put(ctx context.Context, blk blocks.Block) error { + return es.s.Put(ctx, blk) } -func (es *exposedSplitStore) PutMany(blks []blocks.Block) error { - return es.s.PutMany(blks) +func (es *exposedSplitStore) PutMany(ctx context.Context, blks []blocks.Block) error { + return es.s.PutMany(ctx, blks) } func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { @@ -93,7 +93,7 @@ func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, e func (es *exposedSplitStore) HashOnRead(enabled bool) {} -func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error { +func (es *exposedSplitStore) View(ctx context.Context, c cid.Cid, f func([]byte) error) error { if isIdentiyCid(c) { data, err := decodeIdentityCid(c) if err != nil { @@ -103,10 +103,10 @@ func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error { return f(data) } - err := es.s.hot.View(c, f) + err := es.s.hot.View(ctx, c, f) switch err { case bstore.ErrNotFound: - return es.s.cold.View(c, f) + return es.s.cold.View(ctx, c, f) default: return err diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index df9984d41..7d84e0a4c 100644 --- a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/mock" @@ -29,6 +30,7 @@ func init() { } func testSplitStore(t *testing.T, cfg *Config) { + ctx := context.Background() chain := &mockChain{t: t} // the myriads of stores @@ -38,7 +40,7 @@ func testSplitStore(t *testing.T, cfg *Config) { // this is necessary to avoid the garbage mock puts in the blocks garbage := blocks.NewBlock([]byte{1, 2, 3}) - err := cold.Put(garbage) + err := cold.Put(ctx, garbage) if err != nil { t.Fatal(err) } @@ -59,21 +61,21 @@ func testSplitStore(t *testing.T, cfg *Config) { t.Fatal(err) } - err = cold.Put(blk) + err = cold.Put(ctx, blk) if err != nil { t.Fatal(err) } // create a garbage block that is protected with a rgistered protector protected := blocks.NewBlock([]byte("protected!")) - err = hot.Put(protected) + err = hot.Put(ctx, protected) if err != nil { t.Fatal(err) } // and another one that is not protected unprotected := blocks.NewBlock([]byte("unprotected!")) - err = hot.Put(unprotected) + err = hot.Put(ctx, unprotected) if err != nil { t.Fatal(err) } @@ -90,7 +92,7 @@ func testSplitStore(t *testing.T, cfg *Config) { return protect(protected.Cid()) }) - err = ss.Start(chain) + err = ss.Start(chain, nil) if err != nil { t.Fatal(err) } @@ -108,11 +110,11 @@ func testSplitStore(t *testing.T, cfg *Config) { if err != nil { t.Fatal(err) } - err = ss.Put(stateRoot) + err = ss.Put(ctx, stateRoot) if err != nil { t.Fatal(err) } - err = ss.Put(sblk) + err = ss.Put(ctx, sblk) if err != nil { t.Fatal(err) } @@ -175,7 +177,7 @@ func testSplitStore(t *testing.T, cfg *Config) { } // ensure our protected block is still there - has, err := hot.Has(protected.Cid()) + has, err := hot.Has(ctx, protected.Cid()) if err != nil { t.Fatal(err) } @@ -185,7 +187,7 @@ func testSplitStore(t *testing.T, cfg *Config) { } // ensure our unprotected block is in the coldstore now - has, err = hot.Has(unprotected.Cid()) + has, err = hot.Has(ctx, unprotected.Cid()) if err != nil { t.Fatal(err) } @@ -194,7 +196,7 @@ func testSplitStore(t *testing.T, cfg *Config) { t.Fatal("unprotected block is still in hotstore") } - has, err = cold.Has(unprotected.Cid()) + has, err = cold.Has(ctx, unprotected.Cid()) if err != nil { t.Fatal(err) } @@ -220,6 +222,141 @@ func TestSplitStoreCompactionWithBadger(t *testing.T) { testSplitStore(t, &Config{MarkSetType: "badger"}) } +func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) { + ctx := context.Background() + chain := &mockChain{t: t} + + // the myriads of stores + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + hot := newMockStore() + cold := newMockStore() + + // this is necessary to avoid the garbage mock puts in the blocks + garbage := blocks.NewBlock([]byte{1, 2, 3}) + err := cold.Put(ctx, garbage) + if err != nil { + t.Fatal(err) + } + + // genesis + genBlock := mock.MkBlock(nil, 0, 0) + genBlock.Messages = garbage.Cid() + genBlock.ParentMessageReceipts = garbage.Cid() + genBlock.ParentStateRoot = garbage.Cid() + genBlock.Timestamp = uint64(time.Now().Unix()) + + genTs := mock.TipSet(genBlock) + chain.push(genTs) + + // put the genesis block to cold store + blk, err := genBlock.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + + err = cold.Put(ctx, blk) + if err != nil { + t.Fatal(err) + } + + // open the splitstore + ss, err := Open("", ds, hot, cold, &Config{MarkSetType: "map"}) + if err != nil { + t.Fatal(err) + } + defer ss.Close() //nolint + + // create an upgrade schedule that will suppress compaction during the test + upgradeBoundary = 0 + upgrade := stmgr.Upgrade{ + Height: 10, + PreMigrations: []stmgr.PreMigration{{StartWithin: 10}}, + } + + err = ss.Start(chain, []stmgr.Upgrade{upgrade}) + if err != nil { + t.Fatal(err) + } + + mkBlock := func(curTs *types.TipSet, i int, stateRoot blocks.Block) *types.TipSet { + blk := mock.MkBlock(curTs, uint64(i), uint64(i)) + + blk.Messages = garbage.Cid() + blk.ParentMessageReceipts = garbage.Cid() + blk.ParentStateRoot = stateRoot.Cid() + blk.Timestamp = uint64(time.Now().Unix()) + + sblk, err := blk.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + err = ss.Put(ctx, stateRoot) + if err != nil { + t.Fatal(err) + } + err = ss.Put(ctx, sblk) + if err != nil { + t.Fatal(err) + } + ts := mock.TipSet(blk) + chain.push(ts) + + return ts + } + + waitForCompaction := func() { + for atomic.LoadInt32(&ss.compacting) == 1 { + time.Sleep(100 * time.Millisecond) + } + } + + curTs := genTs + for i := 1; i < 10; i++ { + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) + waitForCompaction() + } + + countBlocks := func(bs blockstore.Blockstore) int { + count := 0 + _ = bs.(blockstore.BlockstoreIterator).ForEachKey(func(_ cid.Cid) error { + count++ + return nil + }) + return count + } + + // we should not have compacted due to suppression and everything should still be hot + hotCnt := countBlocks(hot) + coldCnt := countBlocks(cold) + + if hotCnt != 20 { + t.Errorf("expected %d blocks, but got %d", 20, hotCnt) + } + + if coldCnt != 2 { + t.Errorf("expected %d blocks, but got %d", 2, coldCnt) + } + + // put some more blocks, now we should compact + for i := 10; i < 20; i++ { + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) + waitForCompaction() + } + + hotCnt = countBlocks(hot) + coldCnt = countBlocks(cold) + + if hotCnt != 24 { + t.Errorf("expected %d blocks, but got %d", 24, hotCnt) + } + + if coldCnt != 18 { + t.Errorf("expected %d blocks, but got %d", 18, coldCnt) + } +} + type mockChain struct { t testing.TB @@ -296,7 +433,7 @@ func newMockStore() *mockStore { return &mockStore{set: make(map[cid.Cid]blocks.Block)} } -func (b *mockStore) Has(cid cid.Cid) (bool, error) { +func (b *mockStore) Has(_ context.Context, cid cid.Cid) (bool, error) { b.mx.Lock() defer b.mx.Unlock() _, ok := b.set[cid] @@ -305,7 +442,7 @@ func (b *mockStore) Has(cid cid.Cid) (bool, error) { func (b *mockStore) HashOnRead(hor bool) {} -func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) { +func (b *mockStore) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) { b.mx.Lock() defer b.mx.Unlock() @@ -316,8 +453,8 @@ func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) { return blk, nil } -func (b *mockStore) GetSize(cid cid.Cid) (int, error) { - blk, err := b.Get(cid) +func (b *mockStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + blk, err := b.Get(ctx, cid) if err != nil { return 0, err } @@ -325,15 +462,15 @@ func (b *mockStore) GetSize(cid cid.Cid) (int, error) { return len(blk.RawData()), nil } -func (b *mockStore) View(cid cid.Cid, f func([]byte) error) error { - blk, err := b.Get(cid) +func (b *mockStore) View(ctx context.Context, cid cid.Cid, f func([]byte) error) error { + blk, err := b.Get(ctx, cid) if err != nil { return err } return f(blk.RawData()) } -func (b *mockStore) Put(blk blocks.Block) error { +func (b *mockStore) Put(_ context.Context, blk blocks.Block) error { b.mx.Lock() defer b.mx.Unlock() @@ -341,7 +478,7 @@ func (b *mockStore) Put(blk blocks.Block) error { return nil } -func (b *mockStore) PutMany(blks []blocks.Block) error { +func (b *mockStore) PutMany(_ context.Context, blks []blocks.Block) error { b.mx.Lock() defer b.mx.Unlock() @@ -351,7 +488,7 @@ func (b *mockStore) PutMany(blks []blocks.Block) error { return nil } -func (b *mockStore) DeleteBlock(cid cid.Cid) error { +func (b *mockStore) DeleteBlock(_ context.Context, cid cid.Cid) error { b.mx.Lock() defer b.mx.Unlock() @@ -359,7 +496,7 @@ func (b *mockStore) DeleteBlock(cid cid.Cid) error { return nil } -func (b *mockStore) DeleteMany(cids []cid.Cid) error { +func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error { b.mx.Lock() defer b.mx.Unlock() diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go index 216de571a..2a39f6c9d 100644 --- a/blockstore/splitstore/splitstore_warmup.go +++ b/blockstore/splitstore/splitstore_warmup.go @@ -75,7 +75,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { count++ - has, err := s.hot.Has(c) + has, err := s.hot.Has(s.ctx, c) if err != nil { return err } @@ -84,7 +84,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { return nil } - blk, err := s.cold.Get(c) + blk, err := s.cold.Get(s.ctx, c) if err != nil { if err == bstore.ErrNotFound { missing++ @@ -97,7 +97,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { batchHot = append(batchHot, blk) if len(batchHot) == batchSize { - err = s.hot.PutMany(batchHot) + err = s.hot.PutMany(s.ctx, batchHot) if err != nil { return err } @@ -112,7 +112,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { } if len(batchHot) > 0 { - err = s.hot.PutMany(batchHot) + err = s.hot.PutMany(s.ctx, batchHot) if err != nil { return err } @@ -121,13 +121,13 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing) s.markSetSize = count + count>>2 // overestimate a bit - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize)) if err != nil { log.Warnf("error saving mark set size: %s", err) } // save the warmup epoch - err = s.ds.Put(warmupEpochKey, epochToBytes(epoch)) + err = s.ds.Put(s.ctx, warmupEpochKey, epochToBytes(epoch)) if err != nil { return xerrors.Errorf("error saving warm up epoch: %w", err) } @@ -136,7 +136,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { s.mx.Unlock() // also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes - err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex)) if err != nil { return xerrors.Errorf("error saving compaction index: %w", err) } diff --git a/blockstore/sync.go b/blockstore/sync.go index 848ccd19d..1b4ad8297 100644 --- a/blockstore/sync.go +++ b/blockstore/sync.go @@ -20,53 +20,53 @@ type SyncBlockstore struct { bs MemBlockstore // specifically use a memStore to save indirection overhead. } -func (m *SyncBlockstore) DeleteBlock(k cid.Cid) error { +func (m *SyncBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.DeleteBlock(k) + return m.bs.DeleteBlock(ctx, k) } -func (m *SyncBlockstore) DeleteMany(ks []cid.Cid) error { +func (m *SyncBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.DeleteMany(ks) + return m.bs.DeleteMany(ctx, ks) } -func (m *SyncBlockstore) Has(k cid.Cid) (bool, error) { +func (m *SyncBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.Has(k) + return m.bs.Has(ctx, k) } -func (m *SyncBlockstore) View(k cid.Cid, callback func([]byte) error) error { +func (m *SyncBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.View(k, callback) + return m.bs.View(ctx, k, callback) } -func (m *SyncBlockstore) Get(k cid.Cid) (blocks.Block, error) { +func (m *SyncBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.Get(k) + return m.bs.Get(ctx, k) } -func (m *SyncBlockstore) GetSize(k cid.Cid) (int, error) { +func (m *SyncBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.GetSize(k) + return m.bs.GetSize(ctx, k) } -func (m *SyncBlockstore) Put(b blocks.Block) error { +func (m *SyncBlockstore) Put(ctx context.Context, b blocks.Block) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.Put(b) + return m.bs.Put(ctx, b) } -func (m *SyncBlockstore) PutMany(bs []blocks.Block) error { +func (m *SyncBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.PutMany(bs) + return m.bs.PutMany(ctx, bs) } func (m *SyncBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { diff --git a/blockstore/timed.go b/blockstore/timed.go index b279943b6..8f226b0b1 100644 --- a/blockstore/timed.go +++ b/blockstore/timed.go @@ -92,28 +92,28 @@ func (t *TimedCacheBlockstore) rotate() { t.mu.Unlock() } -func (t *TimedCacheBlockstore) Put(b blocks.Block) error { +func (t *TimedCacheBlockstore) Put(ctx context.Context, b blocks.Block) error { // Don't check the inactive set here. We want to keep this block for at // least one interval. t.mu.Lock() defer t.mu.Unlock() - return t.active.Put(b) + return t.active.Put(ctx, b) } -func (t *TimedCacheBlockstore) PutMany(bs []blocks.Block) error { +func (t *TimedCacheBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { t.mu.Lock() defer t.mu.Unlock() - return t.active.PutMany(bs) + return t.active.PutMany(ctx, bs) } -func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) error { +func (t *TimedCacheBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { // The underlying blockstore is always a "mem" blockstore so there's no difference, // from a performance perspective, between view & get. So we call Get to avoid // calling an arbitrary callback while holding a lock. t.mu.RLock() - block, err := t.active.Get(k) + block, err := t.active.Get(ctx, k) if err == ErrNotFound { - block, err = t.inactive.Get(k) + block, err = t.inactive.Get(ctx, k) } t.mu.RUnlock() @@ -123,51 +123,51 @@ func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) erro return callback(block.RawData()) } -func (t *TimedCacheBlockstore) Get(k cid.Cid) (blocks.Block, error) { +func (t *TimedCacheBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { t.mu.RLock() defer t.mu.RUnlock() - b, err := t.active.Get(k) + b, err := t.active.Get(ctx, k) if err == ErrNotFound { - b, err = t.inactive.Get(k) + b, err = t.inactive.Get(ctx, k) } return b, err } -func (t *TimedCacheBlockstore) GetSize(k cid.Cid) (int, error) { +func (t *TimedCacheBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { t.mu.RLock() defer t.mu.RUnlock() - size, err := t.active.GetSize(k) + size, err := t.active.GetSize(ctx, k) if err == ErrNotFound { - size, err = t.inactive.GetSize(k) + size, err = t.inactive.GetSize(ctx, k) } return size, err } -func (t *TimedCacheBlockstore) Has(k cid.Cid) (bool, error) { +func (t *TimedCacheBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { t.mu.RLock() defer t.mu.RUnlock() - if has, err := t.active.Has(k); err != nil { + if has, err := t.active.Has(ctx, k); err != nil { return false, err } else if has { return true, nil } - return t.inactive.Has(k) + return t.inactive.Has(ctx, k) } func (t *TimedCacheBlockstore) HashOnRead(_ bool) { // no-op } -func (t *TimedCacheBlockstore) DeleteBlock(k cid.Cid) error { +func (t *TimedCacheBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { t.mu.Lock() defer t.mu.Unlock() - return multierr.Combine(t.active.DeleteBlock(k), t.inactive.DeleteBlock(k)) + return multierr.Combine(t.active.DeleteBlock(ctx, k), t.inactive.DeleteBlock(ctx, k)) } -func (t *TimedCacheBlockstore) DeleteMany(ks []cid.Cid) error { +func (t *TimedCacheBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error { t.mu.Lock() defer t.mu.Unlock() - return multierr.Combine(t.active.DeleteMany(ks), t.inactive.DeleteMany(ks)) + return multierr.Combine(t.active.DeleteMany(ctx, ks), t.inactive.DeleteMany(ctx, ks)) } func (t *TimedCacheBlockstore) AllKeysChan(_ context.Context) (<-chan cid.Cid, error) { diff --git a/blockstore/timed_test.go b/blockstore/timed_test.go index d5fefff94..16795f047 100644 --- a/blockstore/timed_test.go +++ b/blockstore/timed_test.go @@ -19,6 +19,8 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { tc.clock = mClock tc.doneRotatingCh = make(chan struct{}) + ctx := context.Background() + _ = tc.Start(context.Background()) mClock.Add(1) // IDK why it is needed but it makes it work @@ -27,18 +29,18 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { }() b1 := blocks.NewBlock([]byte("foo")) - require.NoError(t, tc.Put(b1)) + require.NoError(t, tc.Put(ctx, b1)) b2 := blocks.NewBlock([]byte("bar")) - require.NoError(t, tc.Put(b2)) + require.NoError(t, tc.Put(ctx, b2)) b3 := blocks.NewBlock([]byte("baz")) - b1out, err := tc.Get(b1.Cid()) + b1out, err := tc.Get(ctx, b1.Cid()) require.NoError(t, err) require.Equal(t, b1.RawData(), b1out.RawData()) - has, err := tc.Has(b1.Cid()) + has, err := tc.Has(ctx, b1.Cid()) require.NoError(t, err) require.True(t, has) @@ -46,17 +48,17 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { <-tc.doneRotatingCh // We should still have everything. - has, err = tc.Has(b1.Cid()) + has, err = tc.Has(ctx, b1.Cid()) require.NoError(t, err) require.True(t, has) - has, err = tc.Has(b2.Cid()) + has, err = tc.Has(ctx, b2.Cid()) require.NoError(t, err) require.True(t, has) // extend b2, add b3. - require.NoError(t, tc.Put(b2)) - require.NoError(t, tc.Put(b3)) + require.NoError(t, tc.Put(ctx, b2)) + require.NoError(t, tc.Put(ctx, b3)) // all keys once. allKeys, err := tc.AllKeysChan(context.Background()) @@ -71,15 +73,15 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { <-tc.doneRotatingCh // should still have b2, and b3, but not b1 - has, err = tc.Has(b1.Cid()) + has, err = tc.Has(ctx, b1.Cid()) require.NoError(t, err) require.False(t, has) - has, err = tc.Has(b2.Cid()) + has, err = tc.Has(ctx, b2.Cid()) require.NoError(t, err) require.True(t, has) - has, err = tc.Has(b3.Cid()) + has, err = tc.Has(ctx, b3.Cid()) require.NoError(t, err) require.True(t, has) } diff --git a/blockstore/union.go b/blockstore/union.go index a99ba2591..f54a86590 100644 --- a/blockstore/union.go +++ b/blockstore/union.go @@ -19,72 +19,72 @@ func Union(stores ...Blockstore) Blockstore { return unionBlockstore(stores) } -func (m unionBlockstore) Has(cid cid.Cid) (has bool, err error) { +func (m unionBlockstore) Has(ctx context.Context, cid cid.Cid) (has bool, err error) { for _, bs := range m { - if has, err = bs.Has(cid); has || err != nil { + if has, err = bs.Has(ctx, cid); has || err != nil { break } } return has, err } -func (m unionBlockstore) Get(cid cid.Cid) (blk blocks.Block, err error) { +func (m unionBlockstore) Get(ctx context.Context, cid cid.Cid) (blk blocks.Block, err error) { for _, bs := range m { - if blk, err = bs.Get(cid); err == nil || err != ErrNotFound { + if blk, err = bs.Get(ctx, cid); err == nil || err != ErrNotFound { break } } return blk, err } -func (m unionBlockstore) View(cid cid.Cid, callback func([]byte) error) (err error) { +func (m unionBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) (err error) { for _, bs := range m { - if err = bs.View(cid, callback); err == nil || err != ErrNotFound { + if err = bs.View(ctx, cid, callback); err == nil || err != ErrNotFound { break } } return err } -func (m unionBlockstore) GetSize(cid cid.Cid) (size int, err error) { +func (m unionBlockstore) GetSize(ctx context.Context, cid cid.Cid) (size int, err error) { for _, bs := range m { - if size, err = bs.GetSize(cid); err == nil || err != ErrNotFound { + if size, err = bs.GetSize(ctx, cid); err == nil || err != ErrNotFound { break } } return size, err } -func (m unionBlockstore) Put(block blocks.Block) (err error) { +func (m unionBlockstore) Put(ctx context.Context, block blocks.Block) (err error) { for _, bs := range m { - if err = bs.Put(block); err != nil { + if err = bs.Put(ctx, block); err != nil { break } } return err } -func (m unionBlockstore) PutMany(blks []blocks.Block) (err error) { +func (m unionBlockstore) PutMany(ctx context.Context, blks []blocks.Block) (err error) { for _, bs := range m { - if err = bs.PutMany(blks); err != nil { + if err = bs.PutMany(ctx, blks); err != nil { break } } return err } -func (m unionBlockstore) DeleteBlock(cid cid.Cid) (err error) { +func (m unionBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) (err error) { for _, bs := range m { - if err = bs.DeleteBlock(cid); err != nil { + if err = bs.DeleteBlock(ctx, cid); err != nil { break } } return err } -func (m unionBlockstore) DeleteMany(cids []cid.Cid) (err error) { +func (m unionBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) (err error) { for _, bs := range m { - if err = bs.DeleteMany(cids); err != nil { + if err = bs.DeleteMany(ctx, cids); err != nil { break } } diff --git a/blockstore/union_test.go b/blockstore/union_test.go index b62026892..3ae8c1d49 100644 --- a/blockstore/union_test.go +++ b/blockstore/union_test.go @@ -15,79 +15,81 @@ var ( ) func TestUnionBlockstore_Get(t *testing.T) { + ctx := context.Background() m1 := NewMemory() m2 := NewMemory() - _ = m1.Put(b1) - _ = m2.Put(b2) + _ = m1.Put(ctx, b1) + _ = m2.Put(ctx, b2) u := Union(m1, m2) - v1, err := u.Get(b1.Cid()) + v1, err := u.Get(ctx, b1.Cid()) require.NoError(t, err) require.Equal(t, b1.RawData(), v1.RawData()) - v2, err := u.Get(b2.Cid()) + v2, err := u.Get(ctx, b2.Cid()) require.NoError(t, err) require.Equal(t, b2.RawData(), v2.RawData()) } func TestUnionBlockstore_Put_PutMany_Delete_AllKeysChan(t *testing.T) { + ctx := context.Background() m1 := NewMemory() m2 := NewMemory() u := Union(m1, m2) - err := u.Put(b0) + err := u.Put(ctx, b0) require.NoError(t, err) var has bool // write was broadcasted to all stores. - has, _ = m1.Has(b0.Cid()) + has, _ = m1.Has(ctx, b0.Cid()) require.True(t, has) - has, _ = m2.Has(b0.Cid()) + has, _ = m2.Has(ctx, b0.Cid()) require.True(t, has) - has, _ = u.Has(b0.Cid()) + has, _ = u.Has(ctx, b0.Cid()) require.True(t, has) // put many. - err = u.PutMany([]blocks.Block{b1, b2}) + err = u.PutMany(ctx, []blocks.Block{b1, b2}) require.NoError(t, err) // write was broadcasted to all stores. - has, _ = m1.Has(b1.Cid()) + has, _ = m1.Has(ctx, b1.Cid()) require.True(t, has) - has, _ = m1.Has(b2.Cid()) + has, _ = m1.Has(ctx, b2.Cid()) require.True(t, has) - has, _ = m2.Has(b1.Cid()) + has, _ = m2.Has(ctx, b1.Cid()) require.True(t, has) - has, _ = m2.Has(b2.Cid()) + has, _ = m2.Has(ctx, b2.Cid()) require.True(t, has) // also in the union store. - has, _ = u.Has(b1.Cid()) + has, _ = u.Has(ctx, b1.Cid()) require.True(t, has) - has, _ = u.Has(b2.Cid()) + has, _ = u.Has(ctx, b2.Cid()) require.True(t, has) // deleted from all stores. - err = u.DeleteBlock(b1.Cid()) + err = u.DeleteBlock(ctx, b1.Cid()) require.NoError(t, err) - has, _ = u.Has(b1.Cid()) + has, _ = u.Has(ctx, b1.Cid()) require.False(t, has) - has, _ = m1.Has(b1.Cid()) + has, _ = m1.Has(ctx, b1.Cid()) require.False(t, has) - has, _ = m2.Has(b1.Cid()) + has, _ = m2.Has(ctx, b1.Cid()) require.False(t, has) // check that AllKeysChan returns b0 and b2, twice (once per backing store) diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 6581da48e..1e4efea1a 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 6b904873f..08f41f3c5 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index d3363dd17..edfabcc6b 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/params_2k.go b/build/params_2k.go index b9db0a467..6c0918c51 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -17,7 +17,7 @@ import ( const BootstrappersFile = "" const GenesisFile = "" -const GenesisNetworkVersion = network.Version14 +const GenesisNetworkVersion = network.Version15 var UpgradeBreezeHeight = abi.ChainEpoch(-1) @@ -47,6 +47,8 @@ var UpgradeHyperdriveHeight = abi.ChainEpoch(-16) var UpgradeChocolateHeight = abi.ChainEpoch(-17) +var UpgradeSnapDealsHeight = abi.ChainEpoch(-18) + var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } diff --git a/build/params_butterfly.go b/build/params_butterfly.go index 73e1be7fc..0a8ba8898 100644 --- a/build/params_butterfly.go +++ b/build/params_butterfly.go @@ -41,6 +41,8 @@ const UpgradeNorwegianHeight = -14 const UpgradeTurboHeight = -15 const UpgradeHyperdriveHeight = -16 const UpgradeChocolateHeight = -17 +// 2022-01-13T19:00:00Z +const UpgradeSnapDealsHeight = 18742 func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30)) diff --git a/build/params_calibnet.go b/build/params_calibnet.go index 8cd99d642..16d77c7e6 100644 --- a/build/params_calibnet.go +++ b/build/params_calibnet.go @@ -54,6 +54,8 @@ const UpgradeHyperdriveHeight = 420 const UpgradeChocolateHeight = 312746 +const UpgradeSnapDealsHeight = 99999999 + func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30)) policy.SetSupportedProofTypes( diff --git a/build/params_interop.go b/build/params_interop.go index de5ee9a12..66033937c 100644 --- a/build/params_interop.go +++ b/build/params_interop.go @@ -47,6 +47,7 @@ var UpgradeTurboHeight = abi.ChainEpoch(-15) var UpgradeHyperdriveHeight = abi.ChainEpoch(-16) var UpgradeChocolateHeight = abi.ChainEpoch(-17) +var UpgradeSnapDealsHeight = abi.ChainEpoch(-18) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/params_mainnet.go b/build/params_mainnet.go index 0c8c53ba8..a4781f1ff 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -62,18 +62,20 @@ const UpgradeNorwegianHeight = 665280 const UpgradeTurboHeight = 712320 // 2021-06-30T22:00:00Z -var UpgradeHyperdriveHeight = abi.ChainEpoch(892800) +const UpgradeHyperdriveHeight = 892800 // 2021-10-26T13:30:00Z -var UpgradeChocolateHeight = abi.ChainEpoch(1231620) +const UpgradeChocolateHeight = 1231620 + +var UpgradeSnapDealsHeight = abi.ChainEpoch(999999999999) func init() { if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { SetAddressNetwork(address.Mainnet) } - if os.Getenv("LOTUS_DISABLE_CHOCOLATE") == "1" { - UpgradeChocolateHeight = math.MaxInt64 + if os.Getenv("LOTUS_DISABLE_SNAPDEALS") == "1" { + UpgradeSnapDealsHeight = math.MaxInt64 } Devnet = false diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 0a242f6f2..704c84639 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -34,7 +34,7 @@ const NewestNetworkVersion = network.Version{{.latestNetworkVersion}} /* inline-gen start */ -const NewestNetworkVersion = network.Version14 +const NewestNetworkVersion = network.Version15 /* inline-gen end */ diff --git a/build/params_testground.go b/build/params_testground.go index 48b76f82c..539e06b45 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -99,6 +99,7 @@ var ( UpgradeTurboHeight abi.ChainEpoch = -14 UpgradeHyperdriveHeight abi.ChainEpoch = -15 UpgradeChocolateHeight abi.ChainEpoch = -16 + UpgradeSnapDealsHeight abi.ChainEpoch = -17 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/version.go b/build/version.go index bc2b06b25..3656dc9ad 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.13.2-rc7" +const BuildVersion = "1.13.3-dev" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index 249ce133f..57ea510bb 100644 --- a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -23,6 +23,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -50,6 +52,10 @@ func init() { builtin.RegisterActorState(builtin6.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var Methods = builtin4.MethodsAccount @@ -75,6 +81,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.AccountActorCodeID: return load6(store, act.Head) + case builtin7.AccountActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -100,6 +109,9 @@ func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, case actors.Version6: return make6(store, addr) + case actors.Version7: + return make7(store, addr) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -125,6 +137,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.AccountActorCodeID, nil + case actors.Version7: + return builtin7.AccountActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/account/v7.go b/chain/actors/builtin/account/v7.go new file mode 100644 index 000000000..883776cf8 --- /dev/null +++ b/chain/actors/builtin/account/v7.go @@ -0,0 +1,40 @@ +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + account7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/account" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, addr address.Address) (State, error) { + out := state7{store: store} + out.State = account7.State{Address: addr} + return &out, nil +} + +type state7 struct { + account7.State + store adt.Store +} + +func (s *state7) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go index ebfe2df2e..febbca479 100644 --- a/chain/actors/builtin/builtin.go +++ b/chain/actors/builtin/builtin.go @@ -23,46 +23,50 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" smoothing6 "github.com/filecoin-project/specs-actors/v6/actors/util/smoothing" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" - miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" - proof6 "github.com/filecoin-project/specs-actors/v6/actors/runtime/proof" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" ) -var SystemActorAddr = builtin6.SystemActorAddr -var BurntFundsActorAddr = builtin6.BurntFundsActorAddr -var CronActorAddr = builtin6.CronActorAddr +var SystemActorAddr = builtin7.SystemActorAddr +var BurntFundsActorAddr = builtin7.BurntFundsActorAddr +var CronActorAddr = builtin7.CronActorAddr var SaftAddress = makeAddress("t0122") var ReserveAddress = makeAddress("t090") var RootVerifierAddress = makeAddress("t080") var ( - ExpectedLeadersPerEpoch = builtin6.ExpectedLeadersPerEpoch + ExpectedLeadersPerEpoch = builtin7.ExpectedLeadersPerEpoch ) const ( - EpochDurationSeconds = builtin6.EpochDurationSeconds - EpochsInDay = builtin6.EpochsInDay - SecondsInDay = builtin6.SecondsInDay + EpochDurationSeconds = builtin7.EpochDurationSeconds + EpochsInDay = builtin7.EpochsInDay + SecondsInDay = builtin7.SecondsInDay ) const ( - MethodSend = builtin6.MethodSend - MethodConstructor = builtin6.MethodConstructor + MethodSend = builtin7.MethodSend + MethodConstructor = builtin7.MethodConstructor ) // These are all just type aliases across actor versions. In the future, that might change // and we might need to do something fancier. -type SectorInfo = proof6.SectorInfo -type PoStProof = proof6.PoStProof +type SectorInfo = proof7.SectorInfo +type ExtendedSectorInfo = proof7.ExtendedSectorInfo +type PoStProof = proof7.PoStProof type FilterEstimate = smoothing0.FilterEstimate func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { - return miner6.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) + return miner7.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) } func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate { @@ -101,6 +105,12 @@ func FromV6FilterEstimate(v6 smoothing6.FilterEstimate) FilterEstimate { } +func FromV7FilterEstimate(v7 smoothing7.FilterEstimate) FilterEstimate { + + return (FilterEstimate)(v7) + +} + type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader) @@ -138,6 +148,9 @@ func ActorNameByCode(c cid.Cid) string { case builtin6.IsBuiltinActor(c): return builtin6.ActorNameByCode(c) + case builtin7.IsBuiltinActor(c): + return builtin7.ActorNameByCode(c) + default: return "" } @@ -169,6 +182,10 @@ func IsBuiltinActor(c cid.Cid) bool { return true } + if builtin7.IsBuiltinActor(c) { + return true + } + return false } @@ -198,6 +215,10 @@ func IsAccountActor(c cid.Cid) bool { return true } + if c == builtin7.AccountActorCodeID { + return true + } + return false } @@ -227,6 +248,10 @@ func IsStorageMinerActor(c cid.Cid) bool { return true } + if c == builtin7.StorageMinerActorCodeID { + return true + } + return false } @@ -256,6 +281,10 @@ func IsMultisigActor(c cid.Cid) bool { return true } + if c == builtin7.MultisigActorCodeID { + return true + } + return false } @@ -285,6 +314,10 @@ func IsPaymentChannelActor(c cid.Cid) bool { return true } + if c == builtin7.PaymentChannelActorCodeID { + return true + } + return false } diff --git a/chain/actors/builtin/builtin.go.template b/chain/actors/builtin/builtin.go.template index 031c05182..f5d5eb77b 100644 --- a/chain/actors/builtin/builtin.go.template +++ b/chain/actors/builtin/builtin.go.template @@ -45,6 +45,7 @@ const ( // These are all just type aliases across actor versions. In the future, that might change // and we might need to do something fancier. type SectorInfo = proof{{.latestVersion}}.SectorInfo +type ExtendedSectorInfo = proof{{.latestVersion}}.ExtendedSectorInfo type PoStProof = proof{{.latestVersion}}.PoStProof type FilterEstimate = smoothing0.FilterEstimate diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go index 9178a44ab..f27a14ac7 100644 --- a/chain/actors/builtin/cron/cron.go +++ b/chain/actors/builtin/cron/cron.go @@ -17,6 +17,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func MakeState(store adt.Store, av actors.Version) (State, error) { @@ -40,6 +42,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -65,14 +70,17 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.CronActorCodeID, nil + case actors.Version7: + return builtin7.CronActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) } var ( - Address = builtin6.CronActorAddr - Methods = builtin6.MethodsCron + Address = builtin7.CronActorAddr + Methods = builtin7.MethodsCron ) type State interface { diff --git a/chain/actors/builtin/cron/v7.go b/chain/actors/builtin/cron/v7.go new file mode 100644 index 000000000..e5538c89f --- /dev/null +++ b/chain/actors/builtin/cron/v7.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + cron7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/cron" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = *cron7.ConstructState(cron7.BuiltInEntries()) + return &out, nil +} + +type state7 struct { + cron7.State + store adt.Store +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index ee06eeab7..737241ffe 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -25,6 +25,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -52,11 +54,15 @@ func init() { builtin.RegisterActorState(builtin6.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.InitActorAddr - Methods = builtin6.MethodsInit + Address = builtin7.InitActorAddr + Methods = builtin7.MethodsInit ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -80,6 +86,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.InitActorCodeID: return load6(store, act.Head) + case builtin7.InitActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -105,6 +114,9 @@ func MakeState(store adt.Store, av actors.Version, networkName string) (State, e case actors.Version6: return make6(store, networkName) + case actors.Version7: + return make7(store, networkName) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -130,6 +142,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.InitActorCodeID, nil + case actors.Version7: + return builtin7.InitActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/init/v7.go b/chain/actors/builtin/init/v7.go new file mode 100644 index 000000000..341aa52cd --- /dev/null +++ b/chain/actors/builtin/init/v7.go @@ -0,0 +1,114 @@ +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, networkName string) (State, error) { + out := state7{store: store} + + s, err := init7.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + init7.State + store adt.Store +} + +func (s *state7) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state7) MapAddressToNewID(address address.Address) (address.Address, error) { + return s.State.MapAddressToNewID(s.store, address) +} + +func (s *state7) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state7) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state7) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state7) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state7) Remove(addrs ...address.Address) (err error) { + m, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state7) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state7) AddressMap() (adt.Map, error) { + return adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 7e35f3919..6781b55e3 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -25,6 +25,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -56,11 +58,15 @@ func init() { builtin.RegisterActorState(builtin6.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.StorageMarketActorAddr - Methods = builtin6.MethodsMarket + Address = builtin7.StorageMarketActorAddr + Methods = builtin7.MethodsMarket ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -84,6 +90,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.StorageMarketActorCodeID: return load6(store, act.Head) + case builtin7.StorageMarketActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -109,6 +118,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -134,6 +146,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.StorageMarketActorCodeID, nil + case actors.Version7: + return builtin7.StorageMarketActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -211,6 +226,9 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora case actors.Version6: return decodePublishStorageDealsReturn6(b) + case actors.Version7: + return decodePublishStorageDealsReturn7(b) + } return nil, xerrors.Errorf("unknown actor version %d", av) } diff --git a/chain/actors/builtin/market/v7.go b/chain/actors/builtin/market/v7.go new file mode 100644 index 000000000..553913146 --- /dev/null +++ b/chain/actors/builtin/market/v7.go @@ -0,0 +1,252 @@ +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + + market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + + s, err := market7.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + market7.State + store adt.Store +} + +func (s *state7) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state7) BalancesChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState7.State.EscrowTable) || !s.State.LockedTable.Equals(otherState7.State.LockedTable), nil +} + +func (s *state7) StatesChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState7.State.States), nil +} + +func (s *state7) States() (DealStates, error) { + stateArray, err := adt7.AsArray(s.store, s.State.States, market7.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates7{stateArray}, nil +} + +func (s *state7) ProposalsChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState7.State.Proposals), nil +} + +func (s *state7) Proposals() (DealProposals, error) { + proposalArray, err := adt7.AsArray(s.store, s.State.Proposals, market7.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals7{proposalArray}, nil +} + +func (s *state7) EscrowTable() (BalanceTable, error) { + bt, err := adt7.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable7{bt}, nil +} + +func (s *state7) LockedTable() (BalanceTable, error) { + bt, err := adt7.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable7{bt}, nil +} + +func (s *state7) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market7.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state7) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable7 struct { + *adt7.BalanceTable +} + +func (bt *balanceTable7) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt7.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates7 struct { + adt.Array +} + +func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal7 market7.DealState + found, err := s.Array.Get(uint64(dealID), &deal7) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV7DealState(deal7) + return &deal, true, nil +} + +func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds7 market7.DealState + return s.Array.ForEach(&ds7, func(idx int64) error { + return cb(abi.DealID(idx), fromV7DealState(ds7)) + }) +} + +func (s *dealStates7) decode(val *cbg.Deferred) (*DealState, error) { + var ds7 market7.DealState + if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV7DealState(ds7) + return &ds, nil +} + +func (s *dealStates7) array() adt.Array { + return s.Array +} + +func fromV7DealState(v7 market7.DealState) DealState { + return (DealState)(v7) +} + +type dealProposals7 struct { + adt.Array +} + +func (s *dealProposals7) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal7 market7.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal7) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + proposal := fromV7DealProposal(proposal7) + return &proposal, true, nil +} + +func (s *dealProposals7) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp7 market7.DealProposal + return s.Array.ForEach(&dp7, func(idx int64) error { + return cb(abi.DealID(idx), fromV7DealProposal(dp7)) + }) +} + +func (s *dealProposals7) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp7 market7.DealProposal + if err := dp7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + dp := fromV7DealProposal(dp7) + return &dp, nil +} + +func (s *dealProposals7) array() adt.Array { + return s.Array +} + +func fromV7DealProposal(v7 market7.DealProposal) DealProposal { + return (DealProposal)(v7) +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn7)(nil) + +func decodePublishStorageDealsReturn7(b []byte) (PublishStorageDealsReturn, error) { + var retval market7.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn7{retval}, nil +} + +type publishStorageDealsReturn7 struct { + market7.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn7) IsDealValid(index uint64) (bool, error) { + + return r.ValidDeals.IsSet(index) + +} + +func (r *publishStorageDealsReturn7) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index 2669a05a6..74c16be36 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -23,6 +23,7 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" {{range .versions}} builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" {{end}} @@ -177,6 +178,7 @@ type SectorOnChainInfo struct { InitialPledge abi.TokenAmount ExpectedDayReward abi.TokenAmount ExpectedStoragePledge abi.TokenAmount + SectorKeyCID *cid.Cid } type SectorPreCommitInfo = miner0.SectorPreCommitInfo @@ -192,6 +194,7 @@ type SectorPreCommitOnChainInfo struct { type PoStPartition = miner0.PoStPartition type RecoveryDeclaration = miner0.RecoveryDeclaration type FaultDeclaration = miner0.FaultDeclaration +type ReplicaUpdate = miner7.ReplicaUpdate // Params type DeclareFaultsParams = miner0.DeclareFaultsParams @@ -200,6 +203,7 @@ type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams type ProveCommitSectorParams = miner0.ProveCommitSectorParams type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams +type ProveReplicaUpdatesParams = miner7.ProveReplicaUpdatesParams func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { // We added support for the new proofs in network version 7, and removed support for the old diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 1c7f47e11..7889d7a4d 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -23,6 +23,7 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -35,6 +36,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -63,9 +66,13 @@ func init() { return load6(store, root) }) + builtin.RegisterActorState(builtin7.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) + } -var Methods = builtin6.MethodsMiner +var Methods = builtin7.MethodsMiner // Unchanged between v0, v2, v3, v4, and v5 actors var WPoStProvingPeriod = miner0.WPoStProvingPeriod @@ -102,6 +109,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.StorageMinerActorCodeID: return load6(store, act.Head) + case builtin7.StorageMinerActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -127,6 +137,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -152,6 +165,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.StorageMinerActorCodeID, nil + case actors.Version7: + return builtin7.StorageMinerActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -251,6 +267,7 @@ type SectorOnChainInfo struct { InitialPledge abi.TokenAmount ExpectedDayReward abi.TokenAmount ExpectedStoragePledge abi.TokenAmount + SectorKeyCID *cid.Cid } type SectorPreCommitInfo = miner0.SectorPreCommitInfo @@ -266,6 +283,7 @@ type SectorPreCommitOnChainInfo struct { type PoStPartition = miner0.PoStPartition type RecoveryDeclaration = miner0.RecoveryDeclaration type FaultDeclaration = miner0.FaultDeclaration +type ReplicaUpdate = miner7.ReplicaUpdate // Params type DeclareFaultsParams = miner0.DeclareFaultsParams @@ -274,6 +292,7 @@ type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams type ProveCommitSectorParams = miner0.ProveCommitSectorParams type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams +type ProveReplicaUpdatesParams = miner7.ProveReplicaUpdatesParams func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { // We added support for the new proofs in network version 7, and removed support for the old diff --git a/chain/actors/builtin/miner/state.go.template b/chain/actors/builtin/miner/state.go.template index 2ea6a905e..775631961 100644 --- a/chain/actors/builtin/miner/state.go.template +++ b/chain/actors/builtin/miner/state.go.template @@ -138,11 +138,22 @@ func (s *state{{.v}}) GetSectorExpiration(num abi.SectorNumber) (*SectorExpirati return nil, err } // NOTE: this can be optimized significantly. - // 1. If the sector is non-faulty, it will either expire on-time (can be +{{if (ge .v 7) -}} + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). +{{- else -}} + // 1. If the sector is non-faulty, it will either expire on-time (can be // learned from the sector info), or in the next quantized expiration // epoch (i.e., the first element in the partition's expiration queue. +{{- end}} +{{if (ge .v 6) -}} + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. +{{- else -}} // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. +{{- end}} + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error { @@ -554,8 +565,7 @@ func (p *partition{{.v}}) UnprovenSectors() (bitfield.BitField, error) { } func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo { -{{if (ge .v 2)}} - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v{{.v}}.SectorNumber, SealProof: v{{.v}}.SealProof, SealedCID: v{{.v}}.SealedCID, @@ -567,10 +577,11 @@ func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorO InitialPledge: v{{.v}}.InitialPledge, ExpectedDayReward: v{{.v}}.ExpectedDayReward, ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge, + {{if (ge .v 7)}} + SectorKeyCID: v{{.v}}.SectorKeyCID, + {{end}} } -{{else}} - return (SectorOnChainInfo)(v0) -{{end}} + return info } func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index 564bcbbc2..8bde8bf73 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -140,6 +140,7 @@ func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error { @@ -505,9 +506,20 @@ func (p *partition0) UnprovenSectors() (bitfield.BitField, error) { } func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo { - - return (SectorOnChainInfo)(v0) - + info := SectorOnChainInfo{ + SectorNumber: v0.SectorNumber, + SealProof: v0.SealProof, + SealedCID: v0.SealedCID, + DealIDs: v0.DealIDs, + Activation: v0.Activation, + Expiration: v0.Expiration, + DealWeight: v0.DealWeight, + VerifiedDealWeight: v0.VerifiedDealWeight, + InitialPledge: v0.InitialPledge, + ExpectedDayReward: v0.ExpectedDayReward, + ExpectedStoragePledge: v0.ExpectedStoragePledge, + } + return info } func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go index fe0863111..bbfdd403e 100644 --- a/chain/actors/builtin/miner/v2.go +++ b/chain/actors/builtin/miner/v2.go @@ -138,6 +138,7 @@ func (s *state2) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner2.Deadline) error { @@ -535,8 +536,7 @@ func (p *partition2) UnprovenSectors() (bitfield.BitField, error) { } func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v2.SectorNumber, SealProof: v2.SealProof, SealedCID: v2.SealedCID, @@ -549,7 +549,7 @@ func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v2.ExpectedDayReward, ExpectedStoragePledge: v2.ExpectedStoragePledge, } - + return info } func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v3.go b/chain/actors/builtin/miner/v3.go index b0d5429ea..68505918a 100644 --- a/chain/actors/builtin/miner/v3.go +++ b/chain/actors/builtin/miner/v3.go @@ -140,6 +140,7 @@ func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition3) UnprovenSectors() (bitfield.BitField, error) { } func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v3.SectorNumber, SealProof: v3.SealProof, SealedCID: v3.SealedCID, @@ -550,7 +550,7 @@ func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v3.ExpectedDayReward, ExpectedStoragePledge: v3.ExpectedStoragePledge, } - + return info } func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v4.go b/chain/actors/builtin/miner/v4.go index 7e5a9761a..5c40d4189 100644 --- a/chain/actors/builtin/miner/v4.go +++ b/chain/actors/builtin/miner/v4.go @@ -140,6 +140,7 @@ func (s *state4) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition4) UnprovenSectors() (bitfield.BitField, error) { } func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v4.SectorNumber, SealProof: v4.SealProof, SealedCID: v4.SealedCID, @@ -550,7 +550,7 @@ func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v4.ExpectedDayReward, ExpectedStoragePledge: v4.ExpectedStoragePledge, } - + return info } func fromV4SectorPreCommitOnChainInfo(v4 miner4.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v5.go b/chain/actors/builtin/miner/v5.go index 7f4aaf168..f717934f4 100644 --- a/chain/actors/builtin/miner/v5.go +++ b/chain/actors/builtin/miner/v5.go @@ -140,6 +140,7 @@ func (s *state5) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition5) UnprovenSectors() (bitfield.BitField, error) { } func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v5.SectorNumber, SealProof: v5.SealProof, SealedCID: v5.SealedCID, @@ -550,7 +550,7 @@ func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v5.ExpectedDayReward, ExpectedStoragePledge: v5.ExpectedStoragePledge, } - + return info } func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v6.go b/chain/actors/builtin/miner/v6.go index de5a22a10..7a9dfb0df 100644 --- a/chain/actors/builtin/miner/v6.go +++ b/chain/actors/builtin/miner/v6.go @@ -138,8 +138,9 @@ func (s *state6) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // 1. If the sector is non-faulty, it will either expire on-time (can be // learned from the sector info), or in the next quantized expiration // epoch (i.e., the first element in the partition's expiration queue. - // 2. If it's faulty, it will expire early within the first 14 entries + // 2. If it's faulty, it will expire early within the first 42 entries // of the expiration queue. + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner6.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition6) UnprovenSectors() (bitfield.BitField, error) { } func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v6.SectorNumber, SealProof: v6.SealProof, SealedCID: v6.SealedCID, @@ -550,7 +550,7 @@ func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v6.ExpectedDayReward, ExpectedStoragePledge: v6.ExpectedStoragePledge, } - + return info } func fromV6SectorPreCommitOnChainInfo(v6 miner6.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v7.go b/chain/actors/builtin/miner/v7.go new file mode 100644 index 000000000..e1b2520e4 --- /dev/null +++ b/chain/actors/builtin/miner/v7.go @@ -0,0 +1,571 @@ +package miner + +import ( + "bytes" + "errors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = miner7.State{} + return &out, nil +} + +type state7 struct { + miner7.State + store adt.Store +} + +type deadline7 struct { + miner7.Deadline + store adt.Store +} + +type partition7 struct { + miner7.Partition + store adt.Store +} + +func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state7) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state7) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state7) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state7) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state7) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state7) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV7SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state7) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state7) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state7) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner7.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner7.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner7.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner7.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state7) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV7SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state7) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt7.AsMap(s.store, s.State.PreCommittedSectors, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner7.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV7SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state7) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner7.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info7 miner7.SectorOnChainInfo + if err := sectors.ForEach(&info7, func(_ int64) error { + info := fromV7SectorOnChainInfo(info7) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. + infos7, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos7)) + for i, info7 := range infos7 { + info := fromV7SectorOnChainInfo(*info7) + infos[i] = &info + } + return infos, nil +} + +func (s *state7) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state7) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state7) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state7) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state7) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state7) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline7{*dl, s.store}, nil +} + +func (s *state7) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner7.Deadline) error { + return cb(i, &deadline7{*dl, s.store}) + }) +} + +func (s *state7) NumDeadlines() (uint64, error) { + return miner7.WPoStPeriodDeadlines, nil +} + +func (s *state7) DeadlinesChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other7.Deadlines), nil +} + +func (s *state7) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state7) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + var pid *peer.ID + if peerID, err := peer.IDFromBytes(info.PeerId); err == nil { + pid = &peerID + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + + PeerId: pid, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + if info.PendingWorkerKey != nil { + mi.NewWorker = info.PendingWorkerKey.NewWorker + mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt + } + + return mi, nil +} + +func (s *state7) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state7) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state7) sectors() (adt.Array, error) { + return adt7.AsArray(s.store, s.Sectors, miner7.SectorsAmtBitwidth) +} + +func (s *state7) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner7.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV7SectorOnChainInfo(si), nil +} + +func (s *state7) precommits() (adt.Map, error) { + return adt7.AsMap(s.store, s.PreCommittedSectors, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner7.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV7SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state7) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner7.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner7.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline7) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition7{*p, d.store}, nil +} + +func (d *deadline7) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + return err + } + var part miner7.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition7{part, d.store}) + }) +} + +func (d *deadline7) PartitionsChanged(other Deadline) (bool, error) { + other7, ok := other.(*deadline7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other7.Deadline.Partitions), nil +} + +func (d *deadline7) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline7) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition7) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition7) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition7) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition7) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV7SectorOnChainInfo(v7 miner7.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v7.SectorNumber, + SealProof: v7.SealProof, + SealedCID: v7.SealedCID, + DealIDs: v7.DealIDs, + Activation: v7.Activation, + Expiration: v7.Expiration, + DealWeight: v7.DealWeight, + VerifiedDealWeight: v7.VerifiedDealWeight, + InitialPledge: v7.InitialPledge, + ExpectedDayReward: v7.ExpectedDayReward, + ExpectedStoragePledge: v7.ExpectedStoragePledge, + + SectorKeyCID: v7.SectorKeyCID, + } + return info +} + +func fromV7SectorPreCommitOnChainInfo(v7 miner7.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + + return SectorPreCommitOnChainInfo{ + Info: (SectorPreCommitInfo)(v7.Info), + PreCommitDeposit: v7.PreCommitDeposit, + PreCommitEpoch: v7.PreCommitEpoch, + DealWeight: v7.DealWeight, + VerifiedDealWeight: v7.VerifiedDealWeight, + } + +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/message7.go b/chain/actors/builtin/multisig/message7.go new file mode 100644 index 000000000..e7fb83e9b --- /dev/null +++ b/chain/actors/builtin/multisig/message7.go @@ -0,0 +1,71 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + multisig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message7 struct{ message0 } + +func (m message7) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig7.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init7.ExecParams{ + CodeCID: builtin7.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtin7.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go index ee725f7e5..f1b50475a 100644 --- a/chain/actors/builtin/multisig/multisig.go +++ b/chain/actors/builtin/multisig/multisig.go @@ -13,7 +13,7 @@ import ( "github.com/ipfs/go-cid" msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - msig6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/multisig" + msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -27,6 +27,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -58,6 +60,10 @@ func init() { builtin.RegisterActorState(builtin6.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } func Load(store adt.Store, act *types.Actor) (State, error) { @@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.MultisigActorCodeID: return load6(store, act.Head) + case builtin7.MultisigActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version, signers []address.Address, th case actors.Version6: return make6(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + case actors.Version7: + return make7(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.MultisigActorCodeID, nil + case actors.Version7: + return builtin7.MultisigActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -156,7 +171,7 @@ type State interface { type Transaction = msig0.Transaction -var Methods = builtin6.MethodsMultisig +var Methods = builtin7.MethodsMultisig func Message(version actors.Version, from address.Address) MessageBuilder { switch version { @@ -178,6 +193,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder { case actors.Version6: return message6{message0{from}} + + case actors.Version7: + return message7{message0{from}} default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -201,13 +219,13 @@ type MessageBuilder interface { } // this type is the same between v0 and v2 -type ProposalHashData = msig6.ProposalHashData -type ProposeReturn = msig6.ProposeReturn -type ProposeParams = msig6.ProposeParams -type ApproveReturn = msig6.ApproveReturn +type ProposalHashData = msig7.ProposalHashData +type ProposeReturn = msig7.ProposeReturn +type ProposeParams = msig7.ProposeParams +type ApproveReturn = msig7.ApproveReturn func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { - params := msig6.TxnIDParams{ID: msig6.TxnID(id)} + params := msig7.TxnIDParams{ID: msig7.TxnID(id)} if data != nil { if data.Requester.Protocol() != address.ID { return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) diff --git a/chain/actors/builtin/multisig/v7.go b/chain/actors/builtin/multisig/v7.go new file mode 100644 index 000000000..bbe41f3db --- /dev/null +++ b/chain/actors/builtin/multisig/v7.go @@ -0,0 +1,119 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state7{store: store} + out.State = msig7.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt7.StoreEmptyMap(store, builtin7.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state7 struct { + msig7.State + store adt.Store +} + +func (s *state7) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state7) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state7) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state7) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state7) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state7) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state7) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt7.AsMap(s.store, s.State.PendingTxns, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig7.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state7) PendingTxnChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other7.PendingTxns), nil +} + +func (s *state7) transactions() (adt.Map, error) { + return adt7.AsMap(s.store, s.PendingTxns, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig7.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/paych/message7.go b/chain/actors/builtin/paych/message7.go new file mode 100644 index 000000000..41dfa1bdd --- /dev/null +++ b/chain/actors/builtin/paych/message7.go @@ -0,0 +1,74 @@ +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message7 struct{ from address.Address } + +func (m message7) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych7.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init7.ExecParams{ + CodeCID: builtin7.PaymentChannelActorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin7.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message7) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych7.UpdateChannelStateParams{ + Sv: *sv, + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message7) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.Settle, + }, nil +} + +func (m message7) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go index eea3659f8..f807b33ed 100644 --- a/chain/actors/builtin/paych/paych.go +++ b/chain/actors/builtin/paych/paych.go @@ -27,6 +27,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -58,6 +60,10 @@ func init() { builtin.RegisterActorState(builtin6.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } // Load returns an abstract copy of payment channel state, irregardless of actor version @@ -82,6 +88,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.PaymentChannelActorCodeID: return load6(store, act.Head) + case builtin7.PaymentChannelActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -107,6 +116,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -132,6 +144,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.PaymentChannelActorCodeID, nil + case actors.Version7: + return builtin7.PaymentChannelActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -185,7 +200,7 @@ func DecodeSignedVoucher(s string) (*SignedVoucher, error) { return &sv, nil } -var Methods = builtin6.MethodsPaych +var Methods = builtin7.MethodsPaych func Message(version actors.Version, from address.Address) MessageBuilder { switch version { @@ -208,6 +223,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder { case actors.Version6: return message6{from} + case actors.Version7: + return message7{from} + default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } diff --git a/chain/actors/builtin/paych/v7.go b/chain/actors/builtin/paych/v7.go new file mode 100644 index 000000000..ce09ea2e4 --- /dev/null +++ b/chain/actors/builtin/paych/v7.go @@ -0,0 +1,114 @@ +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = paych7.State{} + return &out, nil +} + +type state7 struct { + paych7.State + store adt.Store + lsAmt *adt7.Array +} + +// Channel owner, who has funded the actor +func (s *state7) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state7) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state7) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state7) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state7) getOrLoadLsAmt() (*adt7.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt7.AsArray(s.store, s.State.LaneStates, paych7.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state7) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state7) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. + var ls paych7.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState7{ls}) + }) +} + +type laneState7 struct { + paych7.LaneState +} + +func (ls *laneState7) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState7) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index 84bd6948a..9b73cdd60 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -26,6 +26,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -53,11 +55,15 @@ func init() { builtin.RegisterActorState(builtin6.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.StoragePowerActorAddr - Methods = builtin6.MethodsPower + Address = builtin7.StoragePowerActorAddr + Methods = builtin7.MethodsPower ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.StoragePowerActorCodeID: return load6(store, act.Head) + case builtin7.StoragePowerActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.StoragePowerActorCodeID, nil + case actors.Version7: + return builtin7.StoragePowerActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/power/v7.go b/chain/actors/builtin/power/v7.go new file mode 100644 index 000000000..af1761cb2 --- /dev/null +++ b/chain/actors/builtin/power/v7.go @@ -0,0 +1,187 @@ +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + power7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/power" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + + s, err := power7.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + power7.State + store adt.Store +} + +func (s *state7) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state7) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state7) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state7) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power7.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state7) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state7) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV7FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state7) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state7) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state7) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power7.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state7) ClaimsChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other7.State.Claims), nil +} + +func (s *state7) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state7) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state7) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state7) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +func (s *state7) claims() (adt.Map, error) { + return adt7.AsMap(s.store, s.Claims, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power7.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV7Claim(ci), nil +} + +func fromV7Claim(v7 power7.Claim) Claim { + return Claim{ + RawBytePower: v7.RawBytePower, + QualityAdjPower: v7.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index 38d5b5b87..b6ee2f146 100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -21,6 +21,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" @@ -51,11 +53,15 @@ func init() { builtin.RegisterActorState(builtin6.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.RewardActorAddr - Methods = builtin6.MethodsReward + Address = builtin7.RewardActorAddr + Methods = builtin7.MethodsReward ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -79,6 +85,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.RewardActorCodeID: return load6(store, act.Head) + case builtin7.RewardActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -104,6 +113,9 @@ func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.Storage case actors.Version6: return make6(store, currRealizedPower) + case actors.Version7: + return make7(store, currRealizedPower) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -129,6 +141,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.RewardActorCodeID, nil + case actors.Version7: + return builtin7.RewardActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/reward/v7.go b/chain/actors/builtin/reward/v7.go new file mode 100644 index 000000000..368bb3abd --- /dev/null +++ b/chain/actors/builtin/reward/v7.go @@ -0,0 +1,98 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + reward7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/reward" + smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state7{store: store} + out.State = *reward7.ConstructState(currRealizedPower) + return &out, nil +} + +type state7 struct { + reward7.State + store adt.Store +} + +func (s *state7) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state7) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state7) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state7) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state7) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state7) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state7) CumsumBaseline() (reward7.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state7) CumsumRealized() (reward7.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state7) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner7.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing7.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state7) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner7.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing7.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go index 3d6105c38..fb7515f35 100644 --- a/chain/actors/builtin/system/system.go +++ b/chain/actors/builtin/system/system.go @@ -17,10 +17,12 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) var ( - Address = builtin6.SystemActorAddr + Address = builtin7.SystemActorAddr ) func MakeState(store adt.Store, av actors.Version) (State, error) { @@ -44,6 +46,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -69,6 +74,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.SystemActorCodeID, nil + case actors.Version7: + return builtin7.SystemActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/system/v7.go b/chain/actors/builtin/system/v7.go new file mode 100644 index 000000000..813add5fb --- /dev/null +++ b/chain/actors/builtin/system/v7.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/system" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = system7.State{} + return &out, nil +} + +type state7 struct { + system7.State + store adt.Store +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/v7.go b/chain/actors/builtin/verifreg/v7.go new file mode 100644 index 000000000..9b2ca928a --- /dev/null +++ b/chain/actors/builtin/verifreg/v7.go @@ -0,0 +1,75 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state7{store: store} + + s, err := verifreg7.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + verifreg7.State + store adt.Store +} + +func (s *state7) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state7) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version7, s.verifiedClients, addr) +} + +func (s *state7) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version7, s.verifiers, addr) +} + +func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version7, s.verifiers, cb) +} + +func (s *state7) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version7, s.verifiedClients, cb) +} + +func (s *state7) verifiedClients() (adt.Map, error) { + return adt7.AsMap(s.store, s.VerifiedClients, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) verifiers() (adt.Map, error) { + return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index 31e8e5a08..f6281334d 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -21,6 +21,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -53,11 +55,15 @@ func init() { return load6(store, root) }) + builtin.RegisterActorState(builtin7.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) + } var ( - Address = builtin6.VerifiedRegistryActorAddr - Methods = builtin6.MethodsVerifiedRegistry + Address = builtin7.VerifiedRegistryActorAddr + Methods = builtin7.MethodsVerifiedRegistry ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.VerifiedRegistryActorCodeID: return load6(store, act.Head) + case builtin7.VerifiedRegistryActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Addres case actors.Version6: return make6(store, rootKeyAddress) + case actors.Version7: + return make7(store, rootKeyAddress) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.VerifiedRegistryActorCodeID, nil + case actors.Version7: + return builtin7.VerifiedRegistryActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index e00a6ae10..f51da7aa7 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -40,14 +40,19 @@ import ( miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" verifreg6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/verifreg" - paych6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/paych" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" + + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" ) const ( - ChainFinality = miner6.ChainFinality + ChainFinality = miner7.ChainFinality SealRandomnessLookback = ChainFinality - PaychSettleDelay = paych6.SettleDelay - MaxPreCommitRandomnessLookback = builtin6.EpochsInDay + SealRandomnessLookback + PaychSettleDelay = paych7.SettleDelay + MaxPreCommitRandomnessLookback = builtin7.EpochsInDay + SealRandomnessLookback ) // SetSupportedProofTypes sets supported proof types, across all actor versions. @@ -72,6 +77,8 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { miner6.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner7.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + AddSupportedProofTypes(types...) } @@ -119,6 +126,15 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { miner6.WindowPoStProofTypes[wpp] = struct{}{} + miner7.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err = t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner7.WindowPoStProofTypes[wpp] = struct{}{} + } } @@ -139,11 +155,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { miner6.PreCommitChallengeDelay = delay + miner7.PreCommitChallengeDelay = delay + } // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. func GetPreCommitChallengeDelay() abi.ChainEpoch { - return miner6.PreCommitChallengeDelay + return miner7.PreCommitChallengeDelay } // SetConsensusMinerMinPower sets the minimum power of an individual miner must @@ -173,6 +191,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) { policy.ConsensusMinerMinPower = p } + for _, policy := range builtin7.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + } // SetMinVerifiedDealSize sets the minimum size of a verified deal. This should @@ -191,6 +213,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) { verifreg6.MinVerifiedDealSize = size + verifreg7.MinVerifiedDealSize = size + } func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) { @@ -220,6 +244,10 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (a return miner6.MaxProveCommitDuration[t], nil + case actors.Version7: + + return miner7.MaxProveCommitDuration[t], nil + default: return 0, xerrors.Errorf("unsupported actors version") } @@ -255,6 +283,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) { Denominator: denom, } + market7.ProviderCollateralSupplyTarget = builtin7.BigFrac{ + Numerator: num, + Denominator: denom, + } + } func DealProviderCollateralBounds( @@ -298,13 +331,18 @@ func DealProviderCollateralBounds( min, max := market6.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) return min, max, nil + case actors.Version7: + + min, max := market7.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + default: return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version") } } func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { - return market6.DealDurationBounds(pieceSize) + return market7.DealDurationBounds(pieceSize) } // Sets the challenge window and scales the proving period to match (such that @@ -345,6 +383,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) { // scale it if we're scaling the challenge period. miner6.WPoStDisputeWindow = period * 30 + miner7.WPoStChallengeWindow = period + miner7.WPoStProvingPeriod = period * abi.ChainEpoch(miner7.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. + miner7.WPoStDisputeWindow = period * 30 + } func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { @@ -357,15 +402,15 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { } func GetMaxSectorExpirationExtension() abi.ChainEpoch { - return miner6.MaxSectorExpirationExtension + return miner7.MaxSectorExpirationExtension } func GetMinSectorExpiration() abi.ChainEpoch { - return miner6.MinSectorExpiration + return miner7.MinSectorExpiration } func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) { - sectorsPerPart, err := builtin6.PoStProofWindowPoStPartitionSectors(p) + sectorsPerPart, err := builtin7.PoStProofWindowPoStPartitionSectors(p) if err != nil { return 0, err } @@ -378,8 +423,8 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e func GetDefaultSectorSize() abi.SectorSize { // supported sector sizes are the same across versions. - szs := make([]abi.SectorSize, 0, len(miner6.PreCommitSealProofTypesV8)) - for spt := range miner6.PreCommitSealProofTypesV8 { + szs := make([]abi.SectorSize, 0, len(miner7.PreCommitSealProofTypesV8)) + for spt := range miner7.PreCommitSealProofTypesV8 { ss, err := spt.SectorSize() if err != nil { panic(err) @@ -404,7 +449,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime } - return builtin6.SealProofPoliciesV11[proof].SectorMaxLifetime + return builtin7.SealProofPoliciesV11[proof].SectorMaxLifetime } func GetAddressedSectorsMax(nwVer network.Version) (int, error) { @@ -432,6 +477,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) { case actors.Version6: return miner6.AddressedSectorsMax, nil + case actors.Version7: + return miner7.AddressedSectorsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -469,6 +517,10 @@ func GetDeclarationsMax(nwVer network.Version) (int, error) { return miner6.DeclarationsMax, nil + case actors.Version7: + + return miner7.DeclarationsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -505,6 +557,10 @@ func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, ba return miner6.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + case actors.Version7: + + return miner7.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } @@ -541,6 +597,10 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base return miner6.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + case actors.Version7: + + return miner7.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } diff --git a/chain/actors/version.go b/chain/actors/version.go index 7b7a6393a..af51161c9 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -20,9 +20,9 @@ const ({{range .actorVersions}} /* inline-gen start */ -var LatestVersion = 6 +var LatestVersion = 7 -var Versions = []int{0, 2, 3, 4, 5, 6} +var Versions = []int{0, 2, 3, 4, 5, 6, 7} const ( Version0 Version = 0 @@ -31,6 +31,7 @@ const ( Version4 Version = 4 Version5 Version = 5 Version6 Version = 6 + Version7 Version = 7 ) /* inline-gen end */ @@ -50,6 +51,8 @@ func VersionForNetwork(version network.Version) (Version, error) { return Version5, nil case network.Version14: return Version6, nil + case network.Version15: + return Version7, nil default: return -1, fmt.Errorf("unsupported network version %d", version) } diff --git a/chain/checkpoint.go b/chain/checkpoint.go index a3660a45c..4f8310593 100644 --- a/chain/checkpoint.go +++ b/chain/checkpoint.go @@ -13,7 +13,7 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e return xerrors.Errorf("called with empty tsk") } - ts, err := syncer.ChainStore().LoadTipSet(tsk) + ts, err := syncer.ChainStore().LoadTipSet(ctx, tsk) if err != nil { tss, err := syncer.Exchange.GetBlocks(ctx, tsk, 1) if err != nil { @@ -28,7 +28,7 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e return xerrors.Errorf("failed to switch chain when syncing checkpoint: %w", err) } - if err := syncer.ChainStore().SetCheckpoint(ts); err != nil { + if err := syncer.ChainStore().SetCheckpoint(ctx, ts); err != nil { return xerrors.Errorf("failed to set the chain checkpoint: %w", err) } @@ -41,7 +41,7 @@ func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error { return nil } - if anc, err := syncer.store.IsAncestorOf(ts, hts); err == nil && anc { + if anc, err := syncer.store.IsAncestorOf(ctx, ts, hts); err == nil && anc { return nil } @@ -50,7 +50,7 @@ func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error { return xerrors.Errorf("failed to collect chain for checkpoint: %w", err) } - if err := syncer.ChainStore().SetHead(ts); err != nil { + if err := syncer.ChainStore().SetHead(ctx, ts); err != nil { return xerrors.Errorf("failed to set the chain head: %w", err) } return nil diff --git a/chain/consensus/filcns/compute_state.go b/chain/consensus/filcns/compute_state.go index 3c333298e..2f8cab740 100644 --- a/chain/consensus/filcns/compute_state.go +++ b/chain/consensus/filcns/compute_state.go @@ -28,6 +28,7 @@ import ( exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported" exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" exported6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/exported" + exported7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/exported" /* inline-gen end */ @@ -59,6 +60,7 @@ func NewActorRegistry() *vm.ActorRegistry { inv.Register(vm.ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...) inv.Register(vm.ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...) inv.Register(vm.ActorsVersionPredicate(actors.Version6), exported6.BuiltinActors()...) + inv.Register(vm.ActorsVersionPredicate(actors.Version7), exported7.BuiltinActors()...) /* inline-gen end */ @@ -99,7 +101,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager Actors: NewActorRegistry(), Syscalls: sm.Syscalls, CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, epoch), BaseFee: baseFee, LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts), } @@ -167,7 +169,10 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager } } - vmi.SetBlockHeight(i + 1) + if err = vmi.SetBlockHeight(ctx, i+1); err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("error advancing vm an epoch: %w", err) + } + pstate = newState } @@ -289,7 +294,7 @@ func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag var parentEpoch abi.ChainEpoch pstate := blks[0].ParentStateRoot if blks[0].Height > 0 { - parent, err := sm.ChainStore().GetBlock(blks[0].Parents[0]) + parent, err := sm.ChainStore().GetBlock(ctx, blks[0].Parents[0]) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err) } @@ -297,9 +302,9 @@ func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag parentEpoch = parent.Height } - r := rand.NewStateRand(sm.ChainStore(), ts.Cids(), sm.Beacon()) + r := rand.NewStateRand(sm.ChainStore(), ts.Cids(), sm.Beacon(), sm.GetNetworkVersion) - blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ts) + blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ctx, ts) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err) } diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go index 883edd9a1..42020d529 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -26,7 +26,7 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" @@ -90,19 +90,19 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) h := b.Header - baseTs, err := filec.store.LoadTipSet(types.NewTipSetKey(h.Parents...)) + baseTs, err := filec.store.LoadTipSet(ctx, types.NewTipSetKey(h.Parents...)) if err != nil { return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) } - winPoStNv := filec.sm.GetNtwkVersion(ctx, baseTs.Height()) + winPoStNv := filec.sm.GetNetworkVersion(ctx, baseTs.Height()) lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, baseTs, h.Height) if err != nil { return xerrors.Errorf("failed to get lookback tipset for block: %w", err) } - prevBeacon, err := filec.store.GetLatestBeaconEntry(baseTs) + prevBeacon, err := filec.store.GetLatestBeaconEntry(ctx, baseTs) if err != nil { return xerrors.Errorf("failed to get latest beacon entry: %w", err) } @@ -171,7 +171,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) } if stateroot != h.ParentStateRoot { - msgs, err := filec.store.MessagesForTipset(baseTs) + msgs, err := filec.store.MessagesForTipset(ctx, baseTs) if err != nil { log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) } else { @@ -400,12 +400,21 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network. return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err) } - sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand) + xsectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand) if err != nil { return xerrors.Errorf("getting winning post sector set: %w", err) } - ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{ + sectors := make([]proof.SectorInfo, len(xsectors)) + for i, xsi := range xsectors { + sectors[i] = proof.SectorInfo{ + SealProof: xsi.SealProof, + SectorNumber: xsi.SectorNumber, + SealedCID: xsi.SealedCID, + } + } + + ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{ Randomness: rand, Proofs: h.WinPoStProof, ChallengedSectors: sectors, @@ -457,7 +466,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl return xerrors.Errorf("failed to load base state tree: %w", err) } - nv := filec.sm.GetNtwkVersion(ctx, b.Header.Height) + nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height) pl := vm.PricelistByEpoch(baseTs.Height()) var sumGasLimit int64 checkMsg := func(msg types.ChainMsg) error { @@ -479,7 +488,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl // Phase 2: (Partial) semantic validation: // the sender exists and is an account actor, and the nonces make sense var sender address.Address - if filec.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 { + if filec.sm.GetNetworkVersion(ctx, b.Header.Height) >= network.Version13 { sender, err = st.LookupID(m.From) if err != nil { return err @@ -519,7 +528,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) } - c, err := store.PutMessage(tmpbs, m) + c, err := store.PutMessage(ctx, tmpbs, m) if err != nil { return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) } @@ -532,7 +541,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl smArr := blockadt.MakeEmptyArray(tmpstore) for i, m := range b.SecpkMessages { - if filec.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version14 { + if filec.sm.GetNetworkVersion(ctx, b.Header.Height) >= network.Version14 { if m.Signature.Type != crypto.SigTypeSecp256k1 { return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) } @@ -553,7 +562,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err) } - c, err := store.PutMessage(tmpbs, m) + c, err := store.PutMessage(ctx, tmpbs, m) if err != nil { return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) } diff --git a/chain/consensus/filcns/mine.go b/chain/consensus/filcns/mine.go index bbda35fcf..35e38883d 100644 --- a/chain/consensus/filcns/mine.go +++ b/chain/consensus/filcns/mine.go @@ -15,7 +15,7 @@ import ( ) func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) { - pts, err := filec.sm.ChainStore().LoadTipSet(bt.Parents) + pts, err := filec.sm.ChainStore().LoadTipSet(ctx, bt.Parents) if err != nil { return nil, xerrors.Errorf("failed to load parent tipset: %w", err) } @@ -59,14 +59,14 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api. blsSigs = append(blsSigs, msg.Signature) blsMessages = append(blsMessages, &msg.Message) - c, err := filec.sm.ChainStore().PutMessage(&msg.Message) + c, err := filec.sm.ChainStore().PutMessage(ctx, &msg.Message) if err != nil { return nil, err } blsMsgCids = append(blsMsgCids, c) - } else { - c, err := filec.sm.ChainStore().PutMessage(msg) + } else if msg.Signature.Type == crypto.SigTypeSecp256k1 { + c, err := filec.sm.ChainStore().PutMessage(ctx, msg) if err != nil { return nil, err } @@ -74,6 +74,8 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api. secpkMsgCids = append(secpkMsgCids, c) secpkMessages = append(secpkMessages, msg) + } else { + return nil, xerrors.Errorf("unknown sig type: %d", msg.Signature.Type) } } diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go index cf4c62bf3..863193180 100644 --- a/chain/consensus/filcns/upgrades.go +++ b/chain/consensus/filcns/upgrades.go @@ -6,6 +6,7 @@ import ( "time" "github.com/filecoin-project/specs-actors/v6/actors/migration/nv14" + "github.com/filecoin-project/specs-actors/v7/actors/migration/nv15" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" @@ -156,6 +157,22 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule { StopWithin: 5, }}, Expensive: true, + }, { + Height: build.UpgradeSnapDealsHeight, + Network: network.Version15, + Migration: UpgradeActorsV7, + PreMigrations: []stmgr.PreMigration{{ + PreMigration: PreUpgradeActorsV7, + StartWithin: 120, + DontStartWithin: 60, + StopWithin: 35, + }, { + PreMigration: PreUpgradeActorsV7, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, }, } @@ -625,7 +642,7 @@ func splitGenesisMultisig0(ctx context.Context, em stmgr.ExecMonitor, addr addre // TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting func resetGenesisMsigs0(ctx context.Context, sm *stmgr.StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error { - gb, err := sm.ChainStore().GetGenesis() + gb, err := sm.ChainStore().GetGenesis(ctx) if err != nil { return xerrors.Errorf("getting genesis block: %w", err) } @@ -1170,7 +1187,94 @@ func upgradeActorsV6Common( // Perform the migration newHamtRoot, err := nv14.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) if err != nil { - return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err) + return cid.Undef, xerrors.Errorf("upgrading to actors v6: %w", err) + } + + // Persist the result. + newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persist the new tree. + + { + from := buf + to := buf.Read() + + if err := vm.Copy(ctx, from, to, newRoot); err != nil { + return cid.Undef, xerrors.Errorf("copying migrated tree: %w", err) + } + } + + return newRoot, nil +} + +func UpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv15.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v6 state: %w", err) + } + + return newRoot, nil +} + +func PreUpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. + workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + + config := nv15.Config{MaxWorkers: uint(workerCount)} + _, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config) + return err +} + +func upgradeActorsV7Common( + ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv15.Config, +) (cid.Cid, error) { + buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), blockstore.NewMemorySync()) + store := store.ActorStore(ctx, buf) + + // Load the state root. + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion4 { + return cid.Undef, xerrors.Errorf( + "expected state root version 4 for actors v7 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv15.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v7: %w", err) } // Persist the result. diff --git a/chain/events/events_test.go b/chain/events/events_test.go index 61dd25fbb..5f52cbd92 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -87,7 +87,7 @@ func (fcs *fakeCS) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ( } // copied from the chainstore - revert, apply, err := store.ReorgOps(func(tsk types.TipSetKey) (*types.TipSet, error) { + revert, apply, err := store.ReorgOps(ctx, func(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { return fcs.ChainGetTipSet(ctx, tsk) }, fromTs, toTs) if err != nil { diff --git a/chain/events/state/mock/api.go b/chain/events/state/mock/api.go index 2ed48dc39..7a73355a5 100644 --- a/chain/events/state/mock/api.go +++ b/chain/events/state/mock/api.go @@ -27,11 +27,11 @@ func NewMockAPI(bs blockstore.Blockstore) *MockAPI { } func (m *MockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { - return m.bs.Has(c) + return m.bs.Has(ctx, c) } func (m *MockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { - blk, err := m.bs.Get(c) + blk, err := m.bs.Get(ctx, c) if err != nil { return nil, xerrors.Errorf("blockstore get: %w", err) } diff --git a/chain/exchange/server.go b/chain/exchange/server.go index 7c1624e57..37d49d7bc 100644 --- a/chain/exchange/server.go +++ b/chain/exchange/server.go @@ -136,7 +136,7 @@ func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Re _, span := trace.StartSpan(ctx, "chainxchg.ServiceRequest") defer span.End() - chain, err := collectChainSegment(s.cs, req) + chain, err := collectChainSegment(ctx, s.cs, req) if err != nil { log.Warn("block sync request: collectChainSegment failed: ", err) return &Response{ @@ -156,13 +156,13 @@ func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Re }, nil } -func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) { +func collectChainSegment(ctx context.Context, cs *store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) { var bstips []*BSTipSet cur := req.head for { var bst BSTipSet - ts, err := cs.LoadTipSet(cur) + ts, err := cs.LoadTipSet(ctx, cur) if err != nil { return nil, xerrors.Errorf("failed loading tipset %s: %w", cur, err) } @@ -172,7 +172,7 @@ func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipS } if req.options.IncludeMessages { - bmsgs, bmincl, smsgs, smincl, err := gatherMessages(cs, ts) + bmsgs, bmincl, smsgs, smincl, err := gatherMessages(ctx, cs, ts) if err != nil { return nil, xerrors.Errorf("gather messages failed: %w", err) } @@ -197,14 +197,14 @@ func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipS } } -func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) { +func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) { blsmsgmap := make(map[cid.Cid]uint64) secpkmsgmap := make(map[cid.Cid]uint64) var secpkincl, blsincl [][]uint64 var blscids, secpkcids []cid.Cid for _, block := range ts.Blocks() { - bc, sc, err := cs.ReadMsgMetaCids(block.Messages) + bc, sc, err := cs.ReadMsgMetaCids(ctx, block.Messages) if err != nil { return nil, nil, nil, nil, err } @@ -237,12 +237,12 @@ func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [ secpkincl = append(secpkincl, smi) } - blsmsgs, err := cs.LoadMessagesFromCids(blscids) + blsmsgs, err := cs.LoadMessagesFromCids(ctx, blscids) if err != nil { return nil, nil, nil, nil, err } - secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids) + secpkmsgs, err := cs.LoadSignedMessagesFromCids(ctx, secpkcids) if err != nil { return nil, nil, nil, nil, err } diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 69ab32d58..4153830dc 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -9,6 +9,8 @@ import ( "sync/atomic" "time" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/go-state-types/network" @@ -239,7 +241,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS genfb := &types.FullBlock{Header: genb.Genesis} gents := store.NewFullTipSet([]*types.FullBlock{genfb}) - if err := cs.SetGenesis(genb.Genesis); err != nil { + if err := cs.SetGenesis(context.TODO(), genb.Genesis); err != nil { return nil, xerrors.Errorf("set genesis failed: %w", err) } @@ -459,7 +461,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, if et != nil { // TODO: maybe think about passing in more real parameters to this? - wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil) + wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil, round, network.Version0) if err != nil { return nil, err } @@ -469,7 +471,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, return nil, xerrors.Errorf("making a block for next tipset failed: %w", err) } - if err := cg.cs.PersistBlockHeaders(fblk.Header); err != nil { + if err := cg.cs.PersistBlockHeaders(context.TODO(), fblk.Header); err != nil { return nil, xerrors.Errorf("chainstore AddBlock: %w", err) } @@ -618,7 +620,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr type WinningPoStProver interface { GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error) - ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) + ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof5.PoStProof, error) } type wppProvider struct{} @@ -627,7 +629,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom return []uint64{0}, nil } -func (wpp *wppProvider) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) { +func (wpp *wppProvider) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof5.PoStProof, error) { return ValidWpostForTesting, nil } @@ -686,11 +688,15 @@ func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri panic("not supported") } -func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { panic("not supported") } -func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) (bool, error) { panic("not supported") } diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index 29f03e2af..6ab101e78 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -479,6 +479,10 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca verifNeeds := make(map[address.Address]abi.PaddedPieceSize) var sum abi.PaddedPieceSize + csc := func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { + return big.Zero(), nil + } + vmopt := vm.VMOpts{ StateBase: stateroot, Epoch: 0, @@ -486,11 +490,9 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca Bstore: cs.StateBlockstore(), Actors: filcns.NewActorRegistry(), Syscalls: mkFakedSigSyscalls(sys), - CircSupplyCalc: nil, - NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version { - return nv - }, - BaseFee: types.NewInt(0), + CircSupplyCalc: csc, + NetworkVersion: nv, + BaseFee: types.NewInt(0), } vm, err := vm.NewVM(ctx, &vmopt) if err != nil { @@ -591,7 +593,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto if err != nil { return nil, xerrors.Errorf("serializing msgmeta failed: %w", err) } - if err := bs.Put(mmb); err != nil { + if err := bs.Put(ctx, mmb); err != nil { return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err) } @@ -621,7 +623,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto return nil, xerrors.Errorf("filecoinGenesisCid != gblk.Cid") } - if err := bs.Put(gblk); err != nil { + if err := bs.Put(ctx, gblk); err != nil { return nil, xerrors.Errorf("failed writing filecoin genesis block to blockstore: %w", err) } @@ -652,7 +654,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto return nil, xerrors.Errorf("serializing block header failed: %w", err) } - if err := bs.Put(sb); err != nil { + if err := bs.Put(ctx, sb); err != nil { return nil, xerrors.Errorf("putting header to blockstore: %w", err) } diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index edacfe304..274918147 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -6,6 +6,8 @@ import ( "fmt" "math/rand" + runtime7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" "github.com/ipfs/go-cid" @@ -29,7 +31,6 @@ import ( market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power" reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward" - runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -57,7 +58,7 @@ func MinerAddress(genesisIndex uint64) address.Address { } type fakedSigSyscalls struct { - runtime5.Syscalls + runtime7.Syscalls } func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { @@ -65,7 +66,7 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer } func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { - return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls { + return func(ctx context.Context, rt *vm.Runtime) runtime7.Syscalls { return &fakedSigSyscalls{ base(ctx, rt), } @@ -93,10 +94,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal Actors: filcns.NewActorRegistry(), Syscalls: mkFakedSigSyscalls(sys), CircSupplyCalc: csc, - NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version { - return nv - }, - BaseFee: types.NewInt(0), + NetworkVersion: nv, + BaseFee: types.NewInt(0), } vm, err := vm.NewVM(ctx, vmopt) @@ -509,31 +508,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal // TODO: copied from actors test harness, deduplicate or remove from here type fakeRand struct{} -func (fr *fakeRand) GetChainRandomnessV2(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { out := make([]byte, 32) _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint return out, nil } -func (fr *fakeRand) GetChainRandomnessV1(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { - out := make([]byte, 32) - _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint - return out, nil -} - -func (fr *fakeRand) GetBeaconRandomnessV3(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { - out := make([]byte, 32) - _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint - return out, nil -} - -func (fr *fakeRand) GetBeaconRandomnessV2(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { - out := make([]byte, 32) - _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint - return out, nil -} - -func (fr *fakeRand) GetBeaconRandomnessV1(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { out := make([]byte, 32) _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint return out, nil diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go index 5edcd5439..de3af5825 100644 --- a/chain/gen/slashfilter/slashfilter.go +++ b/chain/gen/slashfilter/slashfilter.go @@ -1,6 +1,7 @@ package slashfilter import ( + "context" "fmt" "github.com/filecoin-project/lotus/build" @@ -27,7 +28,7 @@ func New(dstore ds.Batching) *SlashFilter { } } -func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { +func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { if build.IsNearUpgrade(bh.Height, build.UpgradeOrangeHeight) { return nil } @@ -35,7 +36,7 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height)) { // double-fork mining (2 blocks at one epoch) - if err := checkFault(f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil { + if err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil { return err } } @@ -43,7 +44,7 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo parentsKey := ds.NewKey(fmt.Sprintf("/%s/%x", bh.Miner, types.NewTipSetKey(bh.Parents...).Bytes())) { // time-offset mining faults (2 blocks with the same parents) - if err := checkFault(f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil { + if err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil { return err } } @@ -53,14 +54,14 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo // First check if we have mined a block on the parent epoch parentEpochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch)) - have, err := f.byEpoch.Has(parentEpochKey) + have, err := f.byEpoch.Has(ctx, parentEpochKey) if err != nil { return err } if have { // If we had, make sure it's in our parent tipset - cidb, err := f.byEpoch.Get(parentEpochKey) + cidb, err := f.byEpoch.Get(ctx, parentEpochKey) if err != nil { return xerrors.Errorf("getting other block cid: %w", err) } @@ -83,25 +84,25 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo } } - if err := f.byParents.Put(parentsKey, bh.Cid().Bytes()); err != nil { + if err := f.byParents.Put(ctx, parentsKey, bh.Cid().Bytes()); err != nil { return xerrors.Errorf("putting byEpoch entry: %w", err) } - if err := f.byEpoch.Put(epochKey, bh.Cid().Bytes()); err != nil { + if err := f.byEpoch.Put(ctx, epochKey, bh.Cid().Bytes()); err != nil { return xerrors.Errorf("putting byEpoch entry: %w", err) } return nil } -func checkFault(t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error { - fault, err := t.Has(key) +func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error { + fault, err := t.Has(ctx, key) if err != nil { return err } if fault { - cidb, err := t.Get(key) + cidb, err := t.Get(ctx, key) if err != nil { return xerrors.Errorf("getting other block cid: %w", err) } diff --git a/chain/market/fundmanager.go b/chain/market/fundmanager.go index 5becfdfa7..e934201d7 100644 --- a/chain/market/fundmanager.go +++ b/chain/market/fundmanager.go @@ -89,7 +89,7 @@ func (fm *FundManager) Start() error { // - in State() only load addresses with in-progress messages // - load the others just-in-time from getFundedAddress // - delete(fm.fundedAddrs, addr) when the queue has been processed - return fm.str.forEach(func(state *FundedAddressState) { + return fm.str.forEach(fm.ctx, func(state *FundedAddressState) { fa := newFundedAddress(fm, state.Addr) fa.state = state fm.fundedAddrs[fa.state.Addr] = fa @@ -322,7 +322,7 @@ func (a *fundedAddress) clearWaitState() { // Save state to datastore func (a *fundedAddress) saveState() { // Not much we can do if saving to the datastore fails, just log - err := a.str.save(a.state) + err := a.str.save(a.ctx, a.state) if err != nil { log.Errorf("saving state to store for addr %s: %v", a.state.Addr, err) } diff --git a/chain/market/store.go b/chain/market/store.go index e0d0e10be..9818b1d80 100644 --- a/chain/market/store.go +++ b/chain/market/store.go @@ -2,6 +2,7 @@ package market import ( "bytes" + "context" cborrpc "github.com/filecoin-project/go-cbor-util" "github.com/ipfs/go-datastore" @@ -27,7 +28,7 @@ func newStore(ds dtypes.MetadataDS) *Store { } // save the state to the datastore -func (ps *Store) save(state *FundedAddressState) error { +func (ps *Store) save(ctx context.Context, state *FundedAddressState) error { k := dskeyForAddr(state.Addr) b, err := cborrpc.Dump(state) @@ -35,14 +36,14 @@ func (ps *Store) save(state *FundedAddressState) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // get the state for the given address -func (ps *Store) get(addr address.Address) (*FundedAddressState, error) { +func (ps *Store) get(ctx context.Context, addr address.Address) (*FundedAddressState, error) { k := dskeyForAddr(addr) - data, err := ps.ds.Get(k) + data, err := ps.ds.Get(ctx, k) if err != nil { return nil, err } @@ -56,8 +57,8 @@ func (ps *Store) get(addr address.Address) (*FundedAddressState, error) { } // forEach calls iter with each address in the datastore -func (ps *Store) forEach(iter func(*FundedAddressState)) error { - res, err := ps.ds.Query(dsq.Query{Prefix: dsKeyAddr}) +func (ps *Store) forEach(ctx context.Context, iter func(*FundedAddressState)) error { + res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyAddr}) if err != nil { return err } diff --git a/chain/messagepool/config.go b/chain/messagepool/config.go index a511f84b7..3c07a8a0f 100644 --- a/chain/messagepool/config.go +++ b/chain/messagepool/config.go @@ -1,6 +1,7 @@ package messagepool import ( + "context" "encoding/json" "fmt" "time" @@ -20,8 +21,8 @@ var ( ConfigKey = datastore.NewKey("/mpool/config") ) -func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) { - haveCfg, err := ds.Has(ConfigKey) +func loadConfig(ctx context.Context, ds dtypes.MetadataDS) (*types.MpoolConfig, error) { + haveCfg, err := ds.Has(ctx, ConfigKey) if err != nil { return nil, err } @@ -30,7 +31,7 @@ func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) { return DefaultConfig(), nil } - cfgBytes, err := ds.Get(ConfigKey) + cfgBytes, err := ds.Get(ctx, ConfigKey) if err != nil { return nil, err } @@ -39,12 +40,12 @@ func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) { return cfg, err } -func saveConfig(cfg *types.MpoolConfig, ds dtypes.MetadataDS) error { +func saveConfig(ctx context.Context, cfg *types.MpoolConfig, ds dtypes.MetadataDS) error { cfgBytes, err := json.Marshal(cfg) if err != nil { return err } - return ds.Put(ConfigKey, cfgBytes) + return ds.Put(ctx, ConfigKey, cfgBytes) } func (mp *MessagePool) GetConfig() *types.MpoolConfig { @@ -68,7 +69,7 @@ func validateConfg(cfg *types.MpoolConfig) error { return nil } -func (mp *MessagePool) SetConfig(cfg *types.MpoolConfig) error { +func (mp *MessagePool) SetConfig(ctx context.Context, cfg *types.MpoolConfig) error { if err := validateConfg(cfg); err != nil { return err } @@ -76,7 +77,7 @@ func (mp *MessagePool) SetConfig(cfg *types.MpoolConfig) error { mp.cfgLk.Lock() mp.cfg = cfg - err := saveConfig(cfg, mp.ds) + err := saveConfig(ctx, cfg, mp.ds) if err != nil { log.Warnf("error persisting mpool config: %s", err) } diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 06343e9c9..5d1857bf2 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -196,10 +196,10 @@ func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount { return types.BigAdd(minPrice, types.NewInt(1)) } -func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSepc *api.MessageSendSpec) { +func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.MessageSendSpec) { var maxFee abi.TokenAmount - if sendSepc != nil { - maxFee = sendSepc.MaxFee + if sendSpec != nil { + maxFee = sendSpec.MaxFee } if maxFee.Int == nil || maxFee.Equals(big.Zero()) { mf, err := mff() @@ -358,11 +358,11 @@ func (ms *msgSet) toSlice() []*types.SignedMessage { return set } -func New(api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { +func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { cache, _ := lru.New2Q(build.BlsSignatureCacheSize) verifcache, _ := lru.New2Q(build.VerifSigCacheSize) - cfg, err := loadConfig(ds) + cfg, err := loadConfig(ctx, ds) if err != nil { return nil, xerrors.Errorf("error loading mpool config: %w", err) } @@ -601,7 +601,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err return xerrors.Errorf("error serializing message: %w", err) } - if err := mp.localMsgs.Put(datastore.NewKey(string(m.Cid().Bytes())), msgb); err != nil { + if err := mp.localMsgs.Put(ctx, datastore.NewKey(string(m.Cid().Bytes())), msgb); err != nil { return xerrors.Errorf("persisting local message: %w", err) } @@ -909,12 +909,12 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st mp.blsSigCache.Add(m.Cid(), m.Signature) } - if _, err := mp.api.PutMessage(m); err != nil { + if _, err := mp.api.PutMessage(ctx, m); err != nil { log.Warnf("mpooladd cs.PutMessage failed: %s", err) return err } - if _, err := mp.api.PutMessage(&m.Message); err != nil { + if _, err := mp.api.PutMessage(ctx, &m.Message); err != nil { log.Warnf("mpooladd cs.PutMessage failed: %s", err) return err } @@ -1207,7 +1207,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a var merr error for _, ts := range revert { - pts, err := mp.api.LoadTipSet(ts.Parents()) + pts, err := mp.api.LoadTipSet(ctx, ts.Parents()) if err != nil { log.Errorf("error loading reverted tipset parent: %s", err) merr = multierror.Append(merr, err) @@ -1216,7 +1216,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a mp.curTs = pts - msgs, err := mp.MessagesForBlocks(ts.Blocks()) + msgs, err := mp.MessagesForBlocks(ctx, ts.Blocks()) if err != nil { log.Errorf("error retrieving messages for reverted block: %s", err) merr = multierror.Append(merr, err) @@ -1232,7 +1232,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a mp.curTs = ts for _, b := range ts.Blocks() { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) if err != nil { xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) log.Errorf("error retrieving messages for block: %s", xerr) @@ -1338,7 +1338,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a return merr } -func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error { +func (mp *MessagePool) runHeadChange(ctx context.Context, from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error { add := func(m *types.SignedMessage) { s, ok := rmsgs[m.Message.From] if !ok { @@ -1360,7 +1360,7 @@ func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs } - revert, apply, err := store.ReorgOps(mp.api.LoadTipSet, from, to) + revert, apply, err := store.ReorgOps(ctx, mp.api.LoadTipSet, from, to) if err != nil { return xerrors.Errorf("failed to compute reorg ops for mpool pending messages: %w", err) } @@ -1368,7 +1368,7 @@ func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs var merr error for _, ts := range revert { - msgs, err := mp.MessagesForBlocks(ts.Blocks()) + msgs, err := mp.MessagesForBlocks(ctx, ts.Blocks()) if err != nil { log.Errorf("error retrieving messages for reverted block: %s", err) merr = multierror.Append(merr, err) @@ -1382,7 +1382,7 @@ func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs for _, ts := range apply { for _, b := range ts.Blocks() { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) if err != nil { xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) log.Errorf("error retrieving messages for block: %s", xerr) @@ -1407,11 +1407,11 @@ type statBucket struct { msgs map[uint64]*types.SignedMessage } -func (mp *MessagePool) MessagesForBlocks(blks []*types.BlockHeader) ([]*types.SignedMessage, error) { +func (mp *MessagePool) MessagesForBlocks(ctx context.Context, blks []*types.BlockHeader) ([]*types.SignedMessage, error) { out := make([]*types.SignedMessage, 0) for _, b := range blks { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) if err != nil { return nil, xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) } @@ -1477,7 +1477,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err } func (mp *MessagePool) loadLocal(ctx context.Context) error { - res, err := mp.localMsgs.Query(query.Query{}) + res, err := mp.localMsgs.Query(ctx, query.Query{}) if err != nil { return xerrors.Errorf("query local messages: %w", err) } @@ -1525,7 +1525,7 @@ func (mp *MessagePool) Clear(ctx context.Context, local bool) { if ok { for _, m := range mset.msgs { - err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes()))) + err := mp.localMsgs.Delete(ctx, datastore.NewKey(string(m.Cid().Bytes()))) if err != nil { log.Warnf("error deleting local message: %s", err) } diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index 4a2bbfe94..47cf56eb3 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -103,7 +103,7 @@ func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) return tma.tipsets[0] } -func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) { +func (tma *testMpoolAPI) PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) { return cid.Undef, nil } @@ -164,16 +164,16 @@ func (tma *testMpoolAPI) StateAccountKeyAtFinality(ctx context.Context, addr add return addr, nil } -func (tma *testMpoolAPI) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { +func (tma *testMpoolAPI) MessagesForBlock(ctx context.Context, h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { return nil, tma.bmsgs[h.Cid()], nil } -func (tma *testMpoolAPI) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { +func (tma *testMpoolAPI) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { if len(ts.Blocks()) != 1 { panic("cant deal with multiblock tipsets in this test") } - bm, sm, err := tma.MessagesForBlock(ts.Blocks()[0]) + bm, sm, err := tma.MessagesForBlock(ctx, ts.Blocks()[0]) if err != nil { return nil, err } @@ -190,7 +190,7 @@ func (tma *testMpoolAPI) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, return out, nil } -func (tma *testMpoolAPI) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { +func (tma *testMpoolAPI) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { for _, ts := range tma.tipsets { if types.CidArrsEqual(tsk.Cids(), ts.Cids()) { return ts, nil @@ -233,7 +233,7 @@ func TestMessagePool(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -277,7 +277,7 @@ func TestCheckMessageBig(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) assert.NoError(t, err) to := mock.Address(1001) @@ -340,7 +340,7 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -389,7 +389,7 @@ func TestRevertMessages(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -452,7 +452,7 @@ func TestPruningSimple(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -496,7 +496,7 @@ func TestLoadLocal(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -539,7 +539,7 @@ func TestLoadLocal(t *testing.T) { t.Fatal(err) } - mp, err = New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err = New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -568,7 +568,7 @@ func TestClearAll(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -622,7 +622,7 @@ func TestClearNonLocal(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -683,7 +683,7 @@ func TestUpdates(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } diff --git a/chain/messagepool/provider.go b/chain/messagepool/provider.go index 0f904c52c..9dcff2b33 100644 --- a/chain/messagepool/provider.go +++ b/chain/messagepool/provider.go @@ -23,13 +23,13 @@ var ( type Provider interface { SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet - PutMessage(m types.ChainMsg) (cid.Cid, error) + PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) PubSubPublish(string, []byte) error GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error) StateAccountKeyAtFinality(context.Context, address.Address, *types.TipSet) (address.Address, error) - MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) - MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error) - LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) + MessagesForBlock(context.Context, *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) + MessagesForTipset(context.Context, *types.TipSet) ([]types.ChainMsg, error) + LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) IsLite() bool } @@ -66,8 +66,8 @@ func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) return mpp.sm.ChainStore().GetHeaviestTipSet() } -func (mpp *mpoolProvider) PutMessage(m types.ChainMsg) (cid.Cid, error) { - return mpp.sm.ChainStore().PutMessage(m) +func (mpp *mpoolProvider) PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) { + return mpp.sm.ChainStore().PutMessage(ctx, m) } func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error { @@ -103,16 +103,16 @@ func (mpp *mpoolProvider) StateAccountKeyAtFinality(ctx context.Context, addr ad return mpp.sm.ResolveToKeyAddressAtFinality(ctx, addr, ts) } -func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - return mpp.sm.ChainStore().MessagesForBlock(h) +func (mpp *mpoolProvider) MessagesForBlock(ctx context.Context, h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { + return mpp.sm.ChainStore().MessagesForBlock(ctx, h) } -func (mpp *mpoolProvider) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { - return mpp.sm.ChainStore().MessagesForTipset(ts) +func (mpp *mpoolProvider) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { + return mpp.sm.ChainStore().MessagesForTipset(ctx, ts) } -func (mpp *mpoolProvider) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { - return mpp.sm.ChainStore().LoadTipSet(tsk) +func (mpp *mpoolProvider) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + return mpp.sm.ChainStore().LoadTipSet(ctx, tsk) } func (mpp *mpoolProvider) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) { diff --git a/chain/messagepool/pruning.go b/chain/messagepool/pruning.go index c10239b8e..d405afb65 100644 --- a/chain/messagepool/pruning.go +++ b/chain/messagepool/pruning.go @@ -49,7 +49,7 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro } baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor) - pending, _ := mp.getPendingMessages(ts, ts) + pending, _ := mp.getPendingMessages(ctx, ts, ts) // protected actors -- not pruned protected := make(map[address.Address]struct{}) diff --git a/chain/messagepool/repub.go b/chain/messagepool/repub.go index 4323bdee1..d92b5bd58 100644 --- a/chain/messagepool/repub.go +++ b/chain/messagepool/repub.go @@ -121,7 +121,7 @@ loop: // we can't fit the current chain but there is gas to spare // trim it and push it down - chain.Trim(gasLimit, mp, baseFee) + chain.Trim(gasLimit, repubMsgLimit, mp, baseFee) for j := i; j < len(chains)-1; j++ { if chains[j].Before(chains[j+1]) { break @@ -131,6 +131,10 @@ loop: } count := 0 + if len(msgs) > repubMsgLimit { + msgs = msgs[:repubMsgLimit] + } + log.Infof("republishing %d messages", len(msgs)) for _, m := range msgs { mb, err := m.Serialize() diff --git a/chain/messagepool/repub_test.go b/chain/messagepool/repub_test.go index fa27d68ed..de32eaa6b 100644 --- a/chain/messagepool/repub_test.go +++ b/chain/messagepool/repub_test.go @@ -25,7 +25,7 @@ func TestRepubMessages(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go index acff7c4cf..633e9b23f 100644 --- a/chain/messagepool/selection.go +++ b/chain/messagepool/selection.go @@ -7,6 +7,10 @@ import ( "sort" "time" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/crypto" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -34,9 +38,10 @@ type msgChain struct { merged bool next *msgChain prev *msgChain + sigType crypto.SigType } -func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) { +func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() @@ -46,24 +51,156 @@ func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq // if the ticket quality is high enough that the first block has higher probability // than any other block, then we don't bother with optimal selection because the // first block will always have higher effective performance + var sm *selectedMessages + var err error if tq > 0.84 { - msgs, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts) + sm, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts) } else { - msgs, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq) + sm, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq) } if err != nil { return nil, err } - if len(msgs) > build.BlockMessageLimit { - msgs = msgs[:build.BlockMessageLimit] + if sm == nil { + return nil, nil } - return msgs, nil + // one last sanity check + if len(sm.msgs) > build.BlockMessageLimit { + log.Errorf("message selection chose too many messages %d > %d", len(sm.msgs), build.BlockMessageLimit) + sm.msgs = sm.msgs[:build.BlockMessageLimit] + } + + return sm.msgs, nil } -func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { +type selectedMessages struct { + msgs []*types.SignedMessage + gasLimit int64 + secpLimit int + blsLimit int +} + +// returns false if chain can't be added due to block constraints +func (sm *selectedMessages) tryToAdd(mc *msgChain) bool { + l := len(mc.msgs) + + if build.BlockMessageLimit < l+len(sm.msgs) || sm.gasLimit < mc.gasLimit { + return false + } + + if mc.sigType == crypto.SigTypeBLS { + if sm.blsLimit < l { + return false + } + + sm.msgs = append(sm.msgs, mc.msgs...) + sm.blsLimit -= l + sm.gasLimit -= mc.gasLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + if sm.secpLimit < l { + return false + } + + sm.msgs = append(sm.msgs, mc.msgs...) + sm.secpLimit -= l + sm.gasLimit -= mc.gasLimit + } + + // don't add the weird sigType msg, but otherwise proceed + return true +} + +// returns false if messages can't be added due to block constraints +// will trim / invalidate chain as appropriate +func (sm *selectedMessages) tryToAddWithDeps(mc *msgChain, mp *MessagePool, baseFee types.BigInt) bool { + // compute the dependencies that must be merged and the gas limit including deps + chainGasLimit := mc.gasLimit + chainMsgLimit := len(mc.msgs) + depGasLimit := int64(0) + depMsgLimit := 0 + smMsgLimit := 0 + + if mc.sigType == crypto.SigTypeBLS { + smMsgLimit = sm.blsLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + smMsgLimit = sm.secpLimit + } else { + return false + } + + if smMsgLimit > build.BlockMessageLimit-len(sm.msgs) { + smMsgLimit = build.BlockMessageLimit - len(sm.msgs) + } + + var chainDeps []*msgChain + for curChain := mc.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { + chainDeps = append(chainDeps, curChain) + chainGasLimit += curChain.gasLimit + chainMsgLimit += len(curChain.msgs) + depGasLimit += curChain.gasLimit + depMsgLimit += len(curChain.msgs) + } + + // the chain doesn't fit as-is, so trim / invalidate it and return false + if chainGasLimit > sm.gasLimit || chainMsgLimit > smMsgLimit { + + // it doesn't all fit; now we have to take into account the dependent chains before + // making a decision about trimming or invalidating. + // if the dependencies exceed the block limits, then we must invalidate the chain + // as it can never be included. + // Otherwise we can just trim and continue + if depGasLimit > sm.gasLimit || depMsgLimit >= smMsgLimit { + mc.Invalidate() + } else { + // dependencies fit, just trim it + mc.Trim(sm.gasLimit-depGasLimit, smMsgLimit-depMsgLimit, mp, baseFee) + } + + return false + } + + // the chain fits! include it together with all dependencies + for i := len(chainDeps) - 1; i >= 0; i-- { + curChain := chainDeps[i] + curChain.merged = true + sm.msgs = append(sm.msgs, curChain.msgs...) + } + + mc.merged = true + + sm.msgs = append(sm.msgs, mc.msgs...) + sm.gasLimit -= chainGasLimit + + if mc.sigType == crypto.SigTypeBLS { + sm.blsLimit -= chainMsgLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + sm.secpLimit -= chainMsgLimit + } + + return true +} + +func (sm *selectedMessages) trimChain(mc *msgChain, mp *MessagePool, baseFee types.BigInt) { + msgLimit := build.BlockMessageLimit - len(sm.msgs) + if mc.sigType == crypto.SigTypeBLS { + if msgLimit > sm.blsLimit { + msgLimit = sm.blsLimit + } + } else if mc.sigType == crypto.SigTypeSecp256k1 { + if msgLimit > sm.secpLimit { + msgLimit = sm.secpLimit + } + } + + if mc.gasLimit > sm.gasLimit || len(mc.msgs) > msgLimit { + mc.Trim(sm.gasLimit, msgLimit, mp, baseFee) + } +} + +func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) (*selectedMessages, error) { start := time.Now() baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) @@ -73,7 +210,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ // 0. Load messages from the target tipset; if it is the same as the current tipset in // the mpool, then this is just the pending messages - pending, err := mp.getPendingMessages(curTs, ts) + pending, err := mp.getPendingMessages(ctx, curTs, ts) if err != nil { return nil, err } @@ -89,10 +226,10 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ // 0b. Select all priority messages that fit in the block minGas := int64(gasguess.MinGas) - result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts) + result := mp.selectPriorityMessages(ctx, pending, baseFee, ts) // have we filled the block? - if gasLimit < minGas { + if result.gasLimit < minGas || len(result.msgs) >= build.BlockMessageLimit { return result, nil } @@ -117,19 +254,21 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ return result, nil } - // 3. Parition chains into blocks (without trimming) + // 3. Partition chains into blocks (without trimming) // we use the full blockGasLimit (as opposed to the residual gas limit from the - // priority message selection) as we have to account for what other miners are doing + // priority message selection) as we have to account for what other block providers are doing nextChain := 0 partitions := make([][]*msgChain, MaxBlocks) for i := 0; i < MaxBlocks && nextChain < len(chains); i++ { gasLimit := int64(build.BlockGasLimit) + msgLimit := build.BlockMessageLimit for nextChain < len(chains) { chain := chains[nextChain] nextChain++ partitions[i] = append(partitions[i], chain) gasLimit -= chain.gasLimit - if gasLimit < minGas { + msgLimit -= len(chain.msgs) + if gasLimit < minGas || msgLimit <= 0 { break } } @@ -158,7 +297,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ }) // 6. Merge the head chains to produce the list of messages selected for inclusion - // subject to the residual gas limit + // subject to the residual block limits // When a chain is merged in, all its previous dependent chains *must* also be // merged in or we'll have a broken block startMerge := time.Now() @@ -174,35 +313,16 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ continue } - // compute the dependencies that must be merged and the gas limit including deps - chainGasLimit := chain.gasLimit - var chainDeps []*msgChain - for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { - chainDeps = append(chainDeps, curChain) - chainGasLimit += curChain.gasLimit - } - - // does it all fit in the block? - if chainGasLimit <= gasLimit { - // include it together with all dependencies - for i := len(chainDeps) - 1; i >= 0; i-- { - curChain := chainDeps[i] - curChain.merged = true - result = append(result, curChain.msgs...) - } - - chain.merged = true - // adjust the effective pefromance for all subsequent chains + if result.tryToAddWithDeps(chain, mp, baseFee) { + // adjust the effective performance for all subsequent chains if next := chain.next; next != nil && next.effPerf > 0 { next.effPerf += next.parentOffset for next = next.next; next != nil && next.effPerf > 0; next = next.next { next.setEffPerf() } } - result = append(result, chain.msgs...) - gasLimit -= chainGasLimit - // resort to account for already merged chains and effective performance adjustments + // re-sort to account for already merged chains and effective performance adjustments // the sort *must* be stable or we end up getting negative gasPerfs pushed up. sort.SliceStable(chains[i+1:], func(i, j int) bool { return chains[i].BeforeEffective(chains[j]) @@ -211,7 +331,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ continue } - // we can't fit this chain and its dependencies because of block gasLimit -- we are + // we can't fit this chain and its dependencies because of block limits -- we are // at the edge last = i break @@ -222,18 +342,22 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ // 7. We have reached the edge of what can fit wholesale; if we still hae available // gasLimit to pack some more chains, then trim the last chain and push it down. - // Trimming invalidaates subsequent dependent chains so that they can't be selected + // Trimming invalidates subsequent dependent chains so that they can't be selected // as their dependency cannot be (fully) included. // We do this in a loop because the blocker might have been inordinately large and // we might have to do it multiple times to satisfy tail packing startTail := time.Now() tailLoop: - for gasLimit >= minGas && last < len(chains) { - // trim if necessary - if chains[last].gasLimit > gasLimit { - chains[last].Trim(gasLimit, mp, baseFee) + for result.gasLimit >= minGas && last < len(chains) { + + if !chains[last].valid { + last++ + continue tailLoop } + // trim if necessary + result.trimChain(chains[last], mp, baseFee) + // push down if it hasn't been invalidated if chains[last].valid { for i := last; i < len(chains)-1; i++ { @@ -245,7 +369,7 @@ tailLoop: } // select the next (valid and fitting) chain and its dependencies for inclusion - for i, chain := range chains[last:] { + for _, chain := range chains[last:] { // has the chain been invalidated? if !chain.valid { continue @@ -261,45 +385,10 @@ tailLoop: break tailLoop } - // compute the dependencies that must be merged and the gas limit including deps - chainGasLimit := chain.gasLimit - depGasLimit := int64(0) - var chainDeps []*msgChain - for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { - chainDeps = append(chainDeps, curChain) - chainGasLimit += curChain.gasLimit - depGasLimit += curChain.gasLimit - } - - // does it all fit in the bock - if chainGasLimit <= gasLimit { - // include it together with all dependencies - for i := len(chainDeps) - 1; i >= 0; i-- { - curChain := chainDeps[i] - curChain.merged = true - result = append(result, curChain.msgs...) - } - - chain.merged = true - result = append(result, chain.msgs...) - gasLimit -= chainGasLimit + if result.tryToAddWithDeps(chain, mp, baseFee) { continue } - // it doesn't all fit; now we have to take into account the dependent chains before - // making a decision about trimming or invalidating. - // if the dependencies exceed the gas limit, then we must invalidate the chain - // as it can never be included. - // Otherwise we can just trim and continue - if depGasLimit > gasLimit { - chain.Invalidate() - last += i + 1 - continue tailLoop - } - - // dependencies fit, just trim it - chain.Trim(gasLimit-depGasLimit, mp, baseFee) - last += i continue tailLoop } @@ -311,17 +400,17 @@ tailLoop: log.Infow("pack tail chains done", "took", dt) } - // if we have gasLimit to spare, pick some random (non-negative) chains to fill the block - // we pick randomly so that we minimize the probability of duplication among all miners - if gasLimit >= minGas { - randomCount := 0 + // if we have room to spare, pick some random (non-negative) chains to fill the block + // we pick randomly so that we minimize the probability of duplication among all block producers + if result.gasLimit >= minGas && len(result.msgs) <= build.BlockMessageLimit { + preRandomLength := len(result.msgs) startRandom := time.Now() shuffleChains(chains) for _, chain := range chains { // have we filled the block - if gasLimit < minGas { + if result.gasLimit < minGas || len(result.msgs) >= build.BlockMessageLimit { break } @@ -335,59 +424,31 @@ tailLoop: continue } - // compute the dependencies that must be merged and the gas limit including deps - chainGasLimit := chain.gasLimit - depGasLimit := int64(0) - var chainDeps []*msgChain - for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { - chainDeps = append(chainDeps, curChain) - chainGasLimit += curChain.gasLimit - depGasLimit += curChain.gasLimit - } - - // do the deps fit? if the deps won't fit, invalidate the chain - if depGasLimit > gasLimit { - chain.Invalidate() + if result.tryToAddWithDeps(chain, mp, baseFee) { continue } - // do they fit as is? if it doesn't, trim to make it fit if possible - if chainGasLimit > gasLimit { - chain.Trim(gasLimit-depGasLimit, mp, baseFee) - - if !chain.valid { - continue - } + if chain.valid { + // chain got trimmed on the previous call to tryToAddWithDeps, can now be included + result.tryToAddWithDeps(chain, mp, baseFee) + continue } - - // include it together with all dependencies - for i := len(chainDeps) - 1; i >= 0; i-- { - curChain := chainDeps[i] - curChain.merged = true - result = append(result, curChain.msgs...) - randomCount += len(curChain.msgs) - } - - chain.merged = true - result = append(result, chain.msgs...) - randomCount += len(chain.msgs) - gasLimit -= chainGasLimit } if dt := time.Since(startRandom); dt > time.Millisecond { log.Infow("pack random tail chains done", "took", dt) } - if randomCount > 0 { + if len(result.msgs) != preRandomLength { log.Warnf("optimal selection failed to pack a block; picked %d messages with random selection", - randomCount) + len(result.msgs)-preRandomLength) } } return result, nil } -func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) ([]*types.SignedMessage, error) { +func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) (*selectedMessages, error) { start := time.Now() baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) @@ -397,7 +458,7 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type // 0. Load messages for the target tipset; if it is the same as the current tipset in the mpool // then this is just the pending messages - pending, err := mp.getPendingMessages(curTs, ts) + pending, err := mp.getPendingMessages(ctx, curTs, ts) if err != nil { return nil, err } @@ -413,10 +474,10 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type // 0b. Select all priority messages that fit in the block minGas := int64(gasguess.MinGas) - result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts) + result := mp.selectPriorityMessages(ctx, pending, baseFee, ts) // have we filled the block? - if gasLimit < minGas { + if result.gasLimit < minGas || len(result.msgs) > build.BlockMessageLimit { return result, nil } @@ -442,7 +503,7 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type } // 3. Merge the head chains to produce the list of messages selected for inclusion, subject to - // the block gas limit. + // the block gas and message limits. startMerge := time.Now() last := len(chains) for i, chain := range chains { @@ -452,13 +513,12 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type } // does it fit in the block? - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } - // we can't fit this chain because of block gasLimit -- we are at the edge + // we can't fit this chain because of block limits -- we are at the edge last = i break } @@ -474,9 +534,9 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type // have to do it multiple times to satisfy tail packing. startTail := time.Now() tailLoop: - for gasLimit >= minGas && last < len(chains) { + for result.gasLimit >= minGas && last < len(chains) { // trim - chains[last].Trim(gasLimit, mp, baseFee) + result.trimChain(chains[last], mp, baseFee) // push down if it hasn't been invalidated if chains[last].valid { @@ -501,9 +561,8 @@ tailLoop: } // does it fit in the bock? - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } @@ -523,7 +582,7 @@ tailLoop: return result, nil } -func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) { +func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) *selectedMessages { start := time.Now() defer func() { if dt := time.Since(start); dt > time.Millisecond { @@ -531,8 +590,12 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a } }() mpCfg := mp.getConfig() - result := make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow) - gasLimit := int64(build.BlockGasLimit) + result := &selectedMessages{ + msgs: make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow), + gasLimit: int64(build.BlockGasLimit), + blsLimit: cbg.MaxLength, + secpLimit: cbg.MaxLength, + } minGas := int64(gasguess.MinGas) // 1. Get priority actor chains @@ -542,7 +605,7 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a pk, err := mp.resolveToKey(ctx, actor) if err != nil { log.Debugf("mpooladdlocal failed to resolve sender: %s", err) - return nil, gasLimit + return result } mset, ok := pending[pk] @@ -554,9 +617,8 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a chains = append(chains, next...) } } - if len(chains) == 0 { - return nil, gasLimit + return result } // 2. Sort the chains @@ -566,7 +628,7 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a if len(chains) != 0 && chains[0].gasPerf < 0 { log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf) - return nil, gasLimit + return result } // 3. Merge chains until the block limit, as long as they have non-negative gas performance @@ -576,9 +638,8 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a break } - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } @@ -588,9 +649,10 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a } tailLoop: - for gasLimit >= minGas && last < len(chains) { + for result.gasLimit >= minGas && last < len(chains) { // trim, discarding negative performing messages - chains[last].Trim(gasLimit, mp, baseFee) + + result.trimChain(chains[last], mp, baseFee) // push down if it hasn't been invalidated if chains[last].valid { @@ -615,9 +677,8 @@ tailLoop: } // does it fit in the bock? - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } @@ -631,10 +692,10 @@ tailLoop: break } - return result, gasLimit + return result } -func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) { +func (mp *MessagePool) getPendingMessages(ctx context.Context, curTs, ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) { start := time.Now() result := make(map[address.Address]map[uint64]*types.SignedMessage) @@ -670,7 +731,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address. return result, nil } - if err := mp.runHeadChange(curTs, ts, result); err != nil { + if err := mp.runHeadChange(ctx, curTs, ts, result); err != nil { return nil, xerrors.Errorf("failed to process difference between mpool head and given head: %w", err) } @@ -778,11 +839,16 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 return nil } + // if we have more messages from this sender than can fit in a block, drop the extra ones + if len(msgs) > build.BlockMessageLimit { + msgs = msgs[:build.BlockMessageLimit] + } + // ok, now we can construct the chains using the messages we have // invariant: each chain has a bigger gasPerf than the next -- otherwise they can be merged // and increase the gasPerf of the first chain // We do this in two passes: - // - in the first pass we create chains that aggreagate messages with non-decreasing gasPerf + // - in the first pass we create chains that aggregate messages with non-decreasing gasPerf // - in the second pass we merge chains to maintain the invariant. var chains []*msgChain var curChain *msgChain @@ -794,6 +860,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 chain.gasLimit = m.Message.GasLimit chain.gasPerf = mp.getGasPerf(chain.gasReward, chain.gasLimit) chain.valid = true + chain.sigType = m.Signature.Type return chain } @@ -808,7 +875,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 gasLimit := curChain.gasLimit + m.Message.GasLimit gasPerf := mp.getGasPerf(gasReward, gasLimit) - // try to add the message to the current chain -- if it decreases the gasPerf, then make a + // try to add the message to the current chain -- if it decreases the gasPerf, or then make a // new chain if gasPerf < curChain.gasPerf { chains = append(chains, curChain) @@ -868,9 +935,9 @@ func (mc *msgChain) Before(other *msgChain) bool { (mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0) } -func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt) { +func (mc *msgChain) Trim(gasLimit int64, msgLimit int, mp *MessagePool, baseFee types.BigInt) { i := len(mc.msgs) - 1 - for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0) { + for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0 || i >= msgLimit) { gasReward := mp.getGasReward(mc.msgs[i], baseFee) mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward) mc.gasLimit -= mc.msgs[i].Message.GasLimit @@ -893,6 +960,7 @@ func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt) mc.msgs = mc.msgs[:i+1] } + // TODO: if the trim above is a no-op, this (may) needlessly invalidates the next chain if mc.next != nil { mc.next.Invalidate() mc.next = nil diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go index 0f8fd8ee6..2ae99cd77 100644 --- a/chain/messagepool/selection_test.go +++ b/chain/messagepool/selection_test.go @@ -13,6 +13,10 @@ import ( "sort" "testing" + "github.com/filecoin-project/go-state-types/crypto" + + cbg "github.com/whyrusleeping/cbor-gen" + "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" @@ -61,7 +65,7 @@ func makeTestMessage(w *wallet.LocalWallet, from, to address.Address, nonce uint func makeTestMpool() (*MessagePool, *testMpoolAPI) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "test", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "test", nil) if err != nil { panic(err) } @@ -527,7 +531,7 @@ func TestBasicMessageSelection(t *testing.T) { } } -func TestMessageSelectionTrimming(t *testing.T) { +func TestMessageSelectionTrimmingGas(t *testing.T) { mp, tma := makeTestMpool() // the actors @@ -577,7 +581,7 @@ func TestMessageSelectionTrimming(t *testing.T) { expected := int(build.BlockGasLimit / gasLimit) if len(msgs) != expected { - t.Fatalf("expected %d messages, bug got %d", expected, len(msgs)) + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) } mGasLimit := int64(0) @@ -590,6 +594,199 @@ func TestMessageSelectionTrimming(t *testing.T) { } +func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + + // create a larger than selectable chain + for i := 0; i < build.BlockMessageLimit; i++ { + m := makeTestMessage(w1, a1, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + expected := cbg.MaxLength + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + mGasLimit := int64(0) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + } + if mGasLimit > build.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + +} + +func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.WalletNew(context.Background(), types.KTBLS) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // create 2 larger than selectable chains + for i := 0; i < build.BlockMessageLimit; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 100) + mustAdd(t, mp, m) + // a2's messages are preferred + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 1000) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + mGasLimit := int64(0) + counts := make(map[crypto.SigType]uint) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + counts[m.Signature.Type]++ + } + + if mGasLimit > build.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + + expected := build.BlockMessageLimit + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + if counts[crypto.SigTypeBLS] != cbg.MaxLength { + t.Fatalf("expected %d bls messages, but got %d", cbg.MaxLength, len(msgs)) + } +} + +func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.WalletNew(context.Background(), types.KTBLS) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // create 2 almost max-length chains of equal value + i := 0 + for i = 0; i < cbg.MaxLength-1; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 100) + mustAdd(t, mp, m) + // a2's messages are preferred + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + } + + // a1's 8192th message is worth more than a2's + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 1000) + mustAdd(t, mp, m) + + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + + i++ + + // a2's (unselectable) 8193rd message is worth SO MUCH + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 1000000) + mustAdd(t, mp, m) + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + mGasLimit := int64(0) + counts := make(map[crypto.SigType]uint) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + counts[m.Signature.Type]++ + } + + if mGasLimit > build.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + + expected := build.BlockMessageLimit + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + // we should have taken the secp chain + if counts[crypto.SigTypeSecp256k1] != cbg.MaxLength { + t.Fatalf("expected %d bls messages, but got %d", cbg.MaxLength, len(msgs)) + } +} + func TestPriorityMessageSelection(t *testing.T) { mp, tma := makeTestMpool() @@ -978,7 +1175,7 @@ func TestOptimalMessageSelection2(t *testing.T) { func TestOptimalMessageSelection3(t *testing.T) { // this test uses 10 actors sending a block of messages to each other, with the the first // actors paying higher gas premium than the subsequent actors. - // We select with a low ticket quality; the chain depenent merging algorithm should pick + // We select with a low ticket quality; the chain dependent merging algorithm should pick // messages from the median actor from the start mp, tma := makeTestMpool() @@ -1109,11 +1306,13 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu logging.SetLogLevel("messagepool", "error") // 1. greedy selection - greedyMsgs, err := mp.selectMessagesGreedy(context.Background(), ts, ts) + gm, err := mp.selectMessagesGreedy(context.Background(), ts, ts) if err != nil { t.Fatal(err) } + greedyMsgs := gm.msgs + totalGreedyCapacity := 0.0 totalGreedyReward := 0.0 totalOptimalCapacity := 0.0 diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go index 063d1aa7d..e2229bb51 100644 --- a/chain/messagesigner/messagesigner.go +++ b/chain/messagesigner/messagesigner.go @@ -84,7 +84,7 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb } // If the callback executed successfully, write the nonce to the datastore - if err := ms.saveNonce(msg.From, nonce); err != nil { + if err := ms.saveNonce(ctx, msg.From, nonce); err != nil { return nil, xerrors.Errorf("failed to save nonce: %w", err) } @@ -105,7 +105,7 @@ func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (u // Get the next nonce for this address from the datastore addrNonceKey := ms.dstoreKey(addr) - dsNonceBytes, err := ms.ds.Get(addrNonceKey) + dsNonceBytes, err := ms.ds.Get(ctx, addrNonceKey) switch { case xerrors.Is(err, datastore.ErrNotFound): @@ -139,7 +139,7 @@ func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (u // saveNonce increments the nonce for this address and writes it to the // datastore -func (ms *MessageSigner) saveNonce(addr address.Address, nonce uint64) error { +func (ms *MessageSigner) saveNonce(ctx context.Context, addr address.Address, nonce uint64) error { // Increment the nonce nonce++ @@ -150,7 +150,7 @@ func (ms *MessageSigner) saveNonce(addr address.Address, nonce uint64) error { if err != nil { return xerrors.Errorf("failed to marshall nonce: %w", err) } - err = ms.ds.Put(addrNonceKey, buf.Bytes()) + err = ms.ds.Put(ctx, addrNonceKey, buf.Bytes()) if err != nil { return xerrors.Errorf("failed to write nonce to datastore: %w", err) } diff --git a/chain/rand/rand.go b/chain/rand/rand.go index 90e9a514b..427648f2a 100644 --- a/chain/rand/rand.go +++ b/chain/rand/rand.go @@ -4,6 +4,8 @@ import ( "context" "encoding/binary" + "github.com/filecoin-project/go-state-types/network" + logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/lotus/chain/beacon" @@ -48,7 +50,7 @@ func (sr *stateRand) GetBeaconRandomnessTipset(ctx context.Context, round abi.Ch defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) - ts, err := sr.cs.LoadTipSet(types.NewTipSetKey(sr.blks...)) + ts, err := sr.cs.LoadTipSet(ctx, types.NewTipSetKey(sr.blks...)) if err != nil { return nil, err } @@ -70,12 +72,12 @@ func (sr *stateRand) GetBeaconRandomnessTipset(ctx context.Context, round abi.Ch return randTs, nil } -func (sr *stateRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { +func (sr *stateRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { _, span := trace.StartSpan(ctx, "store.GetChainRandomness") defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) - ts, err := sr.cs.LoadTipSet(types.NewTipSetKey(sr.blks...)) + ts, err := sr.cs.LoadTipSet(ctx, types.NewTipSetKey(sr.blks...)) if err != nil { return nil, err } @@ -101,38 +103,32 @@ func (sr *stateRand) GetChainRandomness(ctx context.Context, pers crypto.DomainS return DrawRandomness(mtb.Ticket.VRFProof, pers, round, entropy) } +type NetworkVersionGetter func(context.Context, abi.ChainEpoch) network.Version + type stateRand struct { - cs *store.ChainStore - blks []cid.Cid - beacon beacon.Schedule + cs *store.ChainStore + blks []cid.Cid + beacon beacon.Schedule + networkVersionGetter NetworkVersionGetter } -func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule) vm.Rand { +func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule, networkVersionGetter NetworkVersionGetter) vm.Rand { return &stateRand{ - cs: cs, - blks: blks, - beacon: b, + cs: cs, + blks: blks, + beacon: b, + networkVersionGetter: networkVersionGetter, } } // network v0-12 -func (sr *stateRand) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return sr.GetChainRandomness(ctx, pers, round, entropy, true) -} - -// network v13 and on -func (sr *stateRand) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return sr.GetChainRandomness(ctx, pers, round, entropy, false) -} - -// network v0-12 -func (sr *stateRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (sr *stateRand) getBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { randTs, err := sr.GetBeaconRandomnessTipset(ctx, round, true) if err != nil { return nil, err } - be, err := sr.cs.GetLatestBeaconEntry(randTs) + be, err := sr.cs.GetLatestBeaconEntry(ctx, randTs) if err != nil { return nil, err } @@ -143,13 +139,13 @@ func (sr *stateRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.Doma } // network v13 -func (sr *stateRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (sr *stateRand) getBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { randTs, err := sr.GetBeaconRandomnessTipset(ctx, round, false) if err != nil { return nil, err } - be, err := sr.cs.GetLatestBeaconEntry(randTs) + be, err := sr.cs.GetLatestBeaconEntry(ctx, randTs) if err != nil { return nil, err } @@ -160,9 +156,9 @@ func (sr *stateRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.Doma } // network v14 and on -func (sr *stateRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (sr *stateRand) getBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { if filecoinEpoch < 0 { - return sr.GetBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy) + return sr.getBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy) } be, err := sr.extractBeaconEntryForEpoch(ctx, filecoinEpoch) @@ -174,6 +170,28 @@ func (sr *stateRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.Doma return DrawRandomness(be.Data, pers, filecoinEpoch, entropy) } +func (sr *stateRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + nv := sr.networkVersionGetter(ctx, filecoinEpoch) + + if nv >= network.Version13 { + return sr.getChainRandomness(ctx, pers, filecoinEpoch, entropy, false) + } + + return sr.getChainRandomness(ctx, pers, filecoinEpoch, entropy, true) +} + +func (sr *stateRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + nv := sr.networkVersionGetter(ctx, filecoinEpoch) + + if nv >= network.Version14 { + return sr.getBeaconRandomnessV3(ctx, pers, filecoinEpoch, entropy) + } else if nv == network.Version13 { + return sr.getBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy) + } else { + return sr.getBeaconRandomnessV1(ctx, pers, filecoinEpoch, entropy) + } +} + func (sr *stateRand) extractBeaconEntryForEpoch(ctx context.Context, filecoinEpoch abi.ChainEpoch) (*types.BeaconEntry, error) { randTs, err := sr.GetBeaconRandomnessTipset(ctx, filecoinEpoch, false) if err != nil { @@ -190,7 +208,7 @@ func (sr *stateRand) extractBeaconEntryForEpoch(ctx context.Context, filecoinEpo } } - next, err := sr.cs.LoadTipSet(randTs.Parents()) + next, err := sr.cs.LoadTipSet(ctx, randTs.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for beacon entry: %w", err) } diff --git a/chain/state/statetree.go b/chain/state/statetree.go index f230f7faa..9a518a622 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -159,7 +159,7 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) { /* inline-gen start */ - case network.Version13, network.Version14: + case network.Version13, network.Version14, network.Version15: /* inline-gen end */ return types.StateTreeVersion4, nil diff --git a/chain/stmgr/actors.go b/chain/stmgr/actors.go index 4d016b7ab..52773e1e4 100644 --- a/chain/stmgr/actors.go +++ b/chain/stmgr/actors.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" cid "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/api" @@ -116,7 +117,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres return mas.GetSector(sid) } -func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) { +func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.ExtendedSectorInfo, error) { act, err := sm.LoadActorRaw(ctx, maddr, st) if err != nil { return nil, xerrors.Errorf("failed to load miner actor: %w", err) @@ -202,12 +203,13 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra return nil, xerrors.Errorf("loading proving sectors: %w", err) } - out := make([]builtin.SectorInfo, len(sectors)) + out := make([]builtin.ExtendedSectorInfo, len(sectors)) for i, sinfo := range sectors { - out[i] = builtin.SectorInfo{ + out[i] = builtin.ExtendedSectorInfo{ SealProof: sinfo.SealProof, SectorNumber: sinfo.SectorNumber, SealedCID: sinfo.SealedCID, + SectorKey: sinfo.SectorKeyCID, } } @@ -300,12 +302,12 @@ func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([ } func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) { - ts, err := sm.ChainStore().LoadTipSet(tsk) + ts, err := sm.ChainStore().LoadTipSet(ctx, tsk) if err != nil { return nil, xerrors.Errorf("failed to load tipset for mining base: %w", err) } - prev, err := sm.ChainStore().GetLatestBeaconEntry(ts) + prev, err := sm.ChainStore().GetLatestBeaconEntry(ctx, ts) if err != nil { if os.Getenv("LOTUS_IGNORE_DRAND") != "_yes_" { return nil, xerrors.Errorf("failed to get latest beacon entry: %w", err) @@ -357,7 +359,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err) } - nv := sm.GetNtwkVersion(ctx, ts.Height()) + nv := sm.GetNetworkVersion(ctx, ts.Height()) sectors, err := GetSectorsForWinningPoSt(ctx, nv, pv, sm, lbst, maddr, prand) if err != nil { @@ -419,7 +421,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add hmp, err := minerHasMinPower(ctx, sm, addr, lookbackTs) // TODO: We're blurring the lines between a "runtime network version" and a "Lotus upgrade epoch", is that unavoidable? - if sm.GetNtwkVersion(ctx, baseTs.Height()) <= network.Version3 { + if sm.GetNetworkVersion(ctx, baseTs.Height()) <= network.Version3 { return hmp, err } diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index 7cc50e710..31639701d 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -40,7 +40,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. ts = sm.cs.GetHeaviestTipSet() // Search back till we find a height with no fork, or we reach the beginning. for ts.Height() > 0 { - pts, err := sm.cs.GetTipSetFromKey(ts.Parents()) + pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } @@ -51,7 +51,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. ts = pts } } else if ts.Height() > 0 { - pts, err := sm.cs.LoadTipSet(ts.Parents()) + pts, err := sm.cs.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parent tipset: %w", err) } @@ -75,12 +75,12 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. vmopt := &vm.VMOpts{ StateBase: bstate, Epoch: pheight + 1, - Rand: rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon), + Rand: rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion), Bstore: sm.cs.StateBlockstore(), Actors: sm.tsExec.NewActorRegistry(), Syscalls: sm.Syscalls, CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, pheight+1), BaseFee: types.NewInt(0), LookbackState: LookbackStateGetterForTipset(sm, ts), } @@ -155,7 +155,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri // height to have no fork, because we'll run it inside this // function before executing the given message. for ts.Height() > 0 { - pts, err := sm.cs.GetTipSetFromKey(ts.Parents()) + pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } @@ -166,7 +166,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri ts = pts } } else if ts.Height() > 0 { - pts, err := sm.cs.GetTipSetFromKey(ts.Parents()) + pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } @@ -186,7 +186,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri return nil, fmt.Errorf("failed to handle fork: %w", err) } - r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon) + r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion) if span.IsRecordingEvents() { span.AddAttributes( @@ -204,7 +204,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri Actors: sm.tsExec.NewActorRegistry(), Syscalls: sm.Syscalls, CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, ts.Height()+1), BaseFee: ts.Blocks()[0].ParentBaseFee, LookbackState: LookbackStateGetterForTipset(sm, ts), } diff --git a/chain/stmgr/read.go b/chain/stmgr/read.go index bc259f227..bca32429b 100644 --- a/chain/stmgr/read.go +++ b/chain/stmgr/read.go @@ -13,8 +13,8 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -func (sm *StateManager) ParentStateTsk(tsk types.TipSetKey) (*state.StateTree, error) { - ts, err := sm.cs.GetTipSetFromKey(tsk) +func (sm *StateManager) ParentStateTsk(ctx context.Context, tsk types.TipSetKey) (*state.StateTree, error) { + ts, err := sm.cs.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -57,8 +57,8 @@ func (sm *StateManager) LoadActor(_ context.Context, addr address.Address, ts *t return state.GetActor(addr) } -func (sm *StateManager) LoadActorTsk(_ context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) { - state, err := sm.ParentStateTsk(tsk) +func (sm *StateManager) LoadActorTsk(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) { + state, err := sm.ParentStateTsk(ctx, tsk) if err != nil { return nil, err } diff --git a/chain/stmgr/searchwait.go b/chain/stmgr/searchwait.go index 45c98a855..7e6de91d4 100644 --- a/chain/stmgr/searchwait.go +++ b/chain/stmgr/searchwait.go @@ -20,7 +20,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid ctx, cancel := context.WithCancel(ctx) defer cancel() - msg, err := sm.cs.GetCMessage(mcid) + msg, err := sm.cs.GetCMessage(ctx, mcid) if err != nil { return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err) } @@ -40,7 +40,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid return nil, nil, cid.Undef, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type) } - r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, head[0].Val, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -93,7 +93,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) { return candidateTs, candidateRcp, candidateFm, nil } - r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, val.Val, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -130,12 +130,12 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid } func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet, mcid cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { - msg, err := sm.cs.GetCMessage(mcid) + msg, err := sm.cs.GetCMessage(ctx, mcid) if err != nil { return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err) } - r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, head, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -201,7 +201,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet return nil, nil, cid.Undef, nil } - pts, err := sm.cs.LoadTipSet(cur.Parents()) + pts, err := sm.cs.LoadTipSet(ctx, cur.Parents()) if err != nil { return nil, nil, cid.Undef, xerrors.Errorf("failed to load tipset during msg wait searchback: %w", err) } @@ -214,7 +214,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet // check that between cur and parent tipset the nonce fell into range of our message if actorNoExist || (curActor.Nonce > mNonce && act.Nonce <= mNonce) { - r, foundMsg, err := sm.tipsetExecutedMessage(cur, m.Cid(), m.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, cur, m.Cid(), m.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, xerrors.Errorf("checking for message execution during lookback: %w", err) } @@ -229,18 +229,18 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet } } -func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message, allowReplaced bool) (*types.MessageReceipt, cid.Cid, error) { +func (sm *StateManager) tipsetExecutedMessage(ctx context.Context, ts *types.TipSet, msg cid.Cid, vmm *types.Message, allowReplaced bool) (*types.MessageReceipt, cid.Cid, error) { // The genesis block did not execute any messages if ts.Height() == 0 { return nil, cid.Undef, nil } - pts, err := sm.cs.LoadTipSet(ts.Parents()) + pts, err := sm.cs.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, cid.Undef, err } - cm, err := sm.cs.MessagesForTipset(pts) + cm, err := sm.cs.MessagesForTipset(ctx, pts) if err != nil { return nil, cid.Undef, err } @@ -267,7 +267,7 @@ func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm } } - pr, err := sm.cs.GetParentReceipt(ts.Blocks()[0], i) + pr, err := sm.cs.GetParentReceipt(ctx, ts.Blocks()[0], i) if err != nil { return nil, cid.Undef, err } diff --git a/chain/stmgr/searchwait_test.go b/chain/stmgr/searchwait_test.go index 1e4776ff7..b8cd7ddcf 100644 --- a/chain/stmgr/searchwait_test.go +++ b/chain/stmgr/searchwait_test.go @@ -75,7 +75,7 @@ func TestSearchForMessageReplacements(t *testing.T) { t.Fatal(err) } - err = cg.Blockstore().Put(rmb) + err = cg.Blockstore().Put(ctx, rmb) if err != nil { t.Fatal(err) } @@ -117,7 +117,7 @@ func TestSearchForMessageReplacements(t *testing.T) { t.Fatal(err) } - err = cg.Blockstore().Put(nrmb) + err = cg.Blockstore().Put(ctx, nrmb) if err != nil { t.Fatal(err) } diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 67c0bdb6a..fd3558a1c 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -320,7 +320,7 @@ func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts * func (sm *StateManager) ValidateChain(ctx context.Context, ts *types.TipSet) error { tschain := []*types.TipSet{ts} for ts.Height() != 0 { - next, err := sm.cs.LoadTipSet(ts.Parents()) + next, err := sm.cs.LoadTipSet(ctx, ts.Parents()) if err != nil { return err } @@ -356,7 +356,7 @@ func (sm *StateManager) VMConstructor() func(context.Context, *vm.VMOpts) (*vm.V } } -func (sm *StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoch) network.Version { +func (sm *StateManager) GetNetworkVersion(ctx context.Context, height abi.ChainEpoch) network.Version { // The epochs here are the _last_ epoch for every version, or -1 if the // version is disabled. for _, spec := range sm.networkVersions { @@ -372,36 +372,24 @@ func (sm *StateManager) VMSys() vm.SyscallBuilder { } func (sm *StateManager) GetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) { - pts, err := sm.ChainStore().GetTipSetFromKey(tsk) + pts, err := sm.ChainStore().GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon) - rnv := sm.GetNtwkVersion(ctx, randEpoch) + r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion) - if rnv >= network.Version14 { - return r.GetBeaconRandomnessV3(ctx, personalization, randEpoch, entropy) - } else if rnv == network.Version13 { - return r.GetBeaconRandomnessV2(ctx, personalization, randEpoch, entropy) - } - - return r.GetBeaconRandomnessV1(ctx, personalization, randEpoch, entropy) + return r.GetBeaconRandomness(ctx, personalization, randEpoch, entropy) } func (sm *StateManager) GetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) { - pts, err := sm.ChainStore().LoadTipSet(tsk) + pts, err := sm.ChainStore().LoadTipSet(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset key: %w", err) } - r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon) - rnv := sm.GetNtwkVersion(ctx, randEpoch) + r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion) - if rnv >= network.Version13 { - return r.GetChainRandomnessV2(ctx, personalization, randEpoch, entropy) - } - - return r.GetChainRandomnessV1(ctx, personalization, randEpoch, entropy) + return r.GetChainRandomness(ctx, personalization, randEpoch, entropy) } diff --git a/chain/stmgr/supply.go b/chain/stmgr/supply.go index c9475a51e..0744c02aa 100644 --- a/chain/stmgr/supply.go +++ b/chain/stmgr/supply.go @@ -31,7 +31,7 @@ import ( // sets up information about the vesting schedule func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error { - gb, err := sm.cs.GetGenesis() + gb, err := sm.cs.GetGenesis(ctx) if err != nil { return xerrors.Errorf("getting genesis block: %w", err) } diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index ce4f60105..2a84c777b 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -79,7 +79,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, // future. It's not guaranteed to be accurate... but that's fine. } - r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon) + r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion) vmopt := &vm.VMOpts{ StateBase: base, Epoch: height, @@ -88,7 +88,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, Actors: sm.tsExec.NewActorRegistry(), Syscalls: sm.Syscalls, CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, height), BaseFee: ts.Blocks()[0].ParentBaseFee, LookbackState: LookbackStateGetterForTipset(sm, ts), } @@ -128,7 +128,7 @@ func LookbackStateGetterForTipset(sm *StateManager, ts *types.TipSet) vm.Lookbac func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, cid.Cid, error) { var lbr abi.ChainEpoch - lb := policy.GetWinningPoStSectorSetLookback(sm.GetNtwkVersion(ctx, round)) + lb := policy.GetWinningPoStSectorSetLookback(sm.GetNetworkVersion(ctx, round)) if round > lb { lbr = round - lb } @@ -155,7 +155,7 @@ func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types. } - lbts, err := sm.ChainStore().GetTipSetFromKey(nextTs.Parents()) + lbts, err := sm.ChainStore().GetTipSetFromKey(ctx, nextTs.Parents()) if err != nil { return nil, cid.Undef, xerrors.Errorf("failed to resolve lookback tipset: %w", err) } diff --git a/chain/store/basefee.go b/chain/store/basefee.go index 33367abcc..1d6b0760e 100644 --- a/chain/store/basefee.go +++ b/chain/store/basefee.go @@ -58,7 +58,7 @@ func (cs *ChainStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet) (abi seen := make(map[cid.Cid]struct{}) for _, b := range ts.Blocks() { - msg1, msg2, err := cs.MessagesForBlock(b) + msg1, msg2, err := cs.MessagesForBlock(ctx, b) if err != nil { return zero, xerrors.Errorf("error getting messages for: %s: %w", b.Cid(), err) } diff --git a/chain/store/checkpoint_test.go b/chain/store/checkpoint_test.go index 81bbab6ea..73b45f3ad 100644 --- a/chain/store/checkpoint_test.go +++ b/chain/store/checkpoint_test.go @@ -10,6 +10,8 @@ import ( ) func TestChainCheckpoint(t *testing.T) { + ctx := context.Background() + cg, err := gen.NewGenerator() if err != nil { t.Fatal(err) @@ -27,11 +29,11 @@ func TestChainCheckpoint(t *testing.T) { cs := cg.ChainStore() checkpoint := last - checkpointParents, err := cs.GetTipSetFromKey(checkpoint.Parents()) + checkpointParents, err := cs.GetTipSetFromKey(ctx, checkpoint.Parents()) require.NoError(t, err) // Set the head to the block before the checkpoint. - err = cs.SetHead(checkpointParents) + err = cs.SetHead(ctx, checkpointParents) require.NoError(t, err) // Verify it worked. @@ -39,11 +41,11 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(checkpointParents)) // Try to set the checkpoint in the future, it should fail. - err = cs.SetCheckpoint(checkpoint) + err = cs.SetCheckpoint(ctx, checkpoint) require.Error(t, err) // Then move the head back. - err = cs.SetHead(checkpoint) + err = cs.SetHead(ctx, checkpoint) require.NoError(t, err) // Verify it worked. @@ -51,7 +53,7 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(checkpoint)) // And checkpoint it. - err = cs.SetCheckpoint(checkpoint) + err = cs.SetCheckpoint(ctx, checkpoint) require.NoError(t, err) // Let the second miner miner mine a fork @@ -70,7 +72,7 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(checkpoint)) // Remove the checkpoint. - err = cs.RemoveCheckpoint() + err = cs.RemoveCheckpoint(ctx) require.NoError(t, err) // Now switch to the other fork. @@ -80,10 +82,10 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(last)) // Setting a checkpoint on the other fork should fail. - err = cs.SetCheckpoint(checkpoint) + err = cs.SetCheckpoint(ctx, checkpoint) require.Error(t, err) // Setting a checkpoint on this fork should succeed. - err = cs.SetCheckpoint(checkpointParents) + err = cs.SetCheckpoint(ctx, checkpointParents) require.NoError(t, err) } diff --git a/chain/store/index.go b/chain/store/index.go index 324fb7a63..f5bbbd438 100644 --- a/chain/store/index.go +++ b/chain/store/index.go @@ -31,7 +31,7 @@ type ChainIndex struct { skipLength abi.ChainEpoch } -type loadTipSetFunc func(types.TipSetKey) (*types.TipSet, error) +type loadTipSetFunc func(context.Context, types.TipSetKey) (*types.TipSet, error) func NewChainIndex(lts loadTipSetFunc) *ChainIndex { sc, _ := lru.NewARC(DefaultChainIndexCacheSize) @@ -49,12 +49,12 @@ type lbEntry struct { target types.TipSetKey } -func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { +func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { if from.Height()-to <= ci.skipLength { - return ci.walkBack(from, to) + return ci.walkBack(ctx, from, to) } - rounded, err := ci.roundDown(from) + rounded, err := ci.roundDown(ctx, from) if err != nil { return nil, err } @@ -63,7 +63,7 @@ func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, t for { cval, ok := ci.skipCache.Get(cur) if !ok { - fc, err := ci.fillCache(cur) + fc, err := ci.fillCache(ctx, cur) if err != nil { return nil, err } @@ -74,19 +74,19 @@ func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, t if lbe.ts.Height() == to || lbe.parentHeight < to { return lbe.ts, nil } else if to > lbe.targetHeight { - return ci.walkBack(lbe.ts, to) + return ci.walkBack(ctx, lbe.ts, to) } cur = lbe.target } } -func (ci *ChainIndex) GetTipsetByHeightWithoutCache(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { - return ci.walkBack(from, to) +func (ci *ChainIndex) GetTipsetByHeightWithoutCache(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { + return ci.walkBack(ctx, from, to) } -func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { - ts, err := ci.loadTipSet(tsk) +func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEntry, error) { + ts, err := ci.loadTipSet(ctx, tsk) if err != nil { return nil, err } @@ -101,7 +101,7 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { // will either be equal to ts.Height, or at least > ts.Parent.Height() rheight := ci.roundHeight(ts.Height()) - parent, err := ci.loadTipSet(ts.Parents()) + parent, err := ci.loadTipSet(ctx, ts.Parents()) if err != nil { return nil, err } @@ -115,7 +115,7 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { if parent.Height() < rheight { skipTarget = parent } else { - skipTarget, err = ci.walkBack(parent, rheight) + skipTarget, err = ci.walkBack(ctx, parent, rheight) if err != nil { return nil, xerrors.Errorf("fillCache walkback: %w", err) } @@ -137,10 +137,10 @@ func (ci *ChainIndex) roundHeight(h abi.ChainEpoch) abi.ChainEpoch { return (h / ci.skipLength) * ci.skipLength } -func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) { +func (ci *ChainIndex) roundDown(ctx context.Context, ts *types.TipSet) (*types.TipSet, error) { target := ci.roundHeight(ts.Height()) - rounded, err := ci.walkBack(ts, target) + rounded, err := ci.walkBack(ctx, ts, target) if err != nil { return nil, err } @@ -148,7 +148,7 @@ func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) { return rounded, nil } -func (ci *ChainIndex) walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { +func (ci *ChainIndex) walkBack(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { if to > from.Height() { return nil, xerrors.Errorf("looking for tipset with height greater than start point") } @@ -160,7 +160,7 @@ func (ci *ChainIndex) walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.Ti ts := from for { - pts, err := ci.loadTipSet(ts.Parents()) + pts, err := ci.loadTipSet(ctx, ts.Parents()) if err != nil { return nil, err } diff --git a/chain/store/index_test.go b/chain/store/index_test.go index 9bc31e5a8..b7f1d570f 100644 --- a/chain/store/index_test.go +++ b/chain/store/index_test.go @@ -35,7 +35,7 @@ func TestIndexSeeks(t *testing.T) { cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - _, err = cs.Import(bytes.NewReader(gencar)) + _, err = cs.Import(ctx, bytes.NewReader(gencar)) if err != nil { t.Fatal(err) } @@ -44,7 +44,7 @@ func TestIndexSeeks(t *testing.T) { if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil { t.Fatal(err) } - assert.NoError(t, cs.SetGenesis(gen)) + assert.NoError(t, cs.SetGenesis(ctx, gen)) // Put 113 blocks from genesis for i := 0; i < 113; i++ { diff --git a/chain/store/messages.go b/chain/store/messages.go index 07ce83458..4dd3bfc1d 100644 --- a/chain/store/messages.go +++ b/chain/store/messages.go @@ -23,25 +23,25 @@ type storable interface { ToStorageBlock() (block.Block, error) } -func PutMessage(bs bstore.Blockstore, m storable) (cid.Cid, error) { +func PutMessage(ctx context.Context, bs bstore.Blockstore, m storable) (cid.Cid, error) { b, err := m.ToStorageBlock() if err != nil { return cid.Undef, err } - if err := bs.Put(b); err != nil { + if err := bs.Put(ctx, b); err != nil { return cid.Undef, err } return b.Cid(), nil } -func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) { - return PutMessage(cs.chainBlockstore, m) +func (cs *ChainStore) PutMessage(ctx context.Context, m storable) (cid.Cid, error) { + return PutMessage(ctx, cs.chainBlockstore, m) } -func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { - m, err := cs.GetMessage(c) +func (cs *ChainStore) GetCMessage(ctx context.Context, c cid.Cid) (types.ChainMsg, error) { + m, err := cs.GetMessage(ctx, c) if err == nil { return m, nil } @@ -49,21 +49,21 @@ func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { log.Warnf("GetCMessage: unexpected error getting unsigned message: %s", err) } - return cs.GetSignedMessage(c) + return cs.GetSignedMessage(ctx, c) } -func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) { +func (cs *ChainStore) GetMessage(ctx context.Context, c cid.Cid) (*types.Message, error) { var msg *types.Message - err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) { msg, err = types.DecodeMessage(b) return err }) return msg, err } -func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) { +func (cs *ChainStore) GetSignedMessage(ctx context.Context, c cid.Cid) (*types.SignedMessage, error) { var msg *types.SignedMessage - err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) { msg, err = types.DecodeSignedMessage(b) return err }) @@ -103,7 +103,7 @@ type BlockMessages struct { SecpkMessages []types.ChainMsg } -func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) { +func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet) ([]BlockMessages, error) { // returned BlockMessages match block order in tipset applied := make(map[address.Address]uint64) @@ -142,7 +142,7 @@ func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, err var out []BlockMessages for _, b := range ts.Blocks() { - bms, sms, err := cs.MessagesForBlock(b) + bms, sms, err := cs.MessagesForBlock(ctx, b) if err != nil { return nil, xerrors.Errorf("failed to get messages for block: %w", err) } @@ -181,8 +181,8 @@ func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, err return out, nil } -func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { - bmsgs, err := cs.BlockMsgsForTipset(ts) +func (cs *ChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { + bmsgs, err := cs.BlockMsgsForTipset(ctx, ts) if err != nil { return nil, err } @@ -206,7 +206,7 @@ type mmCids struct { secpk []cid.Cid } -func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { +func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { o, ok := cs.mmCache.Get(mmc) if ok { mmcids := o.(*mmCids) @@ -215,7 +215,7 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) cst := cbor.NewCborStore(cs.chainLocalBlockstore) var msgmeta types.MsgMeta - if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil { + if err := cst.Get(ctx, mmc, &msgmeta); err != nil { return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err) } @@ -237,18 +237,18 @@ func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) return blscids, secpkcids, nil } -func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - blscids, secpkcids, err := cs.ReadMsgMetaCids(b.Messages) +func (cs *ChainStore) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { + blscids, secpkcids, err := cs.ReadMsgMetaCids(ctx, b.Messages) if err != nil { return nil, nil, err } - blsmsgs, err := cs.LoadMessagesFromCids(blscids) + blsmsgs, err := cs.LoadMessagesFromCids(ctx, blscids) if err != nil { return nil, nil, xerrors.Errorf("loading bls messages for block: %w", err) } - secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids) + secpkmsgs, err := cs.LoadSignedMessagesFromCids(ctx, secpkcids) if err != nil { return nil, nil, xerrors.Errorf("loading secpk messages for block: %w", err) } @@ -256,8 +256,7 @@ func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, return blsmsgs, secpkmsgs, nil } -func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) { - ctx := context.TODO() +func (cs *ChainStore) GetParentReceipt(ctx context.Context, b *types.BlockHeader, i int) (*types.MessageReceipt, error) { // block headers use adt0, for now. a, err := blockadt.AsArray(cs.ActorStore(ctx), b.ParentMessageReceipts) if err != nil { @@ -274,10 +273,10 @@ func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.Mess return &r, nil } -func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, error) { +func (cs *ChainStore) LoadMessagesFromCids(ctx context.Context, cids []cid.Cid) ([]*types.Message, error) { msgs := make([]*types.Message, 0, len(cids)) for i, c := range cids { - m, err := cs.GetMessage(c) + m, err := cs.GetMessage(ctx, c) if err != nil { return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err) } @@ -288,10 +287,10 @@ func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, er return msgs, nil } -func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.SignedMessage, error) { +func (cs *ChainStore) LoadSignedMessagesFromCids(ctx context.Context, cids []cid.Cid) ([]*types.SignedMessage, error) { msgs := make([]*types.SignedMessage, 0, len(cids)) for i, c := range cids { - m, err := cs.GetSignedMessage(c) + m, err := cs.GetSignedMessage(ctx, c) if err != nil { return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err) } diff --git a/chain/store/snapshot.go b/chain/store/snapshot.go index 1d4ce3758..61fa8bdc8 100644 --- a/chain/store/snapshot.go +++ b/chain/store/snapshot.go @@ -30,7 +30,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo unionBs := bstore.Union(cs.stateBlockstore, cs.chainBlockstore) return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error { - blk, err := unionBs.Get(c) + blk, err := unionBs.Get(ctx, c) if err != nil { return xerrors.Errorf("writing object to car, bs.Get: %w", err) } @@ -43,18 +43,18 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo }) } -func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) { +func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, error) { // TODO: writing only to the state blockstore is incorrect. // At this time, both the state and chain blockstores are backed by the // universal store. When we physically segregate the stores, we will need // to route state objects to the state blockstore, and chain objects to // the chain blockstore. - header, err := car.LoadCar(cs.StateBlockstore(), r) + header, err := car.LoadCar(ctx, cs.StateBlockstore(), r) if err != nil { return nil, xerrors.Errorf("loadcar failed: %w", err) } - root, err := cs.LoadTipSet(types.NewTipSetKey(header.Roots...)) + root, err := cs.LoadTipSet(ctx, types.NewTipSetKey(header.Roots...)) if err != nil { return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err) } @@ -82,7 +82,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe return err } - data, err := cs.chainBlockstore.Get(blk) + data, err := cs.chainBlockstore.Get(ctx, blk) if err != nil { return xerrors.Errorf("getting block: %w", err) } @@ -102,7 +102,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe var cids []cid.Cid if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots { if walked.Visit(b.Messages) { - mcids, err := recurseLinks(cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages}) + mcids, err := recurseLinks(ctx, cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages}) if err != nil { return xerrors.Errorf("recursing messages failed: %w", err) } @@ -123,7 +123,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots { if walked.Visit(b.ParentStateRoot) { - cids, err := recurseLinks(cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) + cids, err := recurseLinks(ctx, cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) if err != nil { return xerrors.Errorf("recursing genesis state failed: %w", err) } @@ -168,12 +168,12 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe return nil } -func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) { +func recurseLinks(ctx context.Context, bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) { if root.Prefix().Codec != cid.DagCBOR { return in, nil } - data, err := bs.Get(root) + data, err := bs.Get(ctx, root) if err != nil { return nil, xerrors.Errorf("recurse links get (%s) failed: %w", root, err) } @@ -192,7 +192,7 @@ func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid. in = append(in, c) var err error - in, err = recurseLinks(bs, walked, c, in) + in, err = recurseLinks(ctx, bs, walked, c, in) if err != nil { rerr = err } diff --git a/chain/store/store.go b/chain/store/store.go index f99c7f649..d86a66f12 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -207,17 +207,17 @@ func (cs *ChainStore) Close() error { return nil } -func (cs *ChainStore) Load() error { - if err := cs.loadHead(); err != nil { +func (cs *ChainStore) Load(ctx context.Context) error { + if err := cs.loadHead(ctx); err != nil { return err } - if err := cs.loadCheckpoint(); err != nil { + if err := cs.loadCheckpoint(ctx); err != nil { return err } return nil } -func (cs *ChainStore) loadHead() error { - head, err := cs.metadataDs.Get(chainHeadKey) +func (cs *ChainStore) loadHead(ctx context.Context) error { + head, err := cs.metadataDs.Get(ctx, chainHeadKey) if err == dstore.ErrNotFound { log.Warn("no previous chain state found") return nil @@ -231,7 +231,7 @@ func (cs *ChainStore) loadHead() error { return xerrors.Errorf("failed to unmarshal stored chain head: %w", err) } - ts, err := cs.LoadTipSet(types.NewTipSetKey(tscids...)) + ts, err := cs.LoadTipSet(ctx, types.NewTipSetKey(tscids...)) if err != nil { return xerrors.Errorf("loading tipset: %w", err) } @@ -241,8 +241,8 @@ func (cs *ChainStore) loadHead() error { return nil } -func (cs *ChainStore) loadCheckpoint() error { - tskBytes, err := cs.metadataDs.Get(checkpointKey) +func (cs *ChainStore) loadCheckpoint(ctx context.Context) error { + tskBytes, err := cs.metadataDs.Get(ctx, checkpointKey) if err == dstore.ErrNotFound { return nil } @@ -256,7 +256,7 @@ func (cs *ChainStore) loadCheckpoint() error { return err } - ts, err := cs.LoadTipSet(tsk) + ts, err := cs.LoadTipSet(ctx, tsk) if err != nil { return xerrors.Errorf("loading tipset: %w", err) } @@ -266,13 +266,13 @@ func (cs *ChainStore) loadCheckpoint() error { return nil } -func (cs *ChainStore) writeHead(ts *types.TipSet) error { +func (cs *ChainStore) writeHead(ctx context.Context, ts *types.TipSet) error { data, err := json.Marshal(ts.Cids()) if err != nil { return xerrors.Errorf("failed to marshal tipset: %w", err) } - if err := cs.metadataDs.Put(chainHeadKey, data); err != nil { + if err := cs.metadataDs.Put(ctx, chainHeadKey, data); err != nil { return xerrors.Errorf("failed to write chain head to datastore: %w", err) } @@ -341,13 +341,13 @@ func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) { func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - return cs.metadataDs.Has(key) + return cs.metadataDs.Has(ctx, key) } func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - if err := cs.metadataDs.Put(key, []byte{0}); err != nil { + if err := cs.metadataDs.Put(ctx, key, []byte{0}); err != nil { return xerrors.Errorf("cache block validation: %w", err) } @@ -357,34 +357,34 @@ func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) e func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - if err := cs.metadataDs.Delete(key); err != nil { + if err := cs.metadataDs.Delete(ctx, key); err != nil { return xerrors.Errorf("removing from valid block cache: %w", err) } return nil } -func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error { +func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) error { ts, err := types.NewTipSet([]*types.BlockHeader{b}) if err != nil { return err } - if err := cs.PutTipSet(context.TODO(), ts); err != nil { + if err := cs.PutTipSet(ctx, ts); err != nil { return err } - return cs.metadataDs.Put(dstore.NewKey("0"), b.Cid().Bytes()) + return cs.metadataDs.Put(ctx, dstore.NewKey("0"), b.Cid().Bytes()) } func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error { for _, b := range ts.Blocks() { - if err := cs.PersistBlockHeaders(b); err != nil { + if err := cs.PersistBlockHeaders(ctx, b); err != nil { return err } } - expanded, err := cs.expandTipset(ts.Blocks()[0]) + expanded, err := cs.expandTipset(ctx, ts.Blocks()[0]) if err != nil { return xerrors.Errorf("errored while expanding tipset: %w", err) } @@ -435,7 +435,7 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS // difference between 'bootstrap sync' and 'caught up' sync, we need // some other heuristic. - exceeds, err := cs.exceedsForkLength(cs.heaviest, ts) + exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, ts) if err != nil { return err } @@ -458,7 +458,7 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS // FIXME: We may want to replace some of the logic in `syncFork()` with this. // `syncFork()` counts the length on both sides of the fork at the moment (we // need to settle on that) but here we just enforce it on the `synced` side. -func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, error) { +func (cs *ChainStore) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) { if synced == nil || external == nil { // FIXME: If `cs.heaviest` is nil we should just bypass the entire // `MaybeTakeHeavierTipSet` logic (instead of each of the called @@ -482,7 +482,7 @@ func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, e // length). return true, nil } - external, err = cs.LoadTipSet(external.Parents()) + external, err = cs.LoadTipSet(ctx, external.Parents()) if err != nil { return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err) } @@ -505,7 +505,7 @@ func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, e // there is no common ancestor. return true, nil } - synced, err = cs.LoadTipSet(synced.Parents()) + synced, err = cs.LoadTipSet(ctx, synced.Parents()) if err != nil { return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err) } @@ -521,17 +521,17 @@ func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, e // CAUTION: Use it only for testing, such as to teleport the chain to a // particular tipset to carry out a benchmark, verification, etc. on a chain // segment. -func (cs *ChainStore) ForceHeadSilent(_ context.Context, ts *types.TipSet) error { +func (cs *ChainStore) ForceHeadSilent(ctx context.Context, ts *types.TipSet) error { log.Warnf("(!!!) forcing a new head silently; new head: %s", ts) cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() - if err := cs.removeCheckpoint(); err != nil { + if err := cs.removeCheckpoint(ctx); err != nil { return err } cs.heaviest = ts - err := cs.writeHead(ts) + err := cs.writeHead(ctx, ts) if err != nil { err = xerrors.Errorf("failed to write chain head: %s", err) } @@ -561,7 +561,7 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo notifees = append(notifees, n) case r := <-out: - revert, apply, err := cs.ReorgOps(r.old, r.new) + revert, apply, err := cs.ReorgOps(ctx, r.old, r.new) if err != nil { log.Error("computing reorg ops failed: ", err) continue @@ -646,7 +646,7 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height()) cs.heaviest = ts - if err := cs.writeHead(ts); err != nil { + if err := cs.writeHead(ctx, ts); err != nil { log.Errorf("failed to write chain head: %s", err) return nil } @@ -656,14 +656,14 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) // FlushValidationCache removes all results of block validation from the // chain metadata store. Usually the first step after a new chain import. -func (cs *ChainStore) FlushValidationCache() error { - return FlushValidationCache(cs.metadataDs) +func (cs *ChainStore) FlushValidationCache(ctx context.Context) error { + return FlushValidationCache(ctx, cs.metadataDs) } -func FlushValidationCache(ds dstore.Batching) error { +func FlushValidationCache(ctx context.Context, ds dstore.Batching) error { log.Infof("clearing block validation cache...") - dsWalk, err := ds.Query(query.Query{ + dsWalk, err := ds.Query(ctx, query.Query{ // Potential TODO: the validation cache is not a namespace on its own // but is rather constructed as prefixed-key `foo:bar` via .Instance(), which // in turn does not work with the filter, which can match only on `foo/bar` @@ -683,7 +683,7 @@ func FlushValidationCache(ds dstore.Batching) error { return xerrors.Errorf("failed to run key listing query: %w", err) } - batch, err := ds.Batch() + batch, err := ds.Batch(ctx) if err != nil { return xerrors.Errorf("failed to open a DS batch: %w", err) } @@ -692,11 +692,11 @@ func FlushValidationCache(ds dstore.Batching) error { for _, k := range allKeys { if strings.HasPrefix(k.Key, blockValidationCacheKeyPrefix.String()) { delCnt++ - batch.Delete(dstore.RawKey(k.Key)) // nolint:errcheck + batch.Delete(ctx, dstore.RawKey(k.Key)) // nolint:errcheck } } - if err := batch.Commit(); err != nil { + if err := batch.Commit(ctx); err != nil { return xerrors.Errorf("failed to commit the DS batch: %w", err) } @@ -709,24 +709,24 @@ func FlushValidationCache(ds dstore.Batching) error { // This should only be called if something is broken and needs fixing. // // This function will bypass and remove any checkpoints. -func (cs *ChainStore) SetHead(ts *types.TipSet) error { +func (cs *ChainStore) SetHead(ctx context.Context, ts *types.TipSet) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() - if err := cs.removeCheckpoint(); err != nil { + if err := cs.removeCheckpoint(ctx); err != nil { return err } return cs.takeHeaviestTipSet(context.TODO(), ts) } // RemoveCheckpoint removes the current checkpoint. -func (cs *ChainStore) RemoveCheckpoint() error { +func (cs *ChainStore) RemoveCheckpoint(ctx context.Context) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() - return cs.removeCheckpoint() + return cs.removeCheckpoint(ctx) } -func (cs *ChainStore) removeCheckpoint() error { - if err := cs.metadataDs.Delete(checkpointKey); err != nil { +func (cs *ChainStore) removeCheckpoint(ctx context.Context) error { + if err := cs.metadataDs.Delete(ctx, checkpointKey); err != nil { return err } cs.checkpoint = nil @@ -736,7 +736,7 @@ func (cs *ChainStore) removeCheckpoint() error { // SetCheckpoint will set a checkpoint past which the chainstore will not allow forks. // // NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past. -func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { +func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error { tskBytes, err := json.Marshal(ts.Key()) if err != nil { return err @@ -755,7 +755,7 @@ func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { } if !ts.Equals(cs.heaviest) { - anc, err := cs.IsAncestorOf(ts, cs.heaviest) + anc, err := cs.IsAncestorOf(ctx, ts, cs.heaviest) if err != nil { return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err) } @@ -764,7 +764,7 @@ func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err) } } - err = cs.metadataDs.Put(checkpointKey, tskBytes) + err = cs.metadataDs.Put(ctx, checkpointKey, tskBytes) if err != nil { return err } @@ -781,9 +781,9 @@ func (cs *ChainStore) GetCheckpoint() *types.TipSet { } // Contains returns whether our BlockStore has all blocks in the supplied TipSet. -func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { +func (cs *ChainStore) Contains(ctx context.Context, ts *types.TipSet) (bool, error) { for _, c := range ts.Cids() { - has, err := cs.chainBlockstore.Has(c) + has, err := cs.chainBlockstore.Has(ctx, c) if err != nil { return false, err } @@ -797,16 +797,16 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { // GetBlock fetches a BlockHeader with the supplied CID. It returns // blockstore.ErrNotFound if the block was not found in the BlockStore. -func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) { +func (cs *ChainStore) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHeader, error) { var blk *types.BlockHeader - err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) { blk, err = types.DecodeBlock(b) return err }) return blk, err } -func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { +func (cs *ChainStore) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { v, ok := cs.tsCache.Get(tsk) if ok { return v.(*types.TipSet), nil @@ -819,7 +819,7 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { for i, c := range cids { i, c := i, c eg.Go(func() error { - b, err := cs.GetBlock(c) + b, err := cs.GetBlock(ctx, c) if err != nil { return xerrors.Errorf("get block %s: %w", c, err) } @@ -844,14 +844,14 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { } // IsAncestorOf returns true if 'a' is an ancestor of 'b' -func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { +func (cs *ChainStore) IsAncestorOf(ctx context.Context, a, b *types.TipSet) (bool, error) { if b.Height() <= a.Height() { return false, nil } cur := b for !a.Equals(cur) && cur.Height() > a.Height() { - next, err := cs.LoadTipSet(cur.Parents()) + next, err := cs.LoadTipSet(ctx, cur.Parents()) if err != nil { return false, err } @@ -862,13 +862,13 @@ func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { return cur.Equals(a), nil } -func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, error) { - l, _, err := cs.ReorgOps(a, b) +func (cs *ChainStore) NearestCommonAncestor(ctx context.Context, a, b *types.TipSet) (*types.TipSet, error) { + l, _, err := cs.ReorgOps(ctx, a, b) if err != nil { return nil, err } - return cs.LoadTipSet(l[len(l)-1].Parents()) + return cs.LoadTipSet(ctx, l[len(l)-1].Parents()) } // ReorgOps takes two tipsets (which can be at different heights), and walks @@ -879,11 +879,11 @@ func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, // ancestor. // // If an error happens along the way, we return the error with nil slices. -func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { - return ReorgOps(cs.LoadTipSet, a, b) +func (cs *ChainStore) ReorgOps(ctx context.Context, a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { + return ReorgOps(ctx, cs.LoadTipSet, a, b) } -func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { +func ReorgOps(ctx context.Context, lts func(ctx context.Context, _ types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { left := a right := b @@ -891,7 +891,7 @@ func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipS for !left.Equals(right) { if left.Height() > right.Height() { leftChain = append(leftChain, left) - par, err := lts(left.Parents()) + par, err := lts(ctx, left.Parents()) if err != nil { return nil, nil, err } @@ -899,7 +899,7 @@ func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipS left = par } else { rightChain = append(rightChain, right) - par, err := lts(right.Parents()) + par, err := lts(ctx, right.Parents()) if err != nil { log.Infof("failed to fetch right.Parents: %s", err) return nil, nil, err @@ -921,7 +921,7 @@ func (cs *ChainStore) GetHeaviestTipSet() (ts *types.TipSet) { return } -func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { +func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHeader) error { cs.tstLk.Lock() defer cs.tstLk.Unlock() @@ -931,7 +931,7 @@ func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { log.Debug("tried to add block to tipset tracker that was already there") return nil } - h, err := cs.GetBlock(oc) + h, err := cs.GetBlock(ctx, oc) if err == nil && h != nil { if h.Miner == b.Miner { log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid()) @@ -960,7 +960,7 @@ func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { return nil } -func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error { +func (cs *ChainStore) PersistBlockHeaders(ctx context.Context, b ...*types.BlockHeader) error { sbs := make([]block.Block, len(b)) for i, header := range b { @@ -982,13 +982,13 @@ func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error { end = len(b) } - err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(sbs[start:end])) + err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(ctx, sbs[start:end])) } return err } -func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) { +func (cs *ChainStore) expandTipset(ctx context.Context, b *types.BlockHeader) (*types.TipSet, error) { // Hold lock for the whole function for now, if it becomes a problem we can // fix pretty easily cs.tstLk.Lock() @@ -1007,7 +1007,7 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) continue } - h, err := cs.GetBlock(bhc) + h, err := cs.GetBlock(ctx, bhc) if err != nil { return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err) } @@ -1029,11 +1029,11 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) } func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error { - if err := cs.PersistBlockHeaders(b); err != nil { + if err := cs.PersistBlockHeaders(ctx, b); err != nil { return err } - ts, err := cs.expandTipset(b) + ts, err := cs.expandTipset(ctx, b) if err != nil { return err } @@ -1045,8 +1045,8 @@ func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error return nil } -func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { - data, err := cs.metadataDs.Get(dstore.NewKey("0")) +func (cs *ChainStore) GetGenesis(ctx context.Context) (*types.BlockHeader, error) { + data, err := cs.metadataDs.Get(ctx, dstore.NewKey("0")) if err != nil { return nil, err } @@ -1056,22 +1056,22 @@ func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { return nil, err } - return cs.GetBlock(c) + return cs.GetBlock(ctx, c) } // GetPath returns the sequence of atomic head change operations that // need to be applied in order to switch the head of the chain from the `from` // tipset to the `to` tipset. func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) { - fts, err := cs.LoadTipSet(from) + fts, err := cs.LoadTipSet(ctx, from) if err != nil { return nil, xerrors.Errorf("loading from tipset %s: %w", from, err) } - tts, err := cs.LoadTipSet(to) + tts, err := cs.LoadTipSet(ctx, to) if err != nil { return nil, xerrors.Errorf("loading to tipset %s: %w", to, err) } - revert, apply, err := cs.ReorgOps(fts, tts) + revert, apply, err := cs.ReorgOps(ctx, fts, tts) if err != nil { return nil, xerrors.Errorf("error getting tipset branches: %w", err) } @@ -1108,11 +1108,11 @@ func (cs *ChainStore) ActorStore(ctx context.Context) adt.Store { return ActorStore(ctx, cs.stateBlockstore) } -func (cs *ChainStore) TryFillTipSet(ts *types.TipSet) (*FullTipSet, error) { +func (cs *ChainStore) TryFillTipSet(ctx context.Context, ts *types.TipSet) (*FullTipSet, error) { var out []*types.FullBlock for _, b := range ts.Blocks() { - bmsgs, smsgs, err := cs.MessagesForBlock(b) + bmsgs, smsgs, err := cs.MessagesForBlock(ctx, b) if err != nil { // TODO: check for 'not found' errors, and only return nil if this // is actually a 'not found' error @@ -1154,7 +1154,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t if lbts.Height() < h { log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h) - lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ts, h) + lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ctx, ts, h) if err != nil { return nil, err } @@ -1164,7 +1164,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t return lbts, nil } - return cs.LoadTipSet(lbts.Parents()) + return cs.LoadTipSet(ctx, lbts.Parents()) } func (cs *ChainStore) Weight(ctx context.Context, hts *types.TipSet) (types.BigInt, error) { // todo remove @@ -1190,14 +1190,14 @@ func breakWeightTie(ts1, ts2 *types.TipSet) bool { return false } -func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) { +func (cs *ChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { if tsk.IsEmpty() { return cs.GetHeaviestTipSet(), nil } - return cs.LoadTipSet(tsk) + return cs.LoadTipSet(ctx, tsk) } -func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry, error) { +func (cs *ChainStore) GetLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { cur := ts for i := 0; i < 20; i++ { cbe := cur.Blocks()[0].BeaconEntries @@ -1209,7 +1209,7 @@ func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") } - next, err := cs.LoadTipSet(cur.Parents()) + next, err := cs.LoadTipSet(ctx, cur.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) } diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 2004b266c..a759a48a8 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -109,7 +109,7 @@ func TestChainExportImport(t *testing.T) { cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - root, err := cs.Import(buf) + root, err := cs.Import(context.TODO(), buf) if err != nil { t.Fatal(err) } @@ -144,12 +144,12 @@ func TestChainExportImportFull(t *testing.T) { cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - root, err := cs.Import(buf) + root, err := cs.Import(context.TODO(), buf) if err != nil { t.Fatal(err) } - err = cs.SetHead(last) + err = cs.SetHead(context.Background(), last) if err != nil { t.Fatal(err) } diff --git a/chain/sync.go b/chain/sync.go index 34867b136..0293ae25e 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -119,8 +119,8 @@ type SyncManagerCtor func(syncFn SyncFunc) SyncManager type Genesis *types.TipSet -func LoadGenesis(sm *stmgr.StateManager) (Genesis, error) { - gen, err := sm.ChainStore().GetGenesis() +func LoadGenesis(ctx context.Context, sm *stmgr.StateManager) (Genesis, error) { + gen, err := sm.ChainStore().GetGenesis(ctx) if err != nil { return nil, xerrors.Errorf("getting genesis block: %w", err) } @@ -227,7 +227,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { // TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of // the blockstore - if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil { + if err := syncer.store.PersistBlockHeaders(ctx, fts.TipSet().Blocks()...); err != nil { log.Warn("failed to persist incoming block header: ", err) return false } @@ -298,11 +298,11 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { // into the blockstore. blockstore := bstore.NewMemory() cst := cbor.NewCborStore(blockstore) - + ctx := context.Background() var bcids, scids []cid.Cid for _, m := range fblk.BlsMessages { - c, err := store.PutMessage(blockstore, m) + c, err := store.PutMessage(ctx, blockstore, m) if err != nil { return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) } @@ -310,7 +310,7 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { } for _, m := range fblk.SecpkMessages { - c, err := store.PutMessage(blockstore, m) + c, err := store.PutMessage(ctx, blockstore, m) if err != nil { return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) } @@ -360,7 +360,7 @@ func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error { // TODO: should probably expose better methods on the blockstore for this operation var blks []blocks.Block for c := range cids { - b, err := from.Get(c) + b, err := from.Get(ctx, c) if err != nil { return err } @@ -368,7 +368,7 @@ func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error { blks = append(blks, b) } - if err := to.PutMany(blks); err != nil { + if err := to.PutMany(ctx, blks); err != nil { return err } @@ -463,7 +463,7 @@ func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, e // {hint/usage} This is used from the HELLO protocol, to fetch the greeting // peer's heaviest tipset if we don't have it. func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { - if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil { + if fts, err := syncer.tryLoadFullTipSet(ctx, tsk); err == nil { return fts, nil } @@ -474,15 +474,15 @@ func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipS // tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full // representation of it containing FullBlocks. If ALL blocks are not found // locally, it errors entirely with blockstore.ErrNotFound. -func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) { - ts, err := syncer.store.LoadTipSet(tsk) +func (syncer *Syncer) tryLoadFullTipSet(ctx context.Context, tsk types.TipSetKey) (*store.FullTipSet, error) { + ts, err := syncer.store.LoadTipSet(ctx, tsk) if err != nil { return nil, err } fts := &store.FullTipSet{} for _, b := range ts.Blocks() { - bmsgs, smsgs, err := syncer.store.MessagesForBlock(b) + bmsgs, smsgs, err := syncer.store.MessagesForBlock(ctx, b) if err != nil { return nil, err } @@ -583,7 +583,7 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, return xerrors.Errorf("validating block %s: %w", b.Cid(), err) } - if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil { + if err := syncer.sm.ChainStore().AddToTipSetTracker(ctx, b.Header); err != nil { return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err) } return nil @@ -755,7 +755,7 @@ loop: } // If, for some reason, we have a suffix of the chain locally, handle that here - ts, err := syncer.store.LoadTipSet(at) + ts, err := syncer.store.LoadTipSet(ctx, at) if err == nil { acceptedBlocks = append(acceptedBlocks, at.Cids()...) @@ -838,7 +838,7 @@ loop: return blockSet, nil } - knownParent, err := syncer.store.LoadTipSet(known.Parents()) + knownParent, err := syncer.store.LoadTipSet(ctx, known.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } @@ -892,7 +892,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know return nil, err } - nts, err := syncer.store.LoadTipSet(known.Parents()) + nts, err := syncer.store.LoadTipSet(ctx, known.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } @@ -928,7 +928,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know return nil, ErrForkCheckpoint } - nts, err = syncer.store.LoadTipSet(nts.Parents()) + nts, err = syncer.store.LoadTipSet(ctx, nts.Parents()) if err != nil { return nil, xerrors.Errorf("loading next local tipset: %w", err) } @@ -965,7 +965,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers)))) for i := len(headers) - 1; i >= 0; { - fts, err := syncer.store.TryFillTipSet(headers[i]) + fts, err := syncer.store.TryFillTipSet(ctx, headers[i]) if err != nil { return err } @@ -1138,7 +1138,7 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co for _, m := range bst.Bls { //log.Infof("putting BLS message: %s", m.Cid()) - if _, err := store.PutMessage(bs, m); err != nil { + if _, err := store.PutMessage(ctx, bs, m); err != nil { log.Errorf("failed to persist messages: %+v", err) return xerrors.Errorf("BLS message processing failed: %w", err) } @@ -1148,7 +1148,7 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co return xerrors.Errorf("unknown signature type on message %s: %q", m.Cid(), m.Signature.Type) } //log.Infof("putting secp256k1 message: %s", m.Cid()) - if _, err := store.PutMessage(bs, m); err != nil { + if _, err := store.PutMessage(ctx, bs, m); err != nil { log.Errorf("failed to persist messages: %+v", err) return xerrors.Errorf("secp256k1 message processing failed: %w", err) } @@ -1201,7 +1201,7 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *t for _, ts := range headers { toPersist = append(toPersist, ts.Blocks()...) } - if err := syncer.store.PersistBlockHeaders(toPersist...); err != nil { + if err := syncer.store.PersistBlockHeaders(ctx, toPersist...); err != nil { err = xerrors.Errorf("failed to persist synced blocks to the chainstore: %w", err) ss.Error(err) return err @@ -1245,7 +1245,7 @@ func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) { return bbr.String(), ok } -func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { +func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { cur := ts for i := 0; i < 20; i++ { cbe := cur.Blocks()[0].BeaconEntries @@ -1257,7 +1257,7 @@ func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") } - next, err := syncer.store.LoadTipSet(cur.Parents()) + next, err := syncer.store.LoadTipSet(ctx, cur.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) } diff --git a/chain/sync_test.go b/chain/sync_test.go index 4175ff5fa..2af8aeb54 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -22,6 +22,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -205,20 +206,21 @@ func (tu *syncTestUtil) pushFtsAndWait(to int, fts *store.FullTipSet, wait bool) } func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bool) { + ctx := context.TODO() for _, fb := range fts.Blocks { var b types.BlockMsg // -1 to match block.Height b.Header = fb.Header for _, msg := range fb.SecpkMessages { - c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(msg) + c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(ctx, msg) require.NoError(tu.t, err) b.SecpkMessages = append(b.SecpkMessages, c) } for _, msg := range fb.BlsMessages { - c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(msg) + c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(ctx, msg) require.NoError(tu.t, err) b.BlsMessages = append(b.BlsMessages, c) @@ -298,7 +300,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) { lastTs := blocks[len(blocks)-1].Blocks for _, lastB := range lastTs { cs := out.(*impl.FullNodeAPI).ChainAPI.Chain - require.NoError(tu.t, cs.AddToTipSetTracker(lastB.Header)) + require.NoError(tu.t, cs.AddToTipSetTracker(context.Background(), lastB.Header)) err = cs.AddBlock(tu.ctx, lastB.Header) require.NoError(tu.t, err) } @@ -542,7 +544,7 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64 return []uint64{1}, nil } -func (wpp badWpp) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (wpp badWpp) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof2.PoStProof, error) { return []proof2.PoStProof{ { PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, @@ -734,7 +736,7 @@ func TestDuplicateNonce(t *testing.T) { t.Fatal("included message should be in exec trace") } - mft, err := tu.g.ChainStore().MessagesForTipset(ts1.TipSet()) + mft, err := tu.g.ChainStore().MessagesForTipset(context.TODO(), ts1.TipSet()) require.NoError(t, err) require.True(t, len(mft) == 1, "only expecting one message for this tipset") require.Equal(t, includedMsg, mft[0].VMMessage().Cid(), "messages for tipset didn't contain expected message") diff --git a/chain/vm/gas.go b/chain/vm/gas.go index 206a55d36..e75c86b9f 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -3,14 +3,16 @@ package vm import ( "fmt" + vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/go-address" addr "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/lotus/build" - vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/build" ) type GasCharge struct { @@ -73,13 +75,16 @@ type Pricelist interface { OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) OnHashing(dataSize int) GasCharge OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge - OnVerifySeal(info proof5.SealVerifyInfo) GasCharge - OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge - OnVerifyPost(info proof5.WindowPoStVerifyInfo) GasCharge + OnVerifySeal(info proof7.SealVerifyInfo) GasCharge + OnVerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) GasCharge + OnVerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) GasCharge + OnVerifyPost(info proof7.WindowPoStVerifyInfo) GasCharge OnVerifyConsensusFault() GasCharge } -var prices = map[abi.ChainEpoch]Pricelist{ +// Prices are the price lists per starting epoch. Public for testing purposes +// (concretely to allow the test vector runner to rebase prices). +var Prices = map[abi.ChainEpoch]Pricelist{ abi.ChainEpoch(0): &pricelistV0{ computeGasMulti: 1, storageGasMulti: 1000, @@ -204,6 +209,8 @@ var prices = map[abi.ChainEpoch]Pricelist{ }, verifyPostDiscount: false, verifyConsensusFault: 495422, + + verifyReplicaUpdate: 36316136, }, } @@ -212,8 +219,8 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { // since we are storing the prices as map or epoch to price // we need to get the price with the highest epoch that is lower or equal to the `epoch` arg bestEpoch := abi.ChainEpoch(0) - bestPrice := prices[bestEpoch] - for e, pl := range prices { + bestPrice := Prices[bestEpoch] + for e, pl := range Prices { // if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch` if e > bestEpoch && e <= epoch { bestEpoch = e @@ -227,7 +234,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { } type pricedSyscalls struct { - under vmr5.Syscalls + under vmr.Syscalls pl Pricelist chargeGas func(GasCharge) } @@ -261,7 +268,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p } // Verifies a sector seal proof. -func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error { +func (ps pricedSyscalls) VerifySeal(vi proof7.SealVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifySeal(vi)) defer ps.chargeGas(gasOnActorExec) @@ -269,7 +276,7 @@ func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error { } // Verifies a proof of spacetime. -func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error { +func (ps pricedSyscalls) VerifyPoSt(vi proof7.WindowPoStVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifyPost(vi)) defer ps.chargeGas(gasOnActorExec) @@ -286,14 +293,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error { // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the // blocks in the parent of h2 (i.e. h2's grandparent). // Returns nil and an error if the headers don't prove a fault. -func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr5.ConsensusFault, error) { +func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr.ConsensusFault, error) { ps.chargeGas(ps.pl.OnVerifyConsensusFault()) defer ps.chargeGas(gasOnActorExec) return ps.under.VerifyConsensusFault(h1, h2, extra) } -func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) { +func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof7.SealVerifyInfo) (map[address.Address][]bool, error) { count := int64(0) for _, svis := range inp { count += int64(len(svis)) @@ -307,9 +314,16 @@ func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealV return ps.under.BatchVerifySeals(inp) } -func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error { +func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) error { ps.chargeGas(ps.pl.OnVerifyAggregateSeals(aggregate)) defer ps.chargeGas(gasOnActorExec) return ps.under.VerifyAggregateSeals(aggregate) } + +func (ps pricedSyscalls) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error { + ps.chargeGas(ps.pl.OnVerifyReplicaUpdate(update)) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.VerifyReplicaUpdate(update) +} diff --git a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go index 13c5fdd86..1bda6dfae 100644 --- a/chain/vm/gas_v0.go +++ b/chain/vm/gas_v0.go @@ -3,8 +3,7 @@ package vm import ( "fmt" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -121,6 +120,8 @@ type pricelistV0 struct { verifyPostLookup map[abi.RegisteredPoStProof]scalingCost verifyPostDiscount bool verifyConsensusFault int64 + + verifyReplicaUpdate int64 } var _ Pricelist = (*pricelistV0)(nil) @@ -206,14 +207,14 @@ func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealPr } // OnVerifySeal -func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge { +func (pl *pricelistV0) OnVerifySeal(info proof7.SealVerifyInfo) GasCharge { // TODO: this needs more cost tunning, check with @lotus // this is not used return newGasCharge("OnVerifySeal", pl.verifySealBase, 0) } // OnVerifyAggregateSeals -func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge { +func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) GasCharge { proofType := aggregate.SealProof perProof, ok := pl.verifyAggregateSealPer[proofType] if !ok { @@ -228,8 +229,13 @@ func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVeri return newGasCharge("OnVerifyAggregateSeals", perProof*num+step.Lookup(num), 0) } +// OnVerifyReplicaUpdate +func (pl *pricelistV0) OnVerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) GasCharge { + return newGasCharge("OnVerifyReplicaUpdate", pl.verifyReplicaUpdate, 0) +} + // OnVerifyPost -func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge { +func (pl *pricelistV0) OnVerifyPost(info proof7.WindowPoStVerifyInfo) GasCharge { sectorSize := "unknown" var proofType abi.RegisteredPoStProof diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go index 85357e51b..8a7a4c8c9 100644 --- a/chain/vm/invoker.go +++ b/chain/vm/invoker.go @@ -16,7 +16,7 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" - vmr "github.com/filecoin-project/specs-actors/v5/actors/runtime" + vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" diff --git a/chain/vm/invoker_test.go b/chain/vm/invoker_test.go index 8499f001a..fb9910ecd 100644 --- a/chain/vm/invoker_test.go +++ b/chain/vm/invoker_test.go @@ -1,7 +1,6 @@ package vm import ( - "context" "fmt" "io" "testing" @@ -136,9 +135,7 @@ func TestInvokerBasic(t *testing.T) { { _, aerr := code[1](&Runtime{ - vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version { - return network.Version0 - }}, + vm: &VM{networkVersion: network.Version0}, Message: &basicRtMessage{}, }, []byte{99}) if aerrors.IsFatal(aerr) { @@ -149,9 +146,7 @@ func TestInvokerBasic(t *testing.T) { { _, aerr := code[1](&Runtime{ - vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version { - return network.Version7 - }}, + vm: &VM{networkVersion: network.Version7}, Message: &basicRtMessage{}, }, []byte{99}) if aerrors.IsFatal(aerr) { diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go index ea49abff3..5716b5006 100644 --- a/chain/vm/mkactor.go +++ b/chain/vm/mkactor.go @@ -26,6 +26,7 @@ import ( builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" /* inline-gen end */ @@ -130,6 +131,8 @@ func newAccountActor(ver actors.Version) *types.Actor { code = builtin5.AccountActorCodeID case actors.Version6: code = builtin6.AccountActorCodeID + case actors.Version7: + code = builtin7.AccountActorCodeID /* inline-gen end */ default: panic("unsupported actors version") diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index 6e94030bd..0e2adc879 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -5,6 +5,7 @@ import ( "context" "encoding/binary" "fmt" + "os" gruntime "runtime" "time" @@ -16,8 +17,12 @@ import ( "github.com/filecoin-project/go-state-types/network" rtt "github.com/filecoin-project/go-state-types/rt" rt0 "github.com/filecoin-project/specs-actors/actors/runtime" + rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + rt3 "github.com/filecoin-project/specs-actors/v3/actors/runtime" + rt4 "github.com/filecoin-project/specs-actors/v4/actors/runtime" rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" rt6 "github.com/filecoin-project/specs-actors/v6/actors/runtime" + rt7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" "github.com/ipfs/go-cid" ipldcbor "github.com/ipfs/go-ipld-cbor" "go.opencensus.io/trace" @@ -52,11 +57,11 @@ func (m *Message) ValueReceived() abi.TokenAmount { } // EnableGasTracing, if true, outputs gas tracing in execution traces. -var EnableGasTracing = false +var EnableGasTracing = os.Getenv("LOTUS_VM_ENABLE_GAS_TRACING_VERY_SLOW") == "1" type Runtime struct { - rt5.Message - rt5.Syscalls + rt7.Message + rt7.Syscalls ctx context.Context @@ -87,7 +92,7 @@ func (rt *Runtime) BaseFee() abi.TokenAmount { } func (rt *Runtime) NetworkVersion() network.Version { - return rt.vm.GetNtwkVersion(rt.ctx, rt.CurrEpoch()) + return rt.vm.networkVersion } func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount { @@ -142,7 +147,12 @@ func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid { var _ rt0.Runtime = (*Runtime)(nil) var _ rt5.Runtime = (*Runtime)(nil) +var _ rt2.Runtime = (*Runtime)(nil) +var _ rt3.Runtime = (*Runtime)(nil) +var _ rt4.Runtime = (*Runtime)(nil) +var _ rt5.Runtime = (*Runtime)(nil) var _ rt6.Runtime = (*Runtime)(nil) +var _ rt7.Runtime = (*Runtime)(nil) func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) { defer func() { @@ -214,16 +224,7 @@ func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) } func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - var err error - var res []byte - - rnv := rt.vm.ntwkVersion(rt.ctx, randEpoch) - - if rnv >= network.Version13 { - res, err = rt.vm.rand.GetChainRandomnessV2(rt.ctx, personalization, randEpoch, entropy) - } else { - res, err = rt.vm.rand.GetChainRandomnessV1(rt.ctx, personalization, randEpoch, entropy) - } + res, err := rt.vm.rand.GetChainRandomness(rt.ctx, personalization, randEpoch, entropy) if err != nil { panic(aerrors.Fatalf("could not get ticket randomness: %s", err)) @@ -232,17 +233,7 @@ func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparat } func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - var err error - var res []byte - - rnv := rt.vm.ntwkVersion(rt.ctx, randEpoch) - if rnv >= network.Version14 { - res, err = rt.vm.rand.GetBeaconRandomnessV3(rt.ctx, personalization, randEpoch, entropy) - } else if rnv == network.Version13 { - res, err = rt.vm.rand.GetBeaconRandomnessV2(rt.ctx, personalization, randEpoch, entropy) - } else { - res, err = rt.vm.rand.GetBeaconRandomnessV1(rt.ctx, personalization, randEpoch, entropy) - } + res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, personalization, randEpoch, entropy) if err != nil { panic(aerrors.Fatalf("could not get beacon randomness: %s", err)) @@ -323,7 +314,7 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) { } // Transfer the executing actor's balance to the beneficiary - if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil { + if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance, rt.NetworkVersion()); err != nil { panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err)) } } diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index 0cbefd1fd..cd143279e 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -7,6 +7,8 @@ import ( goruntime "runtime" "sync" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" "github.com/minio/blake2b-simd" @@ -26,8 +28,8 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/lib/sigs" - runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + runtime7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" ) func init() { @@ -36,10 +38,10 @@ func init() { // Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there -type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime5.Syscalls +type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime7.Syscalls func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { - return func(ctx context.Context, rt *Runtime) runtime5.Syscalls { + return func(ctx context.Context, rt *Runtime) runtime7.Syscalls { return &syscallShim{ ctx: ctx, @@ -90,7 +92,7 @@ func (ss *syscallShim) HashBlake2b(data []byte) [32]byte { // Checks validity of the submitted consensus fault with the two block headers needed to prove the fault // and an optional extra one to check common ancestry (as needed). // Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch(). -func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.ConsensusFault, error) { +func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime7.ConsensusFault, error) { // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. // for that reason when checking block parent relationships, rather than instantiating a Tipset to do so @@ -133,14 +135,14 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse } // (2) check for the consensus faults themselves - var consensusFault *runtime5.ConsensusFault + var consensusFault *runtime7.ConsensusFault // (a) double-fork mining fault if blockA.Height == blockB.Height { - consensusFault = &runtime5.ConsensusFault{ + consensusFault = &runtime7.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime5.ConsensusFaultDoubleForkMining, + Type: runtime7.ConsensusFaultDoubleForkMining, } } @@ -148,10 +150,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse // strictly speaking no need to compare heights based on double fork mining check above, // but at same height this would be a different fault. if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { - consensusFault = &runtime5.ConsensusFault{ + consensusFault = &runtime7.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime5.ConsensusFaultTimeOffsetMining, + Type: runtime7.ConsensusFaultTimeOffsetMining, } } @@ -171,10 +173,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { - consensusFault = &runtime5.ConsensusFault{ + consensusFault = &runtime7.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime5.ConsensusFaultParentGrinding, + Type: runtime7.ConsensusFaultParentGrinding, } } } @@ -243,8 +245,8 @@ func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Addre return ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker) } -func (ss *syscallShim) VerifyPoSt(proof proof5.WindowPoStVerifyInfo) error { - ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof) +func (ss *syscallShim) VerifyPoSt(info proof5.WindowPoStVerifyInfo) error { + ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), info) if err != nil { return err } @@ -286,6 +288,7 @@ func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerify if err != nil { return xerrors.Errorf("failed to verify aggregated PoRep: %w", err) } + if !ok { return fmt.Errorf("invalid aggregate proof") } @@ -293,6 +296,19 @@ func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerify return nil } +func (ss *syscallShim) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error { + ok, err := ss.verifier.VerifyReplicaUpdate(update) + if err != nil { + return xerrors.Errorf("failed to verify replica update: %w", err) + } + + if !ok { + return fmt.Errorf("invalid replica update") + } + + return nil +} + func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error { // TODO: in genesis setup, we are currently faking signatures diff --git a/chain/vm/vm.go b/chain/vm/vm.go index 36308fe03..703a19880 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -82,10 +82,10 @@ type gasChargingBlocks struct { under cbor.IpldBlockstore } -func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error { +func (bs *gasChargingBlocks) View(ctx context.Context, c cid.Cid, cb func([]byte) error) error { if v, ok := bs.under.(blockstore.Viewer); ok { bs.chargeGas(bs.pricelist.OnIpldGet()) - return v.View(c, func(b []byte) error { + return v.View(ctx, c, func(b []byte) error { // we have successfully retrieved the value; charge for it, even if the user-provided function fails. bs.chargeGas(newGasCharge("OnIpldViewEnd", 0, 0).WithExtra(len(b))) bs.chargeGas(gasOnActorExec) @@ -93,16 +93,16 @@ func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error { }) } // the underlying blockstore doesn't implement the viewer interface, fall back to normal Get behaviour. - blk, err := bs.Get(c) + blk, err := bs.Get(ctx, c) if err == nil && blk != nil { return cb(blk.RawData()) } return err } -func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { +func (bs *gasChargingBlocks) Get(ctx context.Context, c cid.Cid) (block.Block, error) { bs.chargeGas(bs.pricelist.OnIpldGet()) - blk, err := bs.under.Get(c) + blk, err := bs.under.Get(ctx, c) if err != nil { return nil, aerrors.Escalate(err, "failed to get block from blockstore") } @@ -112,10 +112,10 @@ func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { return blk, nil } -func (bs *gasChargingBlocks) Put(blk block.Block) error { +func (bs *gasChargingBlocks) Put(ctx context.Context, blk block.Block) error { bs.chargeGas(bs.pricelist.OnIpldPut(len(blk.RawData()))) - if err := bs.under.Put(blk); err != nil { + if err := bs.under.Put(ctx, blk); err != nil { return aerrors.Escalate(err, "failed to write data to disk") } bs.chargeGas(gasOnActorExec) @@ -169,7 +169,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti } vmm.From = resF - if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version3 { + if vm.networkVersion <= network.Version3 { rt.Message = &vmm } else { resT, _ := rt.ResolveAddress(msg.To) @@ -202,18 +202,17 @@ type ( ) type VM struct { - cstate *state.StateTree - // TODO: Is base actually used? Can we delete it? - base cid.Cid + cstate *state.StateTree cst *cbor.BasicIpldStore buf *blockstore.BufferedBlockstore blockHeight abi.ChainEpoch areg *ActorRegistry rand Rand circSupplyCalc CircSupplyCalculator - ntwkVersion NtwkVersionGetter + networkVersion network.Version baseFee abi.TokenAmount lbStateGet LookbackStateGetter + baseCircSupply abi.TokenAmount Syscalls SyscallBuilder } @@ -226,7 +225,7 @@ type VMOpts struct { Actors *ActorRegistry Syscalls SyscallBuilder CircSupplyCalc CircSupplyCalculator - NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? It seems like even when creating new networks we want to use the 'global'/build-default version getter + NetworkVersion network.Version BaseFee abi.TokenAmount LookbackState LookbackStateGetter } @@ -239,28 +238,30 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { return nil, err } + baseCirc, err := opts.CircSupplyCalc(ctx, opts.Epoch, state) + if err != nil { + return nil, err + } + return &VM{ cstate: state, - base: opts.StateBase, cst: cst, buf: buf, blockHeight: opts.Epoch, areg: opts.Actors, rand: opts.Rand, // TODO: Probably should be a syscall circSupplyCalc: opts.CircSupplyCalc, - ntwkVersion: opts.NtwkVersion, + networkVersion: opts.NetworkVersion, Syscalls: opts.Syscalls, baseFee: opts.BaseFee, + baseCircSupply: baseCirc, lbStateGet: opts.LookbackState, }, nil } type Rand interface { - GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) + GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) } type ApplyRet struct { @@ -312,7 +313,7 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, return nil, aerrors.Wrapf(err, "could not create account") } toActor = a - if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version3 { + if vm.networkVersion <= network.Version3 { // Leave the rt.Message as is } else { nmsg := Message{ @@ -339,7 +340,7 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0)) if types.BigCmp(msg.Value, types.NewInt(0)) != 0 { - if err := vm.transfer(msg.From, msg.To, msg.Value); err != nil { + if err := vm.transfer(msg.From, msg.To, msg.Value, vm.networkVersion); err != nil { return nil, aerrors.Wrap(err, "failed to transfer funds") } } @@ -616,7 +617,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, } func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) { - if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version12 { + if vm.networkVersion <= network.Version12 { // Check to see if we should burn funds. We avoid burning on successful // window post. This won't catch _indirect_ window post calls, but this // is the best we can get for now. @@ -706,7 +707,7 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err go func() { for b := range toFlush { - if err := to.PutMany(b); err != nil { + if err := to.PutMany(ctx, b); err != nil { close(freeBufs) errFlushChan <- xerrors.Errorf("batch put in copy: %w", err) return @@ -735,7 +736,7 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err return nil } - if err := copyRec(from, to, root, batchCp); err != nil { + if err := copyRec(ctx, from, to, root, batchCp); err != nil { return xerrors.Errorf("copyRec: %w", err) } @@ -760,13 +761,13 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err return nil } -func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) error) error { +func copyRec(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) error) error { if root.Prefix().MhType == 0 { // identity cid, skip return nil } - blk, err := from.Get(root) + blk, err := from.Get(ctx, root) if err != nil { return xerrors.Errorf("get %s failed: %w", root, err) } @@ -791,7 +792,7 @@ func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) } } else { // If we have an object, we already have its children, skip the object. - has, err := to.Has(link) + has, err := to.Has(ctx, link) if err != nil { lerr = xerrors.Errorf("has: %w", err) return @@ -801,7 +802,7 @@ func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) } } - if err := copyRec(from, to, link, cp); err != nil { + if err := copyRec(ctx, from, to, link, cp); err != nil { lerr = err return } @@ -823,8 +824,15 @@ func (vm *VM) StateTree() types.StateTree { return vm.cstate } -func (vm *VM) SetBlockHeight(h abi.ChainEpoch) { +func (vm *VM) SetBlockHeight(ctx context.Context, h abi.ChainEpoch) error { vm.blockHeight = h + ncirc, err := vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) + if err != nil { + return err + } + + vm.baseCircSupply = ncirc + return nil } func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { @@ -854,12 +862,13 @@ func (vm *VM) SetInvoker(i *ActorRegistry) { vm.areg = i } -func (vm *VM) GetNtwkVersion(ctx context.Context, ce abi.ChainEpoch) network.Version { - return vm.ntwkVersion(ctx, ce) -} - func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) { - return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) + // Before v15, this was recalculated on each invocation as the state tree was mutated + if vm.networkVersion <= network.Version14 { + return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) + } + + return vm.baseCircSupply, nil } func (vm *VM) incrementNonce(addr address.Address) error { @@ -869,32 +878,71 @@ func (vm *VM) incrementNonce(addr address.Address) error { }) } -func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.ActorError { - if from == to { - return nil - } +func (vm *VM) transfer(from, to address.Address, amt types.BigInt, networkVersion network.Version) aerrors.ActorError { + var f *types.Actor + var fromID, toID address.Address + var err error + // switching the order around so that transactions for more than the balance sent to self fail + if networkVersion >= network.Version15 { + if amt.LessThan(types.NewInt(0)) { + return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) + } - fromID, err := vm.cstate.LookupID(from) - if err != nil { - return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) - } + fromID, err = vm.cstate.LookupID(from) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) + } - toID, err := vm.cstate.LookupID(to) - if err != nil { - return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) - } + f, err = vm.cstate.GetActor(fromID) + if err != nil { + return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) + } - if fromID == toID { - return nil - } + if f.Balance.LessThan(amt) { + return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed, insufficient balance in sender actor: %v", f.Balance) + } - if amt.LessThan(types.NewInt(0)) { - return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) - } + if from == to { + log.Infow("sending to same address: noop", "from/to addr", from) + return nil + } - f, err := vm.cstate.GetActor(fromID) - if err != nil { - return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) + toID, err = vm.cstate.LookupID(to) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) + } + + if fromID == toID { + log.Infow("sending to same actor ID: noop", "from/to actor", fromID) + return nil + } + } else { + if from == to { + return nil + } + + fromID, err = vm.cstate.LookupID(from) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) + } + + toID, err = vm.cstate.LookupID(to) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) + } + + if fromID == toID { + return nil + } + + if amt.LessThan(types.NewInt(0)) { + return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) + } + + f, err = vm.cstate.GetActor(fromID) + if err != nil { + return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) + } } t, err := vm.cstate.GetActor(toID) @@ -902,17 +950,17 @@ func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.Actor return aerrors.Fatalf("transfer failed when retrieving receiver actor: %s", err) } - if err := deductFunds(f, amt); err != nil { + if err = deductFunds(f, amt); err != nil { return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed when deducting funds (%s): %s", types.FIL(amt), err) } depositFunds(t, amt) - if err := vm.cstate.SetActor(fromID, f); err != nil { - return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err) + if err = vm.cstate.SetActor(fromID, f); err != nil { + return aerrors.Fatalf("transfer failed when setting sender actor: %s", err) } - if err := vm.cstate.SetActor(toID, t); err != nil { - return aerrors.Fatalf("transfer failed when setting sender actor: %s", err) + if err = vm.cstate.SetActor(toID, t); err != nil { + return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err) } return nil diff --git a/chain/wallet/ledger/ledger.go b/chain/wallet/ledger/ledger.go index eb16f6460..5279389de 100644 --- a/chain/wallet/ledger/ledger.go +++ b/chain/wallet/ledger/ledger.go @@ -39,7 +39,7 @@ type LedgerKeyInfo struct { var _ api.Wallet = (*LedgerWallet)(nil) func (lw LedgerWallet) WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta api.MsgMeta) (*crypto.Signature, error) { - ki, err := lw.getKeyInfo(signer) + ki, err := lw.getKeyInfo(ctx, signer) if err != nil { return nil, err } @@ -80,8 +80,8 @@ func (lw LedgerWallet) WalletSign(ctx context.Context, signer address.Address, t }, nil } -func (lw LedgerWallet) getKeyInfo(addr address.Address) (*LedgerKeyInfo, error) { - kib, err := lw.ds.Get(keyForAddr(addr)) +func (lw LedgerWallet) getKeyInfo(ctx context.Context, addr address.Address) (*LedgerKeyInfo, error) { + kib, err := lw.ds.Get(ctx, keyForAddr(addr)) if err != nil { return nil, err } @@ -95,7 +95,7 @@ func (lw LedgerWallet) getKeyInfo(addr address.Address) (*LedgerKeyInfo, error) } func (lw LedgerWallet) WalletDelete(ctx context.Context, k address.Address) error { - return lw.ds.Delete(keyForAddr(k)) + return lw.ds.Delete(ctx, keyForAddr(k)) } func (lw LedgerWallet) WalletExport(ctx context.Context, k address.Address) (*types.KeyInfo, error) { @@ -103,7 +103,7 @@ func (lw LedgerWallet) WalletExport(ctx context.Context, k address.Address) (*ty } func (lw LedgerWallet) WalletHas(ctx context.Context, k address.Address) (bool, error) { - _, err := lw.ds.Get(keyForAddr(k)) + _, err := lw.ds.Get(ctx, keyForAddr(k)) if err == nil { return true, nil } @@ -118,10 +118,10 @@ func (lw LedgerWallet) WalletImport(ctx context.Context, kinfo *types.KeyInfo) ( if err := json.Unmarshal(kinfo.PrivateKey, &ki); err != nil { return address.Undef, err } - return lw.importKey(ki) + return lw.importKey(ctx, ki) } -func (lw LedgerWallet) importKey(ki LedgerKeyInfo) (address.Address, error) { +func (lw LedgerWallet) importKey(ctx context.Context, ki LedgerKeyInfo) (address.Address, error) { if ki.Address == address.Undef { return address.Undef, fmt.Errorf("no address given in imported key info") } @@ -133,7 +133,7 @@ func (lw LedgerWallet) importKey(ki LedgerKeyInfo) (address.Address, error) { return address.Undef, xerrors.Errorf("marshaling key info: %w", err) } - if err := lw.ds.Put(keyForAddr(ki.Address), bb); err != nil { + if err := lw.ds.Put(ctx, keyForAddr(ki.Address), bb); err != nil { return address.Undef, err } @@ -141,7 +141,7 @@ func (lw LedgerWallet) importKey(ki LedgerKeyInfo) (address.Address, error) { } func (lw LedgerWallet) WalletList(ctx context.Context) ([]address.Address, error) { - res, err := lw.ds.Query(query.Query{Prefix: dsLedgerPrefix}) + res, err := lw.ds.Query(ctx, query.Query{Prefix: dsLedgerPrefix}) if err != nil { return nil, err } @@ -175,7 +175,7 @@ func (lw LedgerWallet) WalletNew(ctx context.Context, t types.KeyType) (address. t, types.KTSecp256k1Ledger) } - res, err := lw.ds.Query(query.Query{Prefix: dsLedgerPrefix}) + res, err := lw.ds.Query(ctx, query.Query{Prefix: dsLedgerPrefix}) if err != nil { return address.Undef, err } @@ -224,7 +224,7 @@ func (lw LedgerWallet) WalletNew(ctx context.Context, t types.KeyType) (address. lki.Address = a lki.Path = path - return lw.importKey(lki) + return lw.importKey(ctx, lki) } func (lw *LedgerWallet) Get() api.Wallet { diff --git a/cli/backup.go b/cli/backup.go index 856e098dd..4d88d4bbc 100644 --- a/cli/backup.go +++ b/cli/backup.go @@ -66,7 +66,7 @@ func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Comma return xerrors.Errorf("opening backup file %s: %w", fpath, err) } - if err := bds.Backup(out); err != nil { + if err := bds.Backup(cctx.Context, out); err != nil { if cerr := out.Close(); cerr != nil { log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err) } diff --git a/cli/client.go b/cli/client.go index f5b38788b..634bd18e5 100644 --- a/cli/client.go +++ b/cli/client.go @@ -92,6 +92,7 @@ var clientCmd = &cli.Command{ WithCategory("data", clientLocalCmd), WithCategory("data", clientStat), WithCategory("retrieval", clientFindCmd), + WithCategory("retrieval", clientQueryRetrievalAskCmd), WithCategory("retrieval", clientRetrieveCmd), WithCategory("retrieval", clientRetrieveCatCmd), WithCategory("retrieval", clientRetrieveLsCmd), @@ -1030,6 +1031,67 @@ var clientFindCmd = &cli.Command{ }, } +var clientQueryRetrievalAskCmd = &cli.Command{ + Name: "retrieval-ask", + Usage: "Get a miner's retrieval ask", + ArgsUsage: "[minerAddress] [data CID]", + Flags: []cli.Flag{ + &cli.Int64Flag{ + Name: "size", + Usage: "data size in bytes", + }, + }, + Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) + if cctx.NArg() != 2 { + afmt.Println("Usage: retrieval-ask [minerAddress] [data CID]") + return nil + } + + maddr, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + dataCid, err := cid.Parse(cctx.Args().Get(1)) + if err != nil { + return fmt.Errorf("parsing data cid: %w", err) + } + + api, closer, err := GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + ask, err := api.ClientMinerQueryOffer(ctx, maddr, dataCid, nil) + if err != nil { + return err + } + + afmt.Printf("Ask: %s\n", maddr) + afmt.Printf("Unseal price: %s\n", types.FIL(ask.UnsealPrice)) + afmt.Printf("Price per byte: %s\n", types.FIL(ask.PricePerByte)) + afmt.Printf("Payment interval: %s\n", types.SizeStr(types.NewInt(ask.PaymentInterval))) + afmt.Printf("Payment interval increase: %s\n", types.SizeStr(types.NewInt(ask.PaymentIntervalIncrease))) + + size := cctx.Uint64("size") + if size == 0 { + if ask.Size == 0 { + return nil + } + size = ask.Size + afmt.Printf("Size: %s\n", types.SizeStr(types.NewInt(ask.Size))) + } + transferPrice := types.BigMul(ask.PricePerByte, types.NewInt(size)) + totalPrice := types.BigAdd(ask.UnsealPrice, transferPrice) + afmt.Printf("Total price for %d bytes: %s\n", size, types.FIL(totalPrice)) + + return nil + }, +} + var clientListRetrievalsCmd = &cli.Command{ Name: "list-retrievals", Usage: "List retrieval market deals", diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go index f4cc0f837..0fddf515d 100644 --- a/cmd/lotus-bench/caching_verifier.go +++ b/cmd/lotus-bench/caching_verifier.go @@ -5,10 +5,11 @@ import ( "context" "errors" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-datastore" "github.com/minio/blake2b-simd" cbg "github.com/whyrusleeping/cbor-gen" @@ -36,7 +37,7 @@ func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBOR } hash := hasher.Sum(nil) key := datastore.NewKey(string(hash)) - fromDs, err := cv.ds.Get(key) + fromDs, err := cv.ds.Get(context.Background(), key) if err == nil { switch fromDs[0] { case 's': @@ -66,7 +67,7 @@ func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBOR } if len(save) != 0 { - errSave := cv.ds.Put(key, save) + errSave := cv.ds.Put(context.Background(), key, save) if errSave != nil { log.Errorf("error saving result: %+v", errSave) } @@ -85,9 +86,10 @@ func (cv *cachingVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { }, &svi) } -func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) { return cv.backend.VerifyWinningPoSt(ctx, info) } + func (cv *cachingVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { return cv.withCache(func() (bool, error) { return cv.backend.VerifyWindowPoSt(ctx, info) @@ -97,8 +99,12 @@ func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Contex return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u) } -func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { +func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) (bool, error) { return cv.backend.VerifyAggregateSeals(aggregate) } +func (cv cachingVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + return cv.backend.VerifyReplicaUpdate(update) +} + var _ ffiwrapper.Verifier = (*cachingVerifier)(nil) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index c66b90deb..18ce6c7ef 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -17,8 +17,6 @@ import ( "time" ocprom "contrib.go.opencensus.io/exporter/prometheus" - "github.com/cockroachdb/pebble" - "github.com/cockroachdb/pebble/bloom" "github.com/ipfs/go-cid" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -46,7 +44,6 @@ import ( "github.com/ipfs/go-datastore" badger "github.com/ipfs/go-ds-badger2" measure "github.com/ipfs/go-ds-measure" - pebbleds "github.com/ipfs/go-ds-pebble" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -114,9 +111,6 @@ var importBenchCmd = &cli.Command{ &cli.BoolFlag{ Name: "only-import", }, - &cli.BoolFlag{ - Name: "use-pebble", - }, &cli.BoolFlag{ Name: "use-native-badger", }, @@ -178,29 +172,6 @@ var importBenchCmd = &cli.Command{ ) switch { - case cctx.Bool("use-pebble"): - log.Info("using pebble") - cache := 512 - ds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ - // Pebble has a single combined cache area and the write - // buffers are taken from this too. Assign all available - // memory allowance for cache. - Cache: pebble.NewCache(int64(cache * 1024 * 1024)), - // The size of memory table(as well as the write buffer). - // Note, there may have more than two memory tables in the system. - // MemTableStopWritesThreshold can be configured to avoid the memory abuse. - MemTableSize: cache * 1024 * 1024 / 4, - // The default compaction concurrency(1 thread), - // Here use all available CPUs for faster compaction. - MaxConcurrentCompactions: runtime.NumCPU(), - // Per-level options. Options for at least one level must be specified. The - // options for the last level are used for all subsequent levels. - Levels: []pebble.LevelOptions{ - {TargetFileSize: 16 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10), Compression: pebble.NoCompression}, - }, - Logger: log, - }) - case cctx.Bool("use-native-badger"): log.Info("using native badger") var opts badgerbs.Options @@ -333,7 +304,7 @@ var importBenchCmd = &cli.Command{ return fmt.Errorf("no CAR file provided for import") } - head, err = cs.Import(carFile) + head, err = cs.Import(cctx.Context, carFile) if err != nil { return err } @@ -356,7 +327,7 @@ var importBenchCmd = &cli.Command{ return xerrors.Errorf("failed to parse head tipset key: %w", err) } - head, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + head, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) if err != nil { return err } @@ -365,7 +336,7 @@ var importBenchCmd = &cli.Command{ if err != nil { return err } - head, err = cs.LoadTipSet(types.NewTipSetKey(cr.Header.Roots...)) + head, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cr.Header.Roots...)) if err != nil { return err } @@ -382,7 +353,7 @@ var importBenchCmd = &cli.Command{ if cids, err = lcli.ParseTipSetString(tsk); err != nil { return xerrors.Errorf("failed to parse genesis tipset key: %w", err) } - genesis, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + genesis, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) } else { log.Warnf("getting genesis by height; this will be slow; pass in the genesis tipset through --genesis-tipset") // fallback to the slow path of walking the chain. @@ -393,7 +364,7 @@ var importBenchCmd = &cli.Command{ return err } - if err = cs.SetGenesis(genesis.Blocks()[0]); err != nil { + if err = cs.SetGenesis(cctx.Context, genesis.Blocks()[0]); err != nil { return err } @@ -404,10 +375,10 @@ var importBenchCmd = &cli.Command{ if cids, err = lcli.ParseTipSetString(tsk); err != nil { return xerrors.Errorf("failed to end genesis tipset key: %w", err) } - end, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + end, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) } else if h := cctx.Int64("end-height"); h != 0 { log.Infof("getting end tipset at height %d...", h) - end, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true) + end, err = cs.GetTipsetByHeight(cctx.Context, abi.ChainEpoch(h), head, true) } if err != nil { @@ -426,7 +397,7 @@ var importBenchCmd = &cli.Command{ if cids, err = lcli.ParseTipSetString(tsk); err != nil { return xerrors.Errorf("failed to start genesis tipset key: %w", err) } - start, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + start, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) } else if h := cctx.Int64("start-height"); h != 0 { log.Infof("getting start tipset at height %d...", h) // lookback from the end tipset (which falls back to head if not supplied). @@ -439,7 +410,7 @@ var importBenchCmd = &cli.Command{ if start != nil { startEpoch = start.Height() - if err := cs.ForceHeadSilent(context.Background(), start); err != nil { + if err := cs.ForceHeadSilent(cctx.Context, start); err != nil { // if err := cs.SetHead(start); err != nil { return err } @@ -450,7 +421,7 @@ var importBenchCmd = &cli.Command{ if h := ts.Height(); h%100 == 0 { log.Infof("walking back the chain; loaded tipset at height %d...", h) } - next, err := cs.LoadTipSet(ts.Parents()) + next, err := cs.LoadTipSet(cctx.Context, ts.Parents()) if err != nil { return err } diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 0b8ec6fe3..b0e71b90e 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -12,6 +12,8 @@ import ( "time" saproof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + saproof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/docker/go-units" logging "github.com/ipfs/go-log/v2" @@ -260,7 +262,8 @@ var sealBenchCmd = &cli.Command{ sectorNumber := c.Int("num-sectors") var sealTimings []SealingResult - var sealedSectors []saproof2.SectorInfo + var extendedSealedSectors []saproof7.ExtendedSectorInfo + var sealedSectors []saproof7.SectorInfo if robench == "" { var err error @@ -269,7 +272,7 @@ var sealBenchCmd = &cli.Command{ PreCommit2: 1, Commit: 1, } - sealTimings, sealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal")) + sealTimings, extendedSealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal")) if err != nil { return xerrors.Errorf("failed to run seals: %w", err) } @@ -296,7 +299,13 @@ var sealBenchCmd = &cli.Command{ } for _, s := range genm.Sectors { - sealedSectors = append(sealedSectors, saproof2.SectorInfo{ + extendedSealedSectors = append(extendedSealedSectors, saproof7.ExtendedSectorInfo{ + SealedCID: s.CommR, + SectorNumber: s.SectorID, + SealProof: s.ProofType, + SectorKey: nil, + }) + sealedSectors = append(sealedSectors, proof.SectorInfo{ SealedCID: s.CommR, SectorNumber: s.SectorID, SealProof: s.ProofType, @@ -325,20 +334,20 @@ var sealBenchCmd = &cli.Command{ return err } - fcandidates, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(context.TODO(), wipt, mid, challenge[:], uint64(len(sealedSectors))) + fcandidates, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(context.TODO(), wipt, mid, challenge[:], uint64(len(extendedSealedSectors))) if err != nil { return err } - candidates := make([]saproof2.SectorInfo, len(fcandidates)) + xcandidates := make([]saproof7.ExtendedSectorInfo, len(fcandidates)) for i, fcandidate := range fcandidates { - candidates[i] = sealedSectors[fcandidate] + xcandidates[i] = extendedSealedSectors[fcandidate] } gencandidates := time.Now() log.Info("computing winning post snark (cold)") - proof1, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:]) + proof1, err := sb.GenerateWinningPoSt(context.TODO(), mid, xcandidates, challenge[:]) if err != nil { return err } @@ -346,14 +355,23 @@ var sealBenchCmd = &cli.Command{ winningpost1 := time.Now() log.Info("computing winning post snark (hot)") - proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:]) + proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, xcandidates, challenge[:]) if err != nil { return err } + candidates := make([]saproof7.SectorInfo, len(xcandidates)) + for i, xsi := range xcandidates { + candidates[i] = saproof7.SectorInfo{ + SealedCID: xsi.SealedCID, + SectorNumber: xsi.SectorNumber, + SealProof: xsi.SealProof, + } + } + winnningpost2 := time.Now() - pvi1 := saproof2.WinningPoStVerifyInfo{ + pvi1 := saproof7.WinningPoStVerifyInfo{ Randomness: abi.PoStRandomness(challenge[:]), Proofs: proof1, ChallengedSectors: candidates, @@ -369,7 +387,7 @@ var sealBenchCmd = &cli.Command{ verifyWinningPost1 := time.Now() - pvi2 := saproof2.WinningPoStVerifyInfo{ + pvi2 := saproof7.WinningPoStVerifyInfo{ Randomness: abi.PoStRandomness(challenge[:]), Proofs: proof2, ChallengedSectors: candidates, @@ -386,7 +404,7 @@ var sealBenchCmd = &cli.Command{ verifyWinningPost2 := time.Now() log.Info("computing window post snark (cold)") - wproof1, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, sealedSectors, challenge[:]) + wproof1, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, extendedSealedSectors, challenge[:]) if err != nil { return err } @@ -394,7 +412,7 @@ var sealBenchCmd = &cli.Command{ windowpost1 := time.Now() log.Info("computing window post snark (hot)") - wproof2, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, sealedSectors, challenge[:]) + wproof2, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, extendedSealedSectors, challenge[:]) if err != nil { return err } @@ -502,10 +520,10 @@ type ParCfg struct { Commit int } -func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof2.SectorInfo, error) { +func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof7.ExtendedSectorInfo, error) { var pieces []abi.PieceInfo sealTimings := make([]SealingResult, numSectors) - sealedSectors := make([]saproof2.SectorInfo, numSectors) + sealedSectors := make([]saproof7.ExtendedSectorInfo, numSectors) preCommit2Sema := make(chan struct{}, par.PreCommit2) commitSema := make(chan struct{}, par.Commit) @@ -579,10 +597,11 @@ func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par precommit2 := time.Now() <-preCommit2Sema - sealedSectors[i] = saproof2.SectorInfo{ + sealedSectors[i] = saproof7.ExtendedSectorInfo{ SealProof: sid.ProofType, SectorNumber: i, SealedCID: cids.Sealed, + SectorKey: nil, } seed := lapi.SealSeed{ diff --git a/cmd/lotus-miner/info.go b/cmd/lotus-miner/info.go index e50c4366e..39de942aa 100644 --- a/cmd/lotus-miner/info.go +++ b/cmd/lotus-miner/info.go @@ -470,6 +470,8 @@ var stateList = []stateMeta{ {col: color.FgBlue, state: sealing.Empty}, {col: color.FgBlue, state: sealing.WaitDeals}, {col: color.FgBlue, state: sealing.AddPiece}, + {col: color.FgBlue, state: sealing.SnapDealsWaitDeals}, + {col: color.FgBlue, state: sealing.SnapDealsAddPiece}, {col: color.FgRed, state: sealing.UndefinedSectorState}, {col: color.FgYellow, state: sealing.Packing}, @@ -488,6 +490,12 @@ var stateList = []stateMeta{ {col: color.FgYellow, state: sealing.SubmitCommitAggregate}, {col: color.FgYellow, state: sealing.CommitAggregateWait}, {col: color.FgYellow, state: sealing.FinalizeSector}, + {col: color.FgYellow, state: sealing.SnapDealsPacking}, + {col: color.FgYellow, state: sealing.UpdateReplica}, + {col: color.FgYellow, state: sealing.ProveReplicaUpdate}, + {col: color.FgYellow, state: sealing.SubmitReplicaUpdate}, + {col: color.FgYellow, state: sealing.ReplicaUpdateWait}, + {col: color.FgYellow, state: sealing.FinalizeReplicaUpdate}, {col: color.FgCyan, state: sealing.Terminating}, {col: color.FgCyan, state: sealing.TerminateWait}, @@ -495,6 +503,7 @@ var stateList = []stateMeta{ {col: color.FgCyan, state: sealing.TerminateFailed}, {col: color.FgCyan, state: sealing.Removing}, {col: color.FgCyan, state: sealing.Removed}, + {col: color.FgCyan, state: sealing.AbortUpgrade}, {col: color.FgRed, state: sealing.FailedUnrecoverable}, {col: color.FgRed, state: sealing.AddPieceFailed}, @@ -512,6 +521,9 @@ var stateList = []stateMeta{ {col: color.FgRed, state: sealing.RemoveFailed}, {col: color.FgRed, state: sealing.DealsExpired}, {col: color.FgRed, state: sealing.RecoverDealIDs}, + {col: color.FgRed, state: sealing.SnapDealsAddPieceFailed}, + {col: color.FgRed, state: sealing.SnapDealsDealsExpired}, + {col: color.FgRed, state: sealing.ReplicaUpdateFailed}, } func init() { diff --git a/cmd/lotus-miner/init.go b/cmd/lotus-miner/init.go index b2199dd94..ae742c663 100644 --- a/cmd/lotus-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -347,7 +347,7 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string return err } - if err := mds.Put(sectorKey, b); err != nil { + if err := mds.Put(ctx, sectorKey, b); err != nil { return err } @@ -387,7 +387,7 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string buf := make([]byte, binary.MaxVarintLen64) size := binary.PutUvarint(buf, uint64(maxSectorID)) - return mds.Put(datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size]) + return mds.Put(ctx, datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size]) } func findMarketDealID(ctx context.Context, api v1api.FullNode, deal market2.DealProposal) (abi.DealID, error) { @@ -428,7 +428,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode return xerrors.Errorf("peer ID from private key: %w", err) } - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(ctx, "/metadata") if err != nil { return err } @@ -441,7 +441,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode } if cctx.Bool("genesis-miner") { - if err := mds.Put(datastore.NewKey("miner-address"), a.Bytes()); err != nil { + if err := mds.Put(ctx, datastore.NewKey("miner-address"), a.Bytes()); err != nil { return err } @@ -548,7 +548,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode } log.Infof("Created new miner: %s", addr) - if err := mds.Put(datastore.NewKey("miner-address"), addr.Bytes()); err != nil { + if err := mds.Put(ctx, datastore.NewKey("miner-address"), addr.Bytes()); err != nil { return err } diff --git a/cmd/lotus-miner/init_restore.go b/cmd/lotus-miner/init_restore.go index 0974a7c5d..1aaa7909a 100644 --- a/cmd/lotus-miner/init_restore.go +++ b/cmd/lotus-miner/init_restore.go @@ -233,7 +233,7 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi log.Info("Restoring metadata backup") - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(ctx, "/metadata") if err != nil { return err } @@ -255,7 +255,7 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi log.Info("Checking actor metadata") - abytes, err := mds.Get(datastore.NewKey("miner-address")) + abytes, err := mds.Get(ctx, datastore.NewKey("miner-address")) if err != nil { return xerrors.Errorf("getting actor address from metadata datastore: %w", err) } diff --git a/cmd/lotus-miner/main.go b/cmd/lotus-miner/main.go index 110748f48..57b5d8a3e 100644 --- a/cmd/lotus-miner/main.go +++ b/cmd/lotus-miner/main.go @@ -7,7 +7,6 @@ import ( "github.com/fatih/color" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" - "go.opencensus.io/trace" "golang.org/x/xerrors" cliutil "github.com/filecoin-project/lotus/cli/util" @@ -55,10 +54,11 @@ func main() { lcli.WithCategory("storage", sealingCmd), lcli.WithCategory("retrieval", piecesCmd), } + jaeger := tracing.SetupJaegerTracing("lotus") defer func() { if jaeger != nil { - jaeger.Flush() + _ = jaeger.ForceFlush(context.Background()) } }() @@ -66,7 +66,9 @@ func main() { cmd := cmd originBefore := cmd.Before cmd.Before = func(cctx *cli.Context) error { - trace.UnregisterExporter(jaeger) + if jaeger != nil { + _ = jaeger.Shutdown(cctx.Context) + } jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) if cctx.IsSet("color") { diff --git a/cmd/lotus-miner/market.go b/cmd/lotus-miner/market.go index d3da6f9b3..c7089e74e 100644 --- a/cmd/lotus-miner/market.go +++ b/cmd/lotus-miner/market.go @@ -638,6 +638,7 @@ var dataTransfersCmd = &cli.Command{ transfersListCmd, marketRestartTransfer, marketCancelTransfer, + transfersDiagnosticsCmd, }, } @@ -857,6 +858,38 @@ var transfersListCmd = &cli.Command{ }, } +var transfersDiagnosticsCmd = &cli.Command{ + Name: "diagnostics", + Usage: "Get detailed diagnostics on active transfers with a specific peer", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return cli.ShowCommandHelp(cctx, cctx.Command.Name) + } + api, closer, err := lcli.GetMarketsAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + targetPeer, err := peer.Decode(cctx.Args().First()) + if err != nil { + return err + } + diagnostics, err := api.MarketDataTransferDiagnostics(ctx, targetPeer) + if err != nil { + return err + } + out, err := json.MarshalIndent(diagnostics, "", "\t") + if err != nil { + return err + } + fmt.Println(string(out)) + return nil + }, +} + var dealsPendingPublish = &cli.Command{ Name: "pending-publish", Usage: "list deals waiting in publish queue", diff --git a/cmd/lotus-miner/retrieval-deals.go b/cmd/lotus-miner/retrieval-deals.go index 1ce1f6593..bd5d30a4e 100644 --- a/cmd/lotus-miner/retrieval-deals.go +++ b/cmd/lotus-miner/retrieval-deals.go @@ -3,6 +3,7 @@ package main import ( "fmt" "os" + "sort" "text/tabwriter" "github.com/docker/go-units" @@ -137,6 +138,10 @@ var retrievalDealsListCmd = &cli.Command{ return err } + sort.Slice(deals, func(i, j int) bool { + return deals[i].ID < deals[j].ID + }) + w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) _, _ = fmt.Fprintf(w, "Receiver\tDealID\tPayload\tState\tPricePerByte\tBytesSent\tMessage\n") diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index 43a71fd9e..629ff7903 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -20,6 +20,7 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" "github.com/filecoin-project/lotus/api" @@ -50,11 +51,13 @@ var sectorsCmd = &cli.Command{ sectorsExtendCmd, sectorsTerminateCmd, sectorsRemoveCmd, + sectorsSnapUpCmd, sectorsMarkForUpgradeCmd, sectorsStartSealCmd, sectorsSealDelayCmd, sectorsCapacityCollateralCmd, sectorsBatching, + sectorsRefreshPieceMatchingCmd, }, } @@ -1476,6 +1479,44 @@ var sectorsRemoveCmd = &cli.Command{ }, } +var sectorsSnapUpCmd = &cli.Command{ + Name: "snap-up", + Usage: "Mark a committed capacity sector to be filled with deals", + ArgsUsage: "", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number")) + } + + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + api, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() + ctx := lcli.ReqContext(cctx) + + nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to get network version: %w", err) + } + if nv < network.Version15 { + return xerrors.Errorf("snap deals upgrades enabled in network v15") + } + + id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) + if err != nil { + return xerrors.Errorf("could not parse sector number: %w", err) + } + + return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), true) + }, +} + var sectorsMarkForUpgradeCmd = &cli.Command{ Name: "mark-for-upgrade", Usage: "Mark a committed capacity sector for replacement by a sector with deals", @@ -1490,14 +1531,28 @@ var sectorsMarkForUpgradeCmd = &cli.Command{ return err } defer closer() + + api, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() ctx := lcli.ReqContext(cctx) + nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to get network version: %w", err) + } + if nv >= network.Version15 { + return xerrors.Errorf("classic cc upgrades disabled v15 and beyond, use `snap-up`") + } + id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) if err != nil { return xerrors.Errorf("could not parse sector number: %w", err) } - return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id)) + return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), false) }, } @@ -1648,6 +1703,11 @@ var sectorsUpdateCmd = &cli.Command{ return xerrors.Errorf("could not parse sector number: %w", err) } + _, err = nodeApi.SectorsStatus(ctx, abi.SectorNumber(id), false) + if err != nil { + return xerrors.Errorf("sector %d not found, could not change state", id) + } + newState := cctx.Args().Get(1) if _, ok := sealing.ExistSectorStateList[sealing.SectorState(newState)]; !ok { fmt.Printf(" \"%s\" is not a valid state. Possible states for sectors are: \n", newState) @@ -1995,6 +2055,25 @@ var sectorsBatchingPendingPreCommit = &cli.Command{ }, } +var sectorsRefreshPieceMatchingCmd = &cli.Command{ + Name: "match-pending-pieces", + Usage: "force a refreshed match of pending pieces to open sectors without manually waiting for more deals", + Action: func(cctx *cli.Context) error { + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + if err := nodeApi.SectorMatchPendingPiecesToOpenSectors(ctx); err != nil { + return err + } + + return nil + }, +} + func yesno(b bool) string { if b { return color.GreenString("YES") diff --git a/cmd/lotus-miner/storage.go b/cmd/lotus-miner/storage.go index 4df6a9904..6f7a627f6 100644 --- a/cmd/lotus-miner/storage.go +++ b/cmd/lotus-miner/storage.go @@ -48,6 +48,7 @@ stored while moving through the sealing pipeline (references as 'seal').`, storageListCmd, storageFindCmd, storageCleanupCmd, + storageLocks, }, } @@ -758,3 +759,43 @@ func cleanupRemovedSectorData(ctx context.Context, api api.StorageMiner, napi v0 return nil } + +var storageLocks = &cli.Command{ + Name: "locks", + Usage: "show active sector locks", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + locks, err := api.StorageGetLocks(ctx) + if err != nil { + return err + } + + for _, lock := range locks.Locks { + st, err := api.SectorsStatus(ctx, lock.Sector.Number, false) + if err != nil { + return xerrors.Errorf("getting sector status(%d): %w", lock.Sector.Number, err) + } + + lockstr := fmt.Sprintf("%d\t%s\t", lock.Sector.Number, color.New(stateOrder[sealing.SectorState(st.State)].col).Sprint(st.State)) + + for i := 0; i < storiface.FileTypes; i++ { + if lock.Write[i] > 0 { + lockstr += fmt.Sprintf("%s(%s) ", storiface.SectorFileType(1< 0 { + lockstr += fmt.Sprintf("%s(%s:%d) ", storiface.SectorFileType(1< 0 && i%100 == 0 { + fmt.Printf("catching up until latest message lands") + _, err := api.StateWaitMsg(ctx, smsg.Cid(), 1, lapi.LookbackNoLimit, true) + if err != nil { + return err + } + } + + msgCids = append(msgCids, smsg.Cid()) + } + + fmt.Println("waiting on messages...") + + for _, msgCid := range msgCids { + ml, err := api.StateWaitMsg(ctx, msgCid, 5, lapi.LookbackNoLimit, true) + if err != nil { + return err + } + if ml.Receipt.ExitCode != exitcode.Ok { + fmt.Printf("MSG %s NON-ZERO EXITCODE: %s\n", msgCid, ml.Receipt.ExitCode) + } + } + + fmt.Println("all sent messages succeeded") + return nil + }, +} diff --git a/cmd/lotus-shed/splitstore.go b/cmd/lotus-shed/splitstore.go index 4f668888e..58563955f 100644 --- a/cmd/lotus-shed/splitstore.go +++ b/cmd/lotus-shed/splitstore.go @@ -331,7 +331,7 @@ func deleteSplitstoreKeys(lr repo.LockedRepo) error { } var keys []datastore.Key - res, err := ds.Query(query.Query{Prefix: "/splitstore"}) + res, err := ds.Query(context.Background(), query.Query{Prefix: "/splitstore"}) if err != nil { return xerrors.Errorf("error querying datastore for splitstore keys: %w", err) } @@ -346,7 +346,7 @@ func deleteSplitstoreKeys(lr repo.LockedRepo) error { for _, k := range keys { fmt.Printf("deleting %s from datastore...\n", k) - err = ds.Delete(k) + err = ds.Delete(context.Background(), k) if err != nil { return xerrors.Errorf("error deleting key %s from datastore: %w", k, err) } diff --git a/cmd/lotus-shed/terminations.go b/cmd/lotus-shed/terminations.go new file mode 100644 index 000000000..0691f35da --- /dev/null +++ b/cmd/lotus-shed/terminations.go @@ -0,0 +1,181 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "strconv" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/node/repo" + miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/urfave/cli/v2" +) + +var terminationsCmd = &cli.Command{ + Name: "terminations", + Description: "Lists terminated deals from the past 2 days", + ArgsUsage: "[block to look back from] [lookback period (epochs)]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.TODO() + + if cctx.NArg() != 2 { + return fmt.Errorf("must pass block cid && lookback period") + } + + blkCid, err := cid.Decode(cctx.Args().First()) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + fsrepo, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + lkrepo, err := fsrepo.Lock(repo.FullNode) + if err != nil { + return err + } + + defer lkrepo.Close() //nolint:errcheck + + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return fmt.Errorf("failed to open blockstore: %w", err) + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lkrepo.Datastore(context.Background(), "/metadata") + if err != nil { + return err + } + + cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) + defer cs.Close() //nolint:errcheck + + cst := cbor.NewCborStore(bs) + store := adt.WrapStore(ctx, cst) + + blk, err := cs.GetBlock(ctx, blkCid) + if err != nil { + return err + } + + lbp, err := strconv.Atoi(cctx.Args().Get(1)) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + cutoff := blk.Height - abi.ChainEpoch(lbp) + + for blk.Height > cutoff { + pts, err := cs.LoadTipSet(ctx, types.NewTipSetKey(blk.Parents...)) + if err != nil { + return err + } + + blk = pts.Blocks()[0] + + msgs, err := cs.MessagesForTipset(ctx, pts) + if err != nil { + return err + } + + for _, v := range msgs { + msg := v.VMMessage() + if msg.Method != miner.Methods.TerminateSectors { + continue + } + + tree, err := state.LoadStateTree(cst, blk.ParentStateRoot) + if err != nil { + return err + } + + minerAct, err := tree.GetActor(msg.To) + if err != nil { + return err + } + + if !builtin.IsStorageMinerActor(minerAct.Code) { + continue + } + + minerSt, err := miner.Load(store, minerAct) + if err != nil { + return err + } + + marketAct, err := tree.GetActor(market.Address) + if err != nil { + return err + } + + marketSt, err := market.Load(store, marketAct) + if err != nil { + return err + } + + proposals, err := marketSt.Proposals() + if err != nil { + return err + } + + var termParams miner2.TerminateSectorsParams + err = termParams.UnmarshalCBOR(bytes.NewBuffer(msg.Params)) + if err != nil { + return err + } + + for _, t := range termParams.Terminations { + sectors, err := minerSt.LoadSectors(&t.Sectors) + if err != nil { + return err + } + + for _, sector := range sectors { + for _, deal := range sector.DealIDs { + prop, find, err := proposals.Get(deal) + if err != nil { + return err + } + if find { + fmt.Printf("%s, %d, %d, %s, %s, %s\n", msg.To, sector.SectorNumber, deal, prop.Client, prop.PieceCID, prop.Label) + } + } + } + } + } + } + + return nil + }, +} diff --git a/cmd/lotus-sim/create.go b/cmd/lotus-sim/create.go index 4867a5da5..23ea454a3 100644 --- a/cmd/lotus-sim/create.go +++ b/cmd/lotus-sim/create.go @@ -26,7 +26,7 @@ var createSimCommand = &cli.Command{ var ts *types.TipSet switch cctx.NArg() { case 0: - if err := node.Chainstore.Load(); err != nil { + if err := node.Chainstore.Load(cctx.Context); err != nil { return err } ts = node.Chainstore.GetHeaviestTipSet() @@ -36,7 +36,7 @@ var createSimCommand = &cli.Command{ return err } tsk := types.NewTipSetKey(cids...) - ts, err = node.Chainstore.LoadTipSet(tsk) + ts, err = node.Chainstore.LoadTipSet(cctx.Context, tsk) if err != nil { return err } diff --git a/cmd/lotus-sim/info_capacity.go b/cmd/lotus-sim/info_capacity.go index 4372ee34a..a92d2cde4 100644 --- a/cmd/lotus-sim/info_capacity.go +++ b/cmd/lotus-sim/info_capacity.go @@ -39,7 +39,7 @@ var infoCapacityGrowthSimCommand = &cli.Command{ lastHeight := ts.Height() for ts.Height() > firstEpoch && cctx.Err() == nil { - ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + ts, err = sim.Node.Chainstore.LoadTipSet(cctx.Context, ts.Parents()) if err != nil { return err } diff --git a/cmd/lotus-sim/info_state.go b/cmd/lotus-sim/info_state.go index 5c9541513..125dae81d 100644 --- a/cmd/lotus-sim/info_state.go +++ b/cmd/lotus-sim/info_state.go @@ -60,7 +60,7 @@ var infoStateGrowthSimCommand = &cli.Command{ var links []cid.Cid var totalSize uint64 - if err := store.View(c, func(data []byte) error { + if err := store.View(cctx.Context, c, func(data []byte) error { totalSize += uint64(len(data)) return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { if c.Prefix().Codec != cid.DagCBOR { @@ -131,7 +131,7 @@ var infoStateGrowthSimCommand = &cli.Command{ fmt.Fprintf(cctx.App.Writer, "%d: %s\n", ts.Height(), types.SizeStr(types.NewInt(parentStateSize))) } - ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + ts, err = sim.Node.Chainstore.LoadTipSet(cctx.Context, ts.Parents()) if err != nil { return err } diff --git a/cmd/lotus-sim/simulation/block.go b/cmd/lotus-sim/simulation/block.go index 93e6a3191..106bc53f5 100644 --- a/cmd/lotus-sim/simulation/block.go +++ b/cmd/lotus-sim/simulation/block.go @@ -73,7 +73,7 @@ func (sim *Simulation) makeTipSet(ctx context.Context, messages []*types.Message Timestamp: uts, ElectionProof: &types.ElectionProof{WinCount: 1}, }} - err = sim.Node.Chainstore.PersistBlockHeaders(blks...) + err = sim.Node.Chainstore.PersistBlockHeaders(ctx, blks...) if err != nil { return nil, xerrors.Errorf("failed to persist block headers: %w", err) } diff --git a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go index a6353e4f4..fb822eb6e 100644 --- a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go +++ b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go @@ -79,7 +79,7 @@ func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.S // 1. We don't charge a fee. // 2. The runtime has "fake" proof logic. // 3. We don't actually save any of the results. - r := lrand.NewStateRand(sm.ChainStore(), parentTs.Cids(), sm.Beacon()) + r := lrand.NewStateRand(sm.ChainStore(), parentTs.Cids(), sm.Beacon(), sm.GetNetworkVersion) vmopt := &vm.VMOpts{ StateBase: parentState, Epoch: parentTs.Height() + 1, @@ -88,7 +88,7 @@ func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.S Actors: filcns.NewActorRegistry(), Syscalls: sm.VMSys(), CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, parentTs.Height()+1), BaseFee: abi.NewTokenAmount(0), LookbackState: stmgr.LookbackStateGetterForTipset(sm, parentTs), } @@ -265,7 +265,7 @@ func (bb *BlockBuilder) Height() abi.ChainEpoch { // NetworkVersion returns the network version for the target block. func (bb *BlockBuilder) NetworkVersion() network.Version { - return bb.sm.GetNtwkVersion(bb.ctx, bb.Height()) + return bb.sm.GetNetworkVersion(bb.ctx, bb.Height()) } // StateManager returns the stmgr.StateManager. diff --git a/cmd/lotus-sim/simulation/messages.go b/cmd/lotus-sim/simulation/messages.go index 5bed27436..d6dd98d43 100644 --- a/cmd/lotus-sim/simulation/messages.go +++ b/cmd/lotus-sim/simulation/messages.go @@ -31,7 +31,7 @@ func (sim *Simulation) storeMessages(ctx context.Context, messages []*types.Mess // fail a pre-commit... var msgCids []cid.Cid for _, msg := range messages { - c, err := sim.Node.Chainstore.PutMessage(msg) + c, err := sim.Node.Chainstore.PutMessage(ctx, msg) if err != nil { return cid.Undef, err } diff --git a/cmd/lotus-sim/simulation/mock/mock.go b/cmd/lotus-sim/simulation/mock/mock.go index 38648f758..b1d36ba48 100644 --- a/cmd/lotus-sim/simulation/mock/mock.go +++ b/cmd/lotus-sim/simulation/mock/mock.go @@ -6,6 +6,8 @@ import ( "encoding/binary" "fmt" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" @@ -70,7 +72,13 @@ func (mockVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyPro ) return false, nil } -func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { + +// TODO: do the thing +func (mockVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + return false, nil +} + +func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) { panic("should not be called") } func (mockVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { diff --git a/cmd/lotus-sim/simulation/node.go b/cmd/lotus-sim/simulation/node.go index c18f27a33..da027ff4f 100644 --- a/cmd/lotus-sim/simulation/node.go +++ b/cmd/lotus-sim/simulation/node.go @@ -135,7 +135,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) StateManager: sm, stages: stages, } - if has, err := nd.MetadataDS.Has(sim.key("head")); err != nil { + if has, err := nd.MetadataDS.Has(ctx, sim.key("head")); err != nil { return nil, err } else if has { return nil, xerrors.Errorf("simulation named %s already exists", name) @@ -155,7 +155,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) // ListSims lists all simulations. func (nd *Node) ListSims(ctx context.Context) ([]string, error) { prefix := simulationPrefix.ChildString("head").String() - items, err := nd.MetadataDS.Query(query.Query{ + items, err := nd.MetadataDS.Query(ctx, query.Query{ Prefix: prefix, KeysOnly: true, Orders: []query.Order{query.OrderByKey{}}, @@ -192,7 +192,7 @@ func (nd *Node) DeleteSim(ctx context.Context, name string) error { var err error for _, field := range simFields { key := simulationPrefix.ChildString(field).ChildString(name) - err = multierr.Append(err, nd.MetadataDS.Delete(key)) + err = multierr.Append(err, nd.MetadataDS.Delete(ctx, key)) } return err } @@ -209,7 +209,7 @@ func (nd *Node) CopySim(ctx context.Context, oldName, newName string) error { values := make(map[string][]byte) for _, field := range simFields { key := simulationPrefix.ChildString(field).ChildString(oldName) - value, err := nd.MetadataDS.Get(key) + value, err := nd.MetadataDS.Get(ctx, key) if err == datastore.ErrNotFound { continue } else if err != nil { @@ -226,9 +226,9 @@ func (nd *Node) CopySim(ctx context.Context, oldName, newName string) error { key := simulationPrefix.ChildString(field).ChildString(newName) var err error if value, ok := values[field]; ok { - err = nd.MetadataDS.Put(key, value) + err = nd.MetadataDS.Put(ctx, key, value) } else { - err = nd.MetadataDS.Delete(key) + err = nd.MetadataDS.Delete(ctx, key) } if err != nil { return err diff --git a/cmd/lotus-sim/simulation/simulation.go b/cmd/lotus-sim/simulation/simulation.go index 02792e332..21e8bff71 100644 --- a/cmd/lotus-sim/simulation/simulation.go +++ b/cmd/lotus-sim/simulation/simulation.go @@ -90,7 +90,7 @@ type Simulation struct { // loadConfig loads a simulation's config from the datastore. This must be called on startup and may // be called to restore the config from-disk. func (sim *Simulation) loadConfig() error { - configBytes, err := sim.Node.MetadataDS.Get(sim.key("config")) + configBytes, err := sim.Node.MetadataDS.Get(context.TODO(), sim.key("config")) if err == nil { err = json.Unmarshal(configBytes, &sim.config) } @@ -111,7 +111,7 @@ func (sim *Simulation) saveConfig() error { if err != nil { return err } - return sim.Node.MetadataDS.Put(sim.key("config"), buf) + return sim.Node.MetadataDS.Put(context.TODO(), sim.key("config"), buf) } var simulationPrefix = datastore.NewKey("/simulation") @@ -124,7 +124,7 @@ func (sim *Simulation) key(subkey string) datastore.Key { // loadNamedTipSet the tipset with the given name (for this simulation) func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { - tskBytes, err := sim.Node.MetadataDS.Get(sim.key(name)) + tskBytes, err := sim.Node.MetadataDS.Get(context.TODO(), sim.key(name)) if err != nil { return nil, xerrors.Errorf("failed to load tipset %s/%s: %w", sim.name, name, err) } @@ -132,7 +132,7 @@ func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { if err != nil { return nil, xerrors.Errorf("failed to parse tipste %v (%s/%s): %w", tskBytes, sim.name, name, err) } - ts, err := sim.Node.Chainstore.LoadTipSet(tsk) + ts, err := sim.Node.Chainstore.LoadTipSet(context.TODO(), tsk) if err != nil { return nil, xerrors.Errorf("failed to load tipset %s (%s/%s): %w", tsk, sim.name, name, err) } @@ -141,7 +141,7 @@ func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { // storeNamedTipSet stores the tipset at name (relative to the simulation). func (sim *Simulation) storeNamedTipSet(name string, ts *types.TipSet) error { - if err := sim.Node.MetadataDS.Put(sim.key(name), ts.Key().Bytes()); err != nil { + if err := sim.Node.MetadataDS.Put(context.TODO(), sim.key(name), ts.Key().Bytes()); err != nil { return xerrors.Errorf("failed to store tipset (%s/%s): %w", sim.name, name, err) } return nil @@ -159,7 +159,7 @@ func (sim *Simulation) GetStart() *types.TipSet { // GetNetworkVersion returns the current network version for the simulation. func (sim *Simulation) GetNetworkVersion() network.Version { - return sim.StateManager.GetNtwkVersion(context.TODO(), sim.head.Height()) + return sim.StateManager.GetNetworkVersion(context.TODO(), sim.head.Height()) } // SetHead updates the current head of the simulation and stores it in the metadata store. This is @@ -308,7 +308,7 @@ func (sim *Simulation) Walk( stCid = ts.MinTicketBlock().ParentStateRoot recCid = ts.MinTicketBlock().ParentMessageReceipts - ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + ts, err = sim.Node.Chainstore.LoadTipSet(ctx, ts.Parents()) if err != nil { return xerrors.Errorf("loading parent: %w", err) } @@ -342,7 +342,7 @@ func (sim *Simulation) Walk( break } - msgs, err := sim.Node.Chainstore.MessagesForTipset(job.ts) + msgs, err := sim.Node.Chainstore.MessagesForTipset(ctx, job.ts) if err != nil { return err } diff --git a/cmd/lotus-sim/simulation/step.go b/cmd/lotus-sim/simulation/step.go index 902f2ad6c..f9d58529e 100644 --- a/cmd/lotus-sim/simulation/step.go +++ b/cmd/lotus-sim/simulation/step.go @@ -41,8 +41,8 @@ func (sim *Simulation) popNextMessages(ctx context.Context) ([]*types.Message, e // This isn't what the network does, but it makes things easier. Otherwise, we'd need to run // migrations before this epoch and I'd rather not deal with that. nextHeight := parentTs.Height() + 1 - prevVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight-1) - nextVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight) + prevVer := sim.StateManager.GetNetworkVersion(ctx, nextHeight-1) + nextVer := sim.StateManager.GetNetworkVersion(ctx, nextHeight) if nextVer != prevVer { log.Warnw("packing no messages for version upgrade block", "old", prevVer, diff --git a/cmd/lotus/backup.go b/cmd/lotus/backup.go index d41e0c098..4bdd21322 100644 --- a/cmd/lotus/backup.go +++ b/cmd/lotus/backup.go @@ -1,7 +1,6 @@ package main import ( - "context" "os" dstore "github.com/ipfs/go-datastore" @@ -88,7 +87,7 @@ func restore(cctx *cli.Context, r repo.Repo) error { log.Info("Restoring metadata backup") - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(cctx.Context, "/metadata") if err != nil { return err } @@ -111,10 +110,10 @@ func restore(cctx *cli.Context, r repo.Repo) error { log.Info("Resetting chainstore metadata") chainHead := dstore.NewKey("head") - if err := mds.Delete(chainHead); err != nil { + if err := mds.Delete(cctx.Context, chainHead); err != nil { return xerrors.Errorf("clearing chain head: %w", err) } - if err := store.FlushValidationCache(mds); err != nil { + if err := store.FlushValidationCache(cctx.Context, mds); err != nil { return xerrors.Errorf("clearing chain validation cache: %w", err) } diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 3f5de8138..813a0a9bd 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -474,7 +474,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return xerrors.Errorf("failed to open blockstore: %w", err) } - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(ctx, "/metadata") if err != nil { return err } @@ -499,14 +499,14 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) bar.Units = pb.U_BYTES bar.Start() - ts, err := cst.Import(br) + ts, err := cst.Import(ctx, br) bar.Finish() if err != nil { return xerrors.Errorf("importing chain failed: %w", err) } - if err := cst.FlushValidationCache(); err != nil { + if err := cst.FlushValidationCache(ctx); err != nil { return xerrors.Errorf("flushing validation cache failed: %w", err) } @@ -515,7 +515,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return err } - err = cst.SetGenesis(gb.Blocks()[0]) + err = cst.SetGenesis(ctx, gb.Blocks()[0]) if err != nil { return err } diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go index 7aa2e704e..c19b9fce4 100644 --- a/cmd/lotus/main.go +++ b/cmd/lotus/main.go @@ -39,7 +39,7 @@ func main() { jaeger := tracing.SetupJaegerTracing("lotus") defer func() { if jaeger != nil { - jaeger.Flush() + _ = jaeger.ForceFlush(context.Background()) } }() @@ -47,7 +47,9 @@ func main() { cmd := cmd originBefore := cmd.Before cmd.Before = func(cctx *cli.Context) error { - trace.UnregisterExporter(jaeger) + if jaeger != nil { + _ = jaeger.Shutdown(cctx.Context) + } jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) if originBefore != nil { diff --git a/cmd/tvx/exec.go b/cmd/tvx/exec.go index 15bb543a5..750684c36 100644 --- a/cmd/tvx/exec.go +++ b/cmd/tvx/exec.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "io/fs" "log" "os" "path/filepath" @@ -136,25 +137,31 @@ func processTipsetOpts() error { } func execVectorDir(path string, outdir string) error { - files, err := filepath.Glob(filepath.Join(path, "*")) - if err != nil { - return fmt.Errorf("failed to glob input directory %s: %w", path, err) - } - for _, f := range files { - outfile := strings.TrimSuffix(filepath.Base(f), filepath.Ext(f)) + ".out" + return filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return fmt.Errorf("failed while visiting path %s: %w", path, err) + } + if d.IsDir() || !strings.HasSuffix(path, "json") { + return nil + } + // Create an output file to capture the output from the run of the vector. + outfile := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) + ".out" outpath := filepath.Join(outdir, outfile) outw, err := os.Create(outpath) if err != nil { return fmt.Errorf("failed to create file %s: %w", outpath, err) } - log.Printf("processing vector %s; sending output to %s", f, outpath) + log.Printf("processing vector %s; sending output to %s", path, outpath) + + // Actually run the vector. log.SetOutput(io.MultiWriter(os.Stderr, outw)) // tee the output. - _, _ = execVectorFile(new(conformance.LogReporter), f) + _, _ = execVectorFile(new(conformance.LogReporter), path) log.SetOutput(os.Stderr) _ = outw.Close() - } - return nil + + return nil + }) } func execVectorsStdin() error { diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go index 040005641..ba54a7f9e 100644 --- a/cmd/tvx/stores.go +++ b/cmd/tvx/stores.go @@ -113,14 +113,14 @@ func (pb *proxyingBlockstore) FinishTracing() map[cid.Cid]struct{} { return ret } -func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { +func (pb *proxyingBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { pb.lk.Lock() if pb.tracing { pb.traced[cid] = struct{}{} } pb.lk.Unlock() - if block, err := pb.Blockstore.Get(cid); err == nil { + if block, err := pb.Blockstore.Get(ctx, cid); err == nil { return block, err } @@ -134,7 +134,7 @@ func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { return nil, err } - err = pb.Blockstore.Put(block) + err = pb.Blockstore.Put(ctx, block) if err != nil { return nil, err } @@ -142,16 +142,16 @@ func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { return block, nil } -func (pb *proxyingBlockstore) Put(block blocks.Block) error { +func (pb *proxyingBlockstore) Put(ctx context.Context, block blocks.Block) error { pb.lk.Lock() if pb.tracing { pb.traced[block.Cid()] = struct{}{} } pb.lk.Unlock() - return pb.Blockstore.Put(block) + return pb.Blockstore.Put(ctx, block) } -func (pb *proxyingBlockstore) PutMany(blocks []blocks.Block) error { +func (pb *proxyingBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { pb.lk.Lock() if pb.tracing { for _, b := range blocks { @@ -159,5 +159,5 @@ func (pb *proxyingBlockstore) PutMany(blocks []blocks.Block) error { } } pb.lk.Unlock() - return pb.Blockstore.PutMany(blocks) + return pb.Blockstore.PutMany(ctx, blocks) } diff --git a/conformance/driver.go b/conformance/driver.go index 8669089da..a065d1530 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -5,6 +5,8 @@ import ( gobig "math/big" "os" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/state" @@ -153,6 +155,14 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params results: []*vm.ApplyRet{}, } + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { + vmopt.CircSupplyCalc = func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { + return big.Zero(), nil + } + + return vm.NewVM(ctx, vmopt) + }) + postcid, receiptsroot, err := tse.ApplyBlocks(context.Background(), sm, params.ParentEpoch, @@ -179,11 +189,12 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params } type ExecuteMessageParams struct { - Preroot cid.Cid - Epoch abi.ChainEpoch - Message *types.Message - CircSupply abi.TokenAmount - BaseFee abi.TokenAmount + Preroot cid.Cid + Epoch abi.ChainEpoch + Message *types.Message + CircSupply abi.TokenAmount + BaseFee abi.TokenAmount + NetworkVersion network.Version // Rand is an optional vm.Rand implementation to use. If nil, the driver // will use a vm.Rand that returns a fixed value for all calls. @@ -202,13 +213,6 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP params.Rand = NewFixedRand() } - // dummy state manager; only to reference the GetNetworkVersion method, - // which does not depend on state. - sm, err := stmgr.NewStateManager(nil, filcns.NewTipSetExecutor(), nil, filcns.DefaultUpgradeSchedule(), nil) - if err != nil { - return nil, cid.Cid{}, err - } - vmOpts := &vm.VMOpts{ StateBase: params.Preroot, Epoch: params.Epoch, @@ -217,9 +221,9 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP CircSupplyCalc: func(_ context.Context, _ abi.ChainEpoch, _ *state.StateTree) (abi.TokenAmount, error) { return params.CircSupply, nil }, - Rand: params.Rand, - BaseFee: params.BaseFee, - NtwkVersion: sm.GetNtwkVersion, + Rand: params.Rand, + BaseFee: params.BaseFee, + NetworkVersion: params.NetworkVersion, } lvm, err := vm.NewVM(context.TODO(), vmOpts) diff --git a/conformance/rand_fixed.go b/conformance/rand_fixed.go index c34980efe..d356b53d0 100644 --- a/conformance/rand_fixed.go +++ b/conformance/rand_fixed.go @@ -19,22 +19,10 @@ func NewFixedRand() vm.Rand { return &fixedRand{} } -func (r *fixedRand) GetChainRandomnessV1(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { +func (r *fixedRand) GetChainRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. } -func (r *fixedRand) GetChainRandomnessV2(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { - return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. -} - -func (r *fixedRand) GetBeaconRandomnessV3(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { - return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. -} - -func (r *fixedRand) GetBeaconRandomnessV1(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { - return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. -} - -func (r *fixedRand) GetBeaconRandomnessV2(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { +func (r *fixedRand) GetBeaconRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. } diff --git a/conformance/rand_record.go b/conformance/rand_record.go index 97bd93eb4..8422ad31d 100644 --- a/conformance/rand_record.go +++ b/conformance/rand_record.go @@ -45,17 +45,9 @@ func (r *RecordingRand) loadHead() { r.head = head.Key() } -func (r *RecordingRand) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { r.once.Do(r.loadHead) - // FullNode's ChainGetRandomnessFromTickets handles whether we should be looking forward or back + // FullNode's v0 ChainGetRandomnessFromTickets handles whether we should be looking forward or back ret, err := r.api.ChainGetRandomnessFromTickets(ctx, r.head, pers, round, entropy) if err != nil { return ret, err @@ -79,19 +71,7 @@ func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.Doma return ret, err } -func (r *RecordingRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { r.once.Do(r.loadHead) ret, err := r.api.StateGetRandomnessFromBeacon(ctx, pers, round, entropy, r.head) if err != nil { diff --git a/conformance/rand_replay.go b/conformance/rand_replay.go index 5d850f7eb..6c2282752 100644 --- a/conformance/rand_replay.go +++ b/conformance/rand_replay.go @@ -43,15 +43,7 @@ func (r *ReplayingRand) match(requested schema.RandomnessRule) ([]byte, bool) { return nil, false } -func (r *ReplayingRand) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy, false) -} - -func (r *ReplayingRand) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy, true) -} - -func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { +func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { rule := schema.RandomnessRule{ Kind: schema.RandomnessChain, DomainSeparationTag: int64(pers), @@ -66,26 +58,10 @@ func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.Doma r.reporter.Logf("returning fallback chain randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) - if lookback { - return r.fallback.GetChainRandomnessV1(ctx, pers, round, entropy) - } - - return r.fallback.GetChainRandomnessV2(ctx, pers, round, entropy) + return r.fallback.GetChainRandomness(ctx, pers, round, entropy) } -func (r *ReplayingRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy, false) -} - -func (r *ReplayingRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy, true) -} - -func (r *ReplayingRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy, true) -} - -func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { +func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { rule := schema.RandomnessRule{ Kind: schema.RandomnessBeacon, DomainSeparationTag: int64(pers), @@ -100,9 +76,5 @@ func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.Dom r.reporter.Logf("returning fallback beacon randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) - if lookback { - return r.fallback.GetBeaconRandomnessV1(ctx, pers, round, entropy) - } - - return r.fallback.GetBeaconRandomnessV3(ctx, pers, round, entropy) + return r.fallback.GetBeaconRandomness(ctx, pers, round, entropy) } diff --git a/conformance/runner.go b/conformance/runner.go index 1044bb329..fd44ecff9 100644 --- a/conformance/runner.go +++ b/conformance/runner.go @@ -7,6 +7,7 @@ import ( "encoding/base64" "fmt" "io/ioutil" + "math" "os" "os/exec" "strconv" @@ -14,6 +15,7 @@ import ( "github.com/fatih/color" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" "github.com/hashicorp/go-multierror" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-blockservice" @@ -27,6 +29,7 @@ import ( "github.com/filecoin-project/test-vectors/schema" "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" ) @@ -50,11 +53,58 @@ var TipsetVectorOpts struct { OnTipsetApplied []func(bs blockstore.Blockstore, params *ExecuteTipsetParams, res *ExecuteTipsetResult) } +type GasPricingRestoreFn func() + +// adjustGasPricing adjusts the global gas price mapping to make sure that the +// gas pricelist for vector's network version is used at the vector's epoch. +// Because it manipulates a global, it returns a function that reverts the +// change. The caller MUST invoke this function or the test vector runner will +// become invalid. +func adjustGasPricing(vectorEpoch abi.ChainEpoch, vectorNv network.Version) GasPricingRestoreFn { + // Stash the current pricing mapping. + // Ok to take a reference instead of a copy, because we override the map + // with a new one below. + var old = vm.Prices + + // Resolve the epoch at which the vector network version kicks in. + var epoch abi.ChainEpoch = math.MaxInt64 + if vectorNv == network.Version0 { + // genesis is not an upgrade. + epoch = 0 + } else { + for _, u := range filcns.DefaultUpgradeSchedule() { + if u.Network == vectorNv { + epoch = u.Height + break + } + } + } + + if epoch == math.MaxInt64 { + panic(fmt.Sprintf("could not resolve network version %d to height", vectorNv)) + } + + // Find the right pricelist for this network version. + pricelist := vm.PricelistByEpoch(epoch) + + // Override the pricing mapping by setting the relevant pricelist for the + // network version at the epoch where the vector runs. + vm.Prices = map[abi.ChainEpoch]vm.Pricelist{ + vectorEpoch: pricelist, + } + + // Return a function to restore the original mapping. + return func() { + vm.Prices = old + } +} + // ExecuteMessageVector executes a message-class test vector. func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) { var ( ctx = context.Background() - baseEpoch = variant.Epoch + baseEpoch = abi.ChainEpoch(variant.Epoch) + nv = network.Version(variant.NetworkVersion) root = vector.Pre.StateTree.RootCID ) @@ -67,6 +117,10 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema // Create a new Driver. driver := NewDriver(ctx, vector.Selector, DriverOpts{DisableVMFlush: true}) + // Monkey patch the gas pricing. + revertFn := adjustGasPricing(baseEpoch, nv) + defer revertFn() + // Apply every message. for i, m := range vector.ApplyMessages { msg, err := types.DecodeMessage(m.Bytes) @@ -76,18 +130,19 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema // add the epoch offset if one is set. if m.EpochOffset != nil { - baseEpoch += *m.EpochOffset + baseEpoch += abi.ChainEpoch(*m.EpochOffset) } // Execute the message. var ret *vm.ApplyRet ret, root, err = driver.ExecuteMessage(bs, ExecuteMessageParams{ - Preroot: root, - Epoch: abi.ChainEpoch(baseEpoch), - Message: msg, - BaseFee: BaseFeeOrDefault(vector.Pre.BaseFee), - CircSupply: CircSupplyOrDefault(vector.Pre.CircSupply), - Rand: NewReplayingRand(r, vector.Randomness), + Preroot: root, + Epoch: baseEpoch, + Message: msg, + BaseFee: BaseFeeOrDefault(vector.Pre.BaseFee), + CircSupply: CircSupplyOrDefault(vector.Pre.CircSupply), + Rand: NewReplayingRand(r, vector.Randomness), + NetworkVersion: nv, }) if err != nil { r.Fatalf("fatal failure when executing message: %s", err) @@ -184,8 +239,10 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema. func AssertMsgResult(r Reporter, expected *schema.Receipt, actual *vm.ApplyRet, label string) { r.Helper() + applyret := actual if expected, actual := exitcode.ExitCode(expected.ExitCode), actual.ExitCode; expected != actual { r.Errorf("exit code of msg %s did not match; expected: %s, got: %s", label, expected, actual) + r.Errorf("\t\\==> actor error: %s", applyret.ActorErr) } if expected, actual := expected.GasUsed, actual.GasUsed; expected != actual { r.Errorf("gas used of msg %s did not match; expected: %d, got: %d", label, expected, actual) @@ -282,7 +339,7 @@ func writeStateToTempCAR(bs blockstore.Blockstore, roots ...cid.Cid) (string, er continue } // ignore things we don't have, the state tree is incomplete. - if has, err := bs.Has(link.Cid); err != nil { + if has, err := bs.Has(context.TODO(), link.Cid); err != nil { return nil, err } else if has { out = append(out, link) @@ -317,7 +374,7 @@ func LoadBlockstore(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, defer r.Close() // nolint // Load the CAR embedded in the test vector into the Blockstore. - _, err = car.LoadCar(bs, r) + _, err = car.LoadCar(context.TODO(), bs, r) if err != nil { return nil, fmt.Errorf("failed to load state tree car from test vector: %s", err) } diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 3d27f0c75..caf0419fe 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -49,6 +49,7 @@ * [LogSetLevel](#LogSetLevel) * [Market](#Market) * [MarketCancelDataTransfer](#MarketCancelDataTransfer) + * [MarketDataTransferDiagnostics](#MarketDataTransferDiagnostics) * [MarketDataTransferUpdates](#MarketDataTransferUpdates) * [MarketGetAsk](#MarketGetAsk) * [MarketGetDealUpdates](#MarketGetDealUpdates) @@ -94,9 +95,13 @@ * [ReturnAddPiece](#ReturnAddPiece) * [ReturnFetch](#ReturnFetch) * [ReturnFinalizeSector](#ReturnFinalizeSector) + * [ReturnGenerateSectorKeyFromData](#ReturnGenerateSectorKeyFromData) * [ReturnMoveStorage](#ReturnMoveStorage) + * [ReturnProveReplicaUpdate1](#ReturnProveReplicaUpdate1) + * [ReturnProveReplicaUpdate2](#ReturnProveReplicaUpdate2) * [ReturnReadPiece](#ReturnReadPiece) * [ReturnReleaseUnsealed](#ReturnReleaseUnsealed) + * [ReturnReplicaUpdate](#ReturnReplicaUpdate) * [ReturnSealCommit1](#ReturnSealCommit1) * [ReturnSealCommit2](#ReturnSealCommit2) * [ReturnSealPreCommit1](#ReturnSealPreCommit1) @@ -114,6 +119,7 @@ * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration) * [SectorGetSealDelay](#SectorGetSealDelay) * [SectorMarkForUpgrade](#SectorMarkForUpgrade) + * [SectorMatchPendingPiecesToOpenSectors](#SectorMatchPendingPiecesToOpenSectors) * [SectorPreCommitFlush](#SectorPreCommitFlush) * [SectorPreCommitPending](#SectorPreCommitPending) * [SectorRemove](#SectorRemove) @@ -138,6 +144,7 @@ * [StorageDeclareSector](#StorageDeclareSector) * [StorageDropSector](#StorageDropSector) * [StorageFindSector](#StorageFindSector) + * [StorageGetLocks](#StorageGetLocks) * [StorageInfo](#StorageInfo) * [StorageList](#StorageList) * [StorageLocal](#StorageLocal) @@ -209,7 +216,7 @@ Response: ```json { "Version": "string value", - "APIVersion": 131328, + "APIVersion": 131584, "BlockDelay": 42 } ``` @@ -236,10 +243,18 @@ Inputs: `null` Response: ```json { - "PreCommitControl": null, - "CommitControl": null, - "TerminateControl": null, - "DealPublishControl": null, + "PreCommitControl": [ + "f01234" + ], + "CommitControl": [ + "f01234" + ], + "TerminateControl": [ + "f01234" + ], + "DealPublishControl": [ + "f01234" + ], "DisableOwnerFallback": true, "DisableWorkerFallback": true } @@ -270,7 +285,9 @@ Perms: admin Inputs: ```json [ - null + [ + "write" + ] ] ``` @@ -288,7 +305,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "write" +] +``` ## Check @@ -302,7 +324,15 @@ Inputs: ```json [ 8, - null, + [ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + } + ], true ] ``` @@ -325,12 +355,31 @@ Perms: read Inputs: ```json [ - null, - null + [ + { + "SealProof": 8, + "SectorNumber": 9, + "SectorKey": null, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], + "Bw==", + 10101, + 15 ] ``` -Response: `null` +Response: +```json +[ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } +] +``` ## Create @@ -364,7 +413,16 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", + "Success": false, + "Error": "\u003cerror\u003e" + } +] +``` ### DagstoreInitializeAll DagstoreInitializeAll initializes all uninitialized shards in bulk, @@ -440,7 +498,16 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", + "State": "ShardStateAvailable", + "Error": "\u003cerror\u003e" + } +] +``` ### DagstoreRecoverShard DagstoreRecoverShard attempts to recover a failed shard. @@ -542,7 +609,33 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101 + } + } +] +``` ### DealsPieceCidBlocklist @@ -551,7 +644,14 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### DealsSetConsiderOfflineRetrievalDeals @@ -645,7 +745,11 @@ Perms: admin Inputs: ```json [ - null + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] ] ``` @@ -673,7 +777,28 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Type": { + "System": "string value", + "Subsystem": "string value" + }, + "Active": true, + "LastActive": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + }, + "LastResolved": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + } + } +] +``` ### LogList @@ -682,7 +807,12 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + "string value" +] +``` ### LogSetLevel @@ -719,6 +849,113 @@ Inputs: Response: `{}` +### MarketDataTransferDiagnostics +MarketDataTransferDiagnostics generates debugging information about current data transfers over graphsync + + +Perms: write + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ReceivingTransfers": [ + { + "RequestID": 4, + "RequestState": "string value", + "IsCurrentChannelRequest": true, + "ChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "ChannelState": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + }, + "Diagnostics": [ + "string value" + ] + } + ], + "SendingTransfers": [ + { + "RequestID": 4, + "RequestState": "string value", + "IsCurrentChannelRequest": true, + "ChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "ChannelState": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + }, + "Diagnostics": [ + "string value" + ] + } + ] +} +``` + ### MarketDataTransferUpdates @@ -741,7 +978,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } ``` @@ -879,7 +1129,40 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } +] +``` ### MarketListDeals @@ -888,7 +1171,33 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101 + } + } +] +``` ### MarketListIncompleteDeals @@ -897,7 +1206,65 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "AddFundsCid": null, + "PublishCid": null, + "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "State": 42, + "PiecePath": ".lotusminer/fstmp123", + "MetadataPath": ".lotusminer/fstmp123", + "SlashEpoch": 10101, + "FastRetrieval": true, + "Message": "string value", + "FundsReserved": "0", + "Ref": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "AvailableForRetrieval": true, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "TransferChannelId": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "SectorNumber": 9, + "InboundCAR": "string value" + } +] +``` ### MarketListRetrievalDeals @@ -906,7 +1273,51 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "Selector": { + "Raw": "Ynl0ZSBhcnJheQ==" + }, + "PieceCID": null, + "PricePerByte": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "UnsealPrice": "0", + "StoreID": 42, + "ChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "PieceInfo": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Deals": [ + { + "DealID": 5432, + "SectorID": 9, + "Offset": 1032, + "Length": 1032 + } + ] + }, + "Status": 0, + "Receiver": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "TotalSent": 42, + "FundsReceived": "0", + "Message": "string value", + "CurrentInterval": 42, + "LegacyProtocol": true + } +] +``` ### MarketPendingDeals @@ -918,7 +1329,29 @@ Inputs: `null` Response: ```json { - "Deals": null, + "Deals": [ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ], "PublishPeriodStart": "0001-01-01T00:00:00Z", "PublishPeriod": 60000000000 } @@ -1036,7 +1469,9 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ``` @@ -1133,9 +1568,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -1152,9 +1593,15 @@ Inputs: `null` Response: ```json { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ``` @@ -1167,9 +1614,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -1186,7 +1639,9 @@ Inputs: [ { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ] ``` @@ -1237,7 +1692,9 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ``` @@ -1258,8 +1715,12 @@ Response: { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Agent": "string value", - "Addrs": null, - "Protocols": null, + "Addrs": [ + "string value" + ], + "Protocols": [ + "string value" + ], "ConnMgrMeta": { "FirstSeen": "0001-01-01T00:00:00Z", "Value": 123, @@ -1280,7 +1741,17 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` ### NetPubsubScores @@ -1289,7 +1760,28 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Score": { + "Score": 12.3, + "Topics": { + "/blocks": { + "TimeInMesh": 60000000000, + "FirstMessageDeliveries": 122, + "MeshMessageDeliveries": 1234, + "InvalidMessageDeliveries": 3 + } + }, + "AppSpecificScore": 12.3, + "IPColocationFactor": 12.3, + "BehaviourPenalty": 12.3 + } + } +] +``` ## Pieces @@ -1314,7 +1806,15 @@ Response: "CID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "PieceBlockLocations": null + "PieceBlockLocations": [ + { + "RelOffset": 42, + "BlockSize": 42, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ] } ``` @@ -1338,7 +1838,14 @@ Response: "PieceCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "Deals": null + "Deals": [ + { + "DealID": 5432, + "SectorID": 9, + "Offset": 1032, + "Length": 1032 + } + ] } ``` @@ -1349,7 +1856,14 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### PiecesListPieces @@ -1358,7 +1872,14 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ## Pledge @@ -1440,6 +1961,30 @@ Response: `{}` ### ReturnFinalizeSector +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnGenerateSectorKeyFromData + + Perms: admin Inputs: @@ -1485,6 +2030,58 @@ Inputs: Response: `{}` +### ReturnProveReplicaUpdate1 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + [ + "Ynl0ZSBhcnJheQ==" + ], + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnProveReplicaUpdate2 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + "Bw==", + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + ### ReturnReadPiece @@ -1534,6 +2131,38 @@ Inputs: Response: `{}` +### ReturnReplicaUpdate + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "NewSealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "NewUnsealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + ### ReturnSealCommit1 @@ -1549,7 +2178,7 @@ Inputs: }, "ID": "07070707-0707-0707-0707-070707070707" }, - null, + "Bw==", { "Code": 0, "Message": "string value" @@ -1574,7 +2203,7 @@ Inputs: }, "ID": "07070707-0707-0707-0707-070707070707" }, - null, + "Bw==", { "Code": 0, "Message": "string value" @@ -1599,7 +2228,7 @@ Inputs: }, "ID": "07070707-0707-0707-0707-070707070707" }, - null, + "Bw==", { "Code": 0, "Message": "string value" @@ -1785,7 +2414,22 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Sectors": [ + 123, + 124 + ], + "FailedSectors": { + "123": "can't acquire read lock" + }, + "Msg": null, + "Error": "string value" + } +] +``` ### SectorCommitPending SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message @@ -1795,7 +2439,15 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Miner": 1000, + "Number": 9 + } +] +``` ### SectorGetExpectedSealDuration SectorGetExpectedSealDuration gets the expected time for a sector to seal @@ -1826,12 +2478,22 @@ Perms: admin Inputs: ```json [ - 9 + 9, + true ] ``` Response: `{}` +### SectorMatchPendingPiecesToOpenSectors + + +Perms: admin + +Inputs: `null` + +Response: `{}` + ### SectorPreCommitFlush SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. Returns null if message wasn't sent @@ -1841,7 +2503,19 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Sectors": [ + 123, + 124 + ], + "Msg": null, + "Error": "string value" + } +] +``` ### SectorPreCommitPending SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message @@ -1851,7 +2525,15 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Miner": 1000, + "Number": 9 + } +] +``` ### SectorRemove SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can @@ -1951,7 +2633,15 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Miner": 1000, + "Number": 9 + } +] +``` ## Sectors @@ -1981,7 +2671,9 @@ Perms: read Inputs: ```json [ - null + [ + "Proving" + ] ] ``` @@ -2035,14 +2727,49 @@ Response: "CommD": null, "CommR": null, "Proof": "Ynl0ZSBhcnJheQ==", - "Deals": null, - "Pieces": null, + "Deals": [ + 5432 + ], + "Pieces": [ + { + "Piece": { + "Size": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + }, + "DealInfo": { + "PublishCid": null, + "DealID": 5432, + "DealProposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealSchedule": { + "StartEpoch": 10101, + "EndEpoch": 10101 + }, + "KeepUnsealed": true + } + } + ], "Ticket": { - "Value": null, + "Value": "Bw==", "Epoch": 10101 }, "Seed": { - "Value": null, + "Value": "Bw==", "Epoch": 10101 }, "PreCommitMsg": null, @@ -2050,7 +2777,14 @@ Response: "Retries": 42, "ToUpgrade": true, "LastErr": "string value", - "Log": null, + "Log": [ + { + "Kind": "string value", + "Timestamp": 42, + "Trace": "string value", + "Message": "string value" + } + ], "SealProof": 8, "Activation": 10101, "Expiration": 10101, @@ -2094,7 +2828,7 @@ Inputs: }, 1040384, 1024, - null, + "Bw==", null ] ``` @@ -2144,13 +2878,19 @@ Inputs: [ { "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", - "URLs": null, + "URLs": [ + "string value" + ], "Weight": 42, "MaxStorage": 42, "CanSeal": true, "CanStore": true, - "Groups": null, - "AllowTo": null + "Groups": [ + "string value" + ], + "AllowTo": [ + "string value" + ] }, { "Capacity": 9, @@ -2179,7 +2919,27 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + "URLs": [ + "string value" + ], + "Weight": 42, + "MaxStorage": 42, + "CanSeal": true, + "CanStore": true, + "Groups": [ + "string value" + ], + "AllowTo": [ + "string value" + ] + } +] +``` ### StorageDeclareSector @@ -2238,7 +2998,56 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + "URLs": [ + "string value" + ], + "Weight": 42, + "CanSeal": true, + "CanStore": true, + "Primary": true + } +] +``` + +### StorageGetLocks + + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "Locks": [ + { + "Sector": { + "Miner": 1000, + "Number": 123 + }, + "Write": [ + 0, + 0, + 1, + 0, + 0 + ], + "Read": [ + 2, + 3, + 0, + 0, + 0 + ] + } + ] +} +``` ### StorageInfo @@ -2256,13 +3065,19 @@ Response: ```json { "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", - "URLs": null, + "URLs": [ + "string value" + ], "Weight": 42, "MaxStorage": 42, "CanSeal": true, "CanStore": true, - "Groups": null, - "AllowTo": null + "Groups": [ + "string value" + ], + "AllowTo": [ + "string value" + ] } ``` diff --git a/documentation/en/api-v0-methods-worker.md b/documentation/en/api-v0-methods-worker.md index 7a1c2e2f2..f2d9019e2 100644 --- a/documentation/en/api-v0-methods-worker.md +++ b/documentation/en/api-v0-methods-worker.md @@ -11,12 +11,19 @@ * [AddPiece](#AddPiece) * [Finalize](#Finalize) * [FinalizeSector](#FinalizeSector) +* [Generate](#Generate) + * [GenerateSectorKeyFromData](#GenerateSectorKeyFromData) * [Move](#Move) * [MoveStorage](#MoveStorage) * [Process](#Process) * [ProcessSession](#ProcessSession) +* [Prove](#Prove) + * [ProveReplicaUpdate1](#ProveReplicaUpdate1) + * [ProveReplicaUpdate2](#ProveReplicaUpdate2) * [Release](#Release) * [ReleaseUnsealed](#ReleaseUnsealed) +* [Replica](#Replica) + * [ReplicaUpdate](#ReplicaUpdate) * [Seal](#Seal) * [SealCommit1](#SealCommit1) * [SealCommit2](#SealCommit2) @@ -96,7 +103,9 @@ Response: "MemSwap": 42, "MemSwapUsed": 42, "CPUs": 42, - "GPUs": null, + "GPUs": [ + "string value" + ], "Resources": { "seal/v0/addpiece": { "0": { @@ -684,7 +693,18 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + "Weight": 42, + "LocalPath": "string value", + "CanSeal": true, + "CanStore": true + } +] +``` ### Remove Storage / Other @@ -721,7 +741,7 @@ Perms: admin Inputs: `null` -Response: `131328` +Response: `131584` ## Add @@ -742,7 +762,9 @@ Inputs: }, "ProofType": 8 }, - null, + [ + 1024 + ], 1024, {} ] @@ -777,7 +799,47 @@ Inputs: }, "ProofType": 8 }, - null + [ + { + "Offset": 1024, + "Size": 1024 + } + ] +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Generate + + +### GenerateSectorKeyFromData + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } ] ``` @@ -839,6 +901,88 @@ Inputs: `null` Response: `"07070707-0707-0707-0707-070707070707"` +## Prove + + +### ProveReplicaUpdate1 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### ProveReplicaUpdate2 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + [ + "Ynl0ZSBhcnJheQ==" + ] +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + ## Release @@ -857,7 +1001,52 @@ Inputs: }, "ProofType": 8 }, - null + [ + { + "Offset": 1024, + "Size": 1024 + } + ] +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Replica + + +### ReplicaUpdate + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + [ + { + "Size": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ] ] ``` @@ -890,9 +1079,16 @@ Inputs: }, "ProofType": 8 }, - null, - null, - null, + "Bw==", + "Bw==", + [ + { + "Size": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], { "Unsealed": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -930,7 +1126,7 @@ Inputs: }, "ProofType": 8 }, - null + "Bw==" ] ``` @@ -960,8 +1156,15 @@ Inputs: }, "ProofType": 8 }, - null, - null + "Bw==", + [ + { + "Size": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ] ] ``` @@ -991,7 +1194,7 @@ Inputs: }, "ProofType": 8 }, - null + "Bw==" ] ``` @@ -1108,7 +1311,7 @@ Inputs: }, 1040384, 1024, - null, + "Bw==", { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" } diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 2e57b8d7d..88c4d8187 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -283,7 +283,7 @@ Response: ```json { "Version": "string value", - "APIVersion": 131328, + "APIVersion": 131584, "BlockDelay": 42 } ``` @@ -299,7 +299,9 @@ Perms: admin Inputs: ```json [ - null + [ + "write" + ] ] ``` @@ -317,7 +319,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "write" +] +``` ## Beacon The Beacon method group contains methods for interacting with the random beacon (DRAND) @@ -422,9 +429,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -479,9 +500,54 @@ Inputs: Response: ```json { - "BlsMessages": null, - "SecpkMessages": null, - "Cids": null + "BlsMessages": [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "SecpkMessages": [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "Cids": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ``` @@ -557,7 +623,31 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + } +] +``` ### ChainGetNode @@ -597,7 +687,31 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + } +] +``` ### ChainGetParentReceipts ChainGetParentReceipts returns receipts for messages in parent tipset of @@ -616,7 +730,16 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + } +] +``` ### ChainGetPath ChainGetPath returns a set of revert/apply operations needed to get from @@ -658,7 +781,19 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Type": "string value", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` ### ChainGetRandomnessFromBeacon ChainGetRandomnessFromBeacon is used to sample the beacon for randomness. @@ -683,7 +818,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### ChainGetRandomnessFromTickets ChainGetRandomnessFromTickets is used to sample the chain for randomness. @@ -708,7 +843,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### ChainGetTipSet ChainGetTipSet returns the tipset specified by the given TipSetKey. @@ -814,7 +949,19 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Type": "string value", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` ### ChainReadObj ChainReadObj reads ipld nodes referenced by the specified CID from chain @@ -988,7 +1135,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } ``` @@ -1058,7 +1218,30 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Err": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "MinPrice": "0", + "UnsealPrice": "0", + "PricePerByte": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Miner": "f01234", + "MinerPeer": { + "Address": "f01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } + } +] +``` ### ClientGenCar ClientGenCar generates a CAR file for the specified file. @@ -1103,7 +1286,21 @@ Response: "State": 42, "Message": "string value", "DealStages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] }, "Provider": "f01234", "DataRef": { @@ -1142,7 +1339,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } } @@ -1180,7 +1390,21 @@ Response: "State": 42, "Message": "string value", "DealStages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] }, "Provider": "f01234", "DataRef": { @@ -1219,7 +1443,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } } @@ -1267,7 +1504,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } }, "Event": 5 @@ -1325,7 +1575,40 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } +] +``` ### ClientListDeals ClientListDeals returns information about the deals made by the local client. @@ -1335,7 +1618,88 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "DealStages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + }, + "Provider": "f01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "Verified": true, + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } + } +] +``` ### ClientListImports ClientListImports lists imported files and their root CIDs @@ -1345,7 +1709,19 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Key": 50, + "Err": "string value", + "Root": null, + "Source": "string value", + "FilePath": "string value", + "CARPath": "string value" + } +] +``` ### ClientListRetrievals ClientQueryAsk returns a signed StorageAsk from the specified miner. @@ -1356,7 +1732,61 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "PieceCID": null, + "PricePerByte": "0", + "UnsealPrice": "0", + "Status": 0, + "Message": "string value", + "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "BytesReceived": 42, + "BytesPaidFor": 42, + "TotalPaid": "0", + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + }, + "Event": 5 + } +] +``` ### ClientMinerQueryOffer ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. @@ -1386,6 +1816,7 @@ Response: "Size": 42, "MinPrice": "0", "UnsealPrice": "0", + "PricePerByte": "0", "PaymentInterval": 42, "PaymentIntervalIncrease": 42, "Miner": "f01234", @@ -1829,7 +2260,28 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Type": { + "System": "string value", + "Subsystem": "string value" + }, + "Active": true, + "LastActive": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + }, + "LastResolved": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + } + } +] +``` ### LogList @@ -1838,7 +2290,12 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + "string value" +] +``` ### LogSetLevel @@ -1983,11 +2440,46 @@ Inputs: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconValues": null, - "Messages": null, + "BeaconValues": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "Messages": [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], "Epoch": 10101, "Timestamp": 42, - "WinningPoStProof": null + "WinningPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ] } ] ``` @@ -2004,9 +2496,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -2030,8 +2536,16 @@ Response: "ForkSignaling": 42, "ParentBaseFee": "0" }, - "BlsMessages": null, - "SecpkMessages": null + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ``` @@ -2061,14 +2575,28 @@ Response: { "MinerPower": "0", "NetworkPower": "0", - "Sectors": null, + "Sectors": [ + { + "SealProof": 8, + "SectorNumber": 9, + "SectorKey": null, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], "WorkerKey": "f01234", "SectorSize": 34359738368, "PrevBeaconEntry": { "Round": 42, "Data": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], "EligibleForMining": true } ``` @@ -2087,11 +2615,43 @@ Perms: write Inputs: ```json [ - null + [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### MpoolBatchPushMessage MpoolBatchPushMessage batch pushes a unsigned message to mempool. @@ -2102,14 +2662,58 @@ Perms: sign Inputs: ```json [ - null, + [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], { "MaxFee": "0" } ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolBatchPushUntrusted MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources. @@ -2120,11 +2724,43 @@ Perms: write Inputs: ```json [ - null + [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### MpoolClear MpoolClear clears pending messages from the mpool @@ -2152,7 +2788,9 @@ Inputs: `null` Response: ```json { - "PriorityAddrs": null, + "PriorityAddrs": [ + "f01234" + ], "SizeLimitHigh": 123, "SizeLimitLow": 123, "ReplaceByFeeRatio": 12.3, @@ -2197,7 +2835,35 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolPush MpoolPush pushes a signed message to mempool. @@ -2369,7 +3035,35 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolSetConfig MpoolSetConfig sets the mpool config to (a copy of) the supplied config @@ -2381,7 +3075,9 @@ Inputs: ```json [ { - "PriorityAddrs": null, + "PriorityAddrs": [ + "f01234" + ], "SizeLimitHigh": 123, "SizeLimitLow": 123, "ReplaceByFeeRatio": 12.3, @@ -2609,7 +3305,9 @@ Inputs: ```json [ 42, - null, + [ + "f01234" + ], 10101, "0", "f01234", @@ -2670,7 +3368,21 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ID": 9, + "To": "f01234", + "Value": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Approved": [ + "f01234" + ] + } +] +``` ### MsigGetVested MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. @@ -2880,7 +3592,9 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ``` @@ -2977,9 +3691,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -2996,9 +3716,15 @@ Inputs: `null` Response: ```json { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ``` @@ -3011,9 +3737,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -3030,7 +3762,9 @@ Inputs: [ { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ] ``` @@ -3081,7 +3815,9 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ``` @@ -3102,8 +3838,12 @@ Response: { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Agent": "string value", - "Addrs": null, - "Protocols": null, + "Addrs": [ + "string value" + ], + "Protocols": [ + "string value" + ], "ConnMgrMeta": { "FirstSeen": "0001-01-01T00:00:00Z", "Value": 123, @@ -3124,7 +3864,17 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` ### NetPubsubScores @@ -3133,7 +3883,28 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Score": { + "Score": 12.3, + "Topics": { + "/blocks": { + "TimeInMesh": 60000000000, + "FirstMessageDeliveries": 122, + "MeshMessageDeliveries": 1234, + "InvalidMessageDeliveries": 3 + } + }, + "AppSpecificScore": 12.3, + "IPColocationFactor": 12.3, + "BehaviourPenalty": 12.3 + } + } +] +``` ## Paych The Paych methods are for interacting with and managing payment channels @@ -3272,7 +4043,12 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### PaychNewPayment @@ -3284,7 +4060,19 @@ Inputs: [ "f01234", "f01234", - null + [ + { + "Amount": "0", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "MinSettle": 10101, + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] ] ``` @@ -3295,7 +4083,33 @@ Response: "WaitSentinel": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "Vouchers": null + "Vouchers": [ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] } ``` @@ -3361,7 +4175,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3397,7 +4216,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3433,7 +4257,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3475,7 +4304,12 @@ Response: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3497,7 +4331,36 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` ### PaychVoucherSubmit @@ -3522,7 +4385,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3590,7 +4458,15 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Miner": "f01234", + "Epoch": 10101 + } +] +``` ### StateCall StateCall runs the given message and returns its result without any persisted changes. @@ -3692,8 +4568,73 @@ Response: }, "Error": "string value", "Duration": 60000000000, - "GasCharges": null, - "Subcalls": null + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] }, "Error": "string value", "Duration": 60000000000 @@ -3799,7 +4740,23 @@ Inputs: ```json [ 10101, - null, + [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], [ { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -3817,7 +4774,138 @@ Response: "Root": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "Trace": null + "Trace": [ + { + "MsgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "GasCost": { + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" + }, + "ExecutionTrace": { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] + }, + "Error": "string value", + "Duration": 60000000000 + } + ] } ``` @@ -3935,7 +5023,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### StateGetRandomnessFromTickets StateGetRandomnessFromTickets is used to sample the chain for randomness. @@ -3960,7 +5048,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### StateGetReceipt StateGetReceipt returns the message receipt for the given message or for a @@ -4022,7 +5110,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### StateListMessages StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. @@ -4049,7 +5142,14 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### StateListMiners StateListMiners returns the addresses of every miner that has claimed power in the Power Actor @@ -4071,7 +5171,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### StateLookupID StateLookupID retrieves the ID address of the given address @@ -4271,7 +5376,29 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "SectorKeyCID": null + } +] +``` ### StateMinerAvailableBalance StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent @@ -4317,7 +5444,18 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "PostSubmissions": [ + 5, + 1 + ], + "DisputableProofCount": 42 + } +] +``` ### StateMinerFaults StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner @@ -4375,10 +5513,14 @@ Response: "Owner": "f01234", "Worker": "f01234", "NewWorker": "f01234", - "ControlAddresses": null, + "ControlAddresses": [ + "f01234" + ], "WorkerChangeEpoch": 10101, "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Multiaddrs": null, + "Multiaddrs": [ + "Ynl0ZSBhcnJheQ==" + ], "WindowPoStProofType": 8, "SectorSize": 34359738368, "WindowPoStPartitionSectors": 42, @@ -4403,7 +5545,9 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -4445,7 +5589,33 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "AllSectors": [ + 5, + 1 + ], + "FaultySectors": [ + 5, + 1 + ], + "RecoveringSectors": [ + 5, + 1 + ], + "LiveSectors": [ + 5, + 1 + ], + "ActiveSectors": [ + 5, + 1 + ] + } +] +``` ### StateMinerPower StateMinerPower returns the power of the indicated miner @@ -4500,7 +5670,9 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -4667,7 +5839,29 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "SectorKeyCID": null + } +] +``` ### StateNetworkName StateNetworkName returns the name of the network the node is synced to @@ -4699,7 +5893,7 @@ Inputs: ] ``` -Response: `14` +Response: `15` ### StateReadState StateReadState returns the indicated actor's state. @@ -4833,8 +6027,73 @@ Response: }, "Error": "string value", "Duration": 60000000000, - "GasCharges": null, - "Subcalls": null + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] }, "Error": "string value", "Duration": 60000000000 @@ -5010,14 +6269,17 @@ Response: "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Activation": 10101, "Expiration": 10101, "DealWeight": "0", "VerifiedDealWeight": "0", "InitialPledge": "0", "ExpectedDayReward": "0", - "ExpectedStoragePledge": "0" + "ExpectedStoragePledge": "0", + "SectorKeyCID": null } ``` @@ -5083,7 +6345,9 @@ Response: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -5379,9 +6643,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -5436,7 +6714,26 @@ Inputs: `null` Response: ```json { - "ActiveSyncs": null, + "ActiveSyncs": [ + { + "WorkerID": 42, + "Base": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Target": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Stage": 1, + "Height": 10101, + "Start": "0001-01-01T00:00:00Z", + "End": "0001-01-01T00:00:00Z", + "Message": "string value" + } + ], "VMApplied": 42 } ``` @@ -5461,9 +6758,23 @@ Inputs: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -5487,8 +6798,16 @@ Inputs: "ForkSignaling": 42, "ParentBaseFee": "0" }, - "BlsMessages": null, - "SecpkMessages": null + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ] ``` @@ -5649,7 +6968,12 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### WalletNew WalletNew creates a new address in the wallet with the given sigType. diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index 1a14dbd71..7d5f4665e 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -289,7 +289,7 @@ Response: ```json { "Version": "string value", - "APIVersion": 131328, + "APIVersion": 131584, "BlockDelay": 42 } ``` @@ -305,7 +305,9 @@ Perms: admin Inputs: ```json [ - null + [ + "write" + ] ] ``` @@ -323,7 +325,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "write" +] +``` ## Beacon The Beacon method group contains methods for interacting with the random beacon (DRAND) @@ -454,9 +461,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -511,9 +532,54 @@ Inputs: Response: ```json { - "BlsMessages": null, - "SecpkMessages": null, - "Cids": null + "BlsMessages": [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "SecpkMessages": [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "Cids": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ``` @@ -589,7 +655,31 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + } +] +``` ### ChainGetNode @@ -629,7 +719,31 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + } +] +``` ### ChainGetParentReceipts ChainGetParentReceipts returns receipts for messages in parent tipset of @@ -648,7 +762,16 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + } +] +``` ### ChainGetPath ChainGetPath returns a set of revert/apply operations needed to get from @@ -690,7 +813,19 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Type": "string value", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` ### ChainGetTipSet ChainGetTipSet returns the tipset specified by the given TipSetKey. @@ -828,7 +963,19 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Type": "string value", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` ### ChainReadObj ChainReadObj reads ipld nodes referenced by the specified CID from chain @@ -1002,7 +1149,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } ``` @@ -1069,7 +1229,12 @@ Inputs: "Root": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "DAGs": null, + "DAGs": [ + { + "DataSelector": "Links/21/Hash/Links/42/Hash", + "ExportMerkleProof": true + } + ], "FromLocalCAR": "string value", "DealID": 5 }, @@ -1098,7 +1263,30 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Err": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "MinPrice": "0", + "UnsealPrice": "0", + "PricePerByte": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Miner": "f01234", + "MinerPeer": { + "Address": "f01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } + } +] +``` ### ClientGenCar ClientGenCar generates a CAR file for the specified file. @@ -1143,7 +1331,21 @@ Response: "State": 42, "Message": "string value", "DealStages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] }, "Provider": "f01234", "DataRef": { @@ -1182,7 +1384,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } } @@ -1220,7 +1435,21 @@ Response: "State": 42, "Message": "string value", "DealStages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] }, "Provider": "f01234", "DataRef": { @@ -1259,7 +1488,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } } @@ -1307,7 +1549,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } }, "Event": 5 @@ -1365,7 +1620,40 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } +] +``` ### ClientListDeals ClientListDeals returns information about the deals made by the local client. @@ -1375,7 +1663,88 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "DealStages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + }, + "Provider": "f01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "Verified": true, + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } + } +] +``` ### ClientListImports ClientListImports lists imported files and their root CIDs @@ -1385,7 +1754,19 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Key": 50, + "Err": "string value", + "Root": null, + "Source": "string value", + "FilePath": "string value", + "CARPath": "string value" + } +] +``` ### ClientListRetrievals ClientListRetrievals returns information about retrievals made by the local client @@ -1395,7 +1776,61 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "PieceCID": null, + "PricePerByte": "0", + "UnsealPrice": "0", + "Status": 0, + "Message": "string value", + "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "BytesReceived": 42, + "BytesPaidFor": 42, + "TotalPaid": "0", + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + }, + "Event": 5 + } +] +``` ### ClientMinerQueryOffer ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. @@ -1425,6 +1860,7 @@ Response: "Size": 42, "MinPrice": "0", "UnsealPrice": "0", + "PricePerByte": "0", "PaymentInterval": 42, "PaymentIntervalIncrease": 42, "Miner": "f01234", @@ -1836,7 +2272,28 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Type": { + "System": "string value", + "Subsystem": "string value" + }, + "Active": true, + "LastActive": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + }, + "LastResolved": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + } + } +] +``` ### LogList @@ -1845,7 +2302,12 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + "string value" +] +``` ### LogSetLevel @@ -1990,11 +2452,46 @@ Inputs: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconValues": null, - "Messages": null, + "BeaconValues": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "Messages": [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], "Epoch": 10101, "Timestamp": 42, - "WinningPoStProof": null + "WinningPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ] } ] ``` @@ -2011,9 +2508,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -2037,8 +2548,16 @@ Response: "ForkSignaling": 42, "ParentBaseFee": "0" }, - "BlsMessages": null, - "SecpkMessages": null + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ``` @@ -2068,14 +2587,28 @@ Response: { "MinerPower": "0", "NetworkPower": "0", - "Sectors": null, + "Sectors": [ + { + "SealProof": 8, + "SectorNumber": 9, + "SectorKey": null, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], "WorkerKey": "f01234", "SectorSize": 34359738368, "PrevBeaconEntry": { "Round": 42, "Data": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], "EligibleForMining": true } ``` @@ -2094,11 +2627,43 @@ Perms: write Inputs: ```json [ - null + [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### MpoolBatchPushMessage MpoolBatchPushMessage batch pushes a unsigned message to mempool. @@ -2109,14 +2674,58 @@ Perms: sign Inputs: ```json [ - null, + [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], { "MaxFee": "0" } ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolBatchPushUntrusted MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources. @@ -2127,11 +2736,43 @@ Perms: write Inputs: ```json [ - null + [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### MpoolCheckMessages MpoolCheckMessages performs logical checks on a batch of messages @@ -2142,11 +2783,47 @@ Perms: read Inputs: ```json [ - null + [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true + } + ] ] ``` -Response: `null` +Response: +```json +[ + [ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Code": 0, + "OK": true, + "Err": "string value", + "Hint": { + "abc": 123 + } + } + ] +] +``` ### MpoolCheckPendingMessages MpoolCheckPendingMessages performs logical checks for all pending messages from a given address @@ -2161,7 +2838,24 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + [ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Code": 0, + "OK": true, + "Err": "string value", + "Hint": { + "abc": 123 + } + } + ] +] +``` ### MpoolCheckReplaceMessages MpoolCheckReplaceMessages performs logical checks on pending messages with replacement @@ -2172,11 +2866,44 @@ Perms: read Inputs: ```json [ - null + [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] ] ``` -Response: `null` +Response: +```json +[ + [ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Code": 0, + "OK": true, + "Err": "string value", + "Hint": { + "abc": 123 + } + } + ] +] +``` ### MpoolClear MpoolClear clears pending messages from the mpool @@ -2204,7 +2931,9 @@ Inputs: `null` Response: ```json { - "PriorityAddrs": null, + "PriorityAddrs": [ + "f01234" + ], "SizeLimitHigh": 123, "SizeLimitLow": 123, "ReplaceByFeeRatio": 12.3, @@ -2249,7 +2978,35 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolPush MpoolPush pushes a signed message to mempool. @@ -2421,7 +3178,35 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolSetConfig MpoolSetConfig sets the mpool config to (a copy of) the supplied config @@ -2433,7 +3218,9 @@ Inputs: ```json [ { - "PriorityAddrs": null, + "PriorityAddrs": [ + "f01234" + ], "SizeLimitHigh": 123, "SizeLimitLow": 123, "ReplaceByFeeRatio": 12.3, @@ -2789,7 +3576,9 @@ Inputs: ```json [ 42, - null, + [ + "f01234" + ], 10101, "0", "f01234", @@ -2865,7 +3654,21 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ID": 9, + "To": "f01234", + "Value": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Approved": [ + "f01234" + ] + } +] +``` ### MsigGetVested MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. @@ -3150,7 +3953,9 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ``` @@ -3247,9 +4052,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -3266,9 +4077,15 @@ Inputs: `null` Response: ```json { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ``` @@ -3281,9 +4098,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -3300,7 +4123,9 @@ Inputs: [ { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ] ``` @@ -3351,7 +4176,9 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ``` @@ -3372,8 +4199,12 @@ Response: { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Agent": "string value", - "Addrs": null, - "Protocols": null, + "Addrs": [ + "string value" + ], + "Protocols": [ + "string value" + ], "ConnMgrMeta": { "FirstSeen": "0001-01-01T00:00:00Z", "Value": 123, @@ -3394,7 +4225,17 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` ### NetPubsubScores @@ -3403,7 +4244,28 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Score": { + "Score": 12.3, + "Topics": { + "/blocks": { + "TimeInMesh": 60000000000, + "FirstMessageDeliveries": 122, + "MeshMessageDeliveries": 1234, + "InvalidMessageDeliveries": 3 + } + }, + "AppSpecificScore": 12.3, + "IPColocationFactor": 12.3, + "BehaviourPenalty": 12.3 + } + } +] +``` ## Node These methods are general node management and status commands @@ -3576,7 +4438,12 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### PaychNewPayment @@ -3588,7 +4455,19 @@ Inputs: [ "f01234", "f01234", - null + [ + { + "Amount": "0", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "MinSettle": 10101, + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] ] ``` @@ -3599,7 +4478,33 @@ Response: "WaitSentinel": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "Vouchers": null + "Vouchers": [ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] } ``` @@ -3665,7 +4570,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3701,7 +4611,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3737,7 +4652,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3779,7 +4699,12 @@ Response: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3801,7 +4726,36 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` ### PaychVoucherSubmit @@ -3826,7 +4780,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3894,7 +4853,15 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Miner": "f01234", + "Epoch": 10101 + } +] +``` ### StateCall StateCall runs the given message and returns its result without any persisted changes. @@ -3996,8 +4963,73 @@ Response: }, "Error": "string value", "Duration": 60000000000, - "GasCharges": null, - "Subcalls": null + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] }, "Error": "string value", "Duration": 60000000000 @@ -4103,7 +5135,23 @@ Inputs: ```json [ 10101, - null, + [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], [ { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -4121,7 +5169,138 @@ Response: "Root": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "Trace": null + "Trace": [ + { + "MsgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "GasCost": { + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" + }, + "ExecutionTrace": { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] + }, + "Error": "string value", + "Duration": 60000000000 + } + ] } ``` @@ -4194,7 +5373,7 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, 1, - null + "json raw message" ] ``` @@ -4258,7 +5437,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### StateGetRandomnessFromTickets StateGetRandomnessFromTickets is used to sample the chain for randomness. @@ -4283,7 +5462,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### StateListActors StateListActors returns the addresses of every actor in the state @@ -4305,7 +5484,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### StateListMessages StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. @@ -4332,7 +5516,14 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### StateListMiners StateListMiners returns the addresses of every miner that has claimed power in the Power Actor @@ -4354,7 +5545,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### StateLookupID StateLookupID retrieves the ID address of the given address @@ -4554,7 +5750,29 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "SectorKeyCID": null + } +] +``` ### StateMinerAvailableBalance StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent @@ -4600,7 +5818,18 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "PostSubmissions": [ + 5, + 1 + ], + "DisputableProofCount": 42 + } +] +``` ### StateMinerFaults StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner @@ -4658,10 +5887,14 @@ Response: "Owner": "f01234", "Worker": "f01234", "NewWorker": "f01234", - "ControlAddresses": null, + "ControlAddresses": [ + "f01234" + ], "WorkerChangeEpoch": 10101, "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Multiaddrs": null, + "Multiaddrs": [ + "Ynl0ZSBhcnJheQ==" + ], "WindowPoStProofType": 8, "SectorSize": 34359738368, "WindowPoStPartitionSectors": 42, @@ -4686,7 +5919,9 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -4728,7 +5963,33 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "AllSectors": [ + 5, + 1 + ], + "FaultySectors": [ + 5, + 1 + ], + "RecoveringSectors": [ + 5, + 1 + ], + "LiveSectors": [ + 5, + 1 + ], + "ActiveSectors": [ + 5, + 1 + ] + } +] +``` ### StateMinerPower StateMinerPower returns the power of the indicated miner @@ -4783,7 +6044,9 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -4950,7 +6213,29 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "SectorKeyCID": null + } +] +``` ### StateNetworkName StateNetworkName returns the name of the network the node is synced to @@ -4982,7 +6267,7 @@ Inputs: ] ``` -Response: `14` +Response: `15` ### StateReadState StateReadState returns the indicated actor's state. @@ -5116,8 +6401,73 @@ Response: }, "Error": "string value", "Duration": 60000000000, - "GasCharges": null, - "Subcalls": null + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] }, "Error": "string value", "Duration": 60000000000 @@ -5250,14 +6600,17 @@ Response: "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Activation": 10101, "Expiration": 10101, "DealWeight": "0", "VerifiedDealWeight": "0", "InitialPledge": "0", "ExpectedDayReward": "0", - "ExpectedStoragePledge": "0" + "ExpectedStoragePledge": "0", + "SectorKeyCID": null } ``` @@ -5323,7 +6676,9 @@ Response: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -5566,9 +6921,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -5623,7 +6992,26 @@ Inputs: `null` Response: ```json { - "ActiveSyncs": null, + "ActiveSyncs": [ + { + "WorkerID": 42, + "Base": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Target": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Stage": 1, + "Height": 10101, + "Start": "0001-01-01T00:00:00Z", + "End": "0001-01-01T00:00:00Z", + "Message": "string value" + } + ], "VMApplied": 42 } ``` @@ -5648,9 +7036,23 @@ Inputs: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -5674,8 +7076,16 @@ Inputs: "ForkSignaling": 42, "ParentBaseFee": "0" }, - "BlsMessages": null, - "SecpkMessages": null + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ] ``` @@ -5836,7 +7246,12 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### WalletNew WalletNew creates a new address in the wallet with the given sigType. diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 1485a064f..856202f7f 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] VERSION: - 1.13.2-rc7 + 1.13.3-dev COMMANDS: init Initialize a lotus miner repo @@ -974,10 +974,11 @@ USAGE: lotus-miner data-transfers command [command options] [arguments...] COMMANDS: - list List ongoing data transfers for this miner - restart Force restart a stalled data transfer - cancel Force cancel a data transfer - help, h Shows a list of commands or help for one command + list List ongoing data transfers for this miner + restart Force restart a stalled data transfer + cancel Force cancel a data transfer + diagnostics Get detailed diagnostics on active transfers with a specific peer + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help (default: false) @@ -1034,6 +1035,19 @@ OPTIONS: ``` +### lotus-miner data-transfers diagnostics +``` +NAME: + lotus-miner data-transfers diagnostics - Get detailed diagnostics on active transfers with a specific peer + +USAGE: + lotus-miner data-transfers diagnostics [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + ## lotus-miner dagstore ``` NAME: @@ -1500,23 +1514,25 @@ USAGE: lotus-miner sectors command [command options] [arguments...] COMMANDS: - status Get the seal status of a sector by its number - list List sectors - refs List References to sectors - update-state ADVANCED: manually update the state of a sector, this may aid in error recovery - pledge store random data in a sector - check-expire Inspect expiring sectors - expired Get or cleanup expired sectors - renew Renew expiring sectors while not exceeding each sector's max life - extend Extend sector expiration - terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector) - remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) - mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals - seal Manually start sealing a sector (filling any unused space with junk) - set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts - get-cc-collateral Get the collateral required to pledge a committed capacity sector - batching manage batch sector operations - help, h Shows a list of commands or help for one command + status Get the seal status of a sector by its number + list List sectors + refs List References to sectors + update-state ADVANCED: manually update the state of a sector, this may aid in error recovery + pledge store random data in a sector + check-expire Inspect expiring sectors + expired Get or cleanup expired sectors + renew Renew expiring sectors while not exceeding each sector's max life + extend Extend sector expiration + terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector) + remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) + snap-up Mark a committed capacity sector to be filled with deals + mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals + seal Manually start sealing a sector (filling any unused space with junk) + set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts + get-cc-collateral Get the collateral required to pledge a committed capacity sector + batching manage batch sector operations + match-pending-pieces force a refreshed match of pending pieces to open sectors without manually waiting for more deals + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help (default: false) @@ -1732,6 +1748,19 @@ OPTIONS: ``` +### lotus-miner sectors snap-up +``` +NAME: + lotus-miner sectors snap-up - Mark a committed capacity sector to be filled with deals + +USAGE: + lotus-miner sectors snap-up [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + ### lotus-miner sectors mark-for-upgrade ``` NAME: @@ -1832,6 +1861,19 @@ OPTIONS: ``` +### lotus-miner sectors match-pending-pieces +``` +NAME: + lotus-miner sectors match-pending-pieces - force a refreshed match of pending pieces to open sectors without manually waiting for more deals + +USAGE: + lotus-miner sectors match-pending-pieces [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + ## lotus-miner proving ``` NAME: @@ -1941,6 +1983,7 @@ COMMANDS: list list local storage paths find find sector in the storage system cleanup trigger cleanup actions + locks show active sector locks help, h Shows a list of commands or help for one command OPTIONS: @@ -2050,6 +2093,19 @@ OPTIONS: ``` +### lotus-miner storage locks +``` +NAME: + lotus-miner storage locks - show active sector locks + +USAGE: + lotus-miner storage locks [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + ## lotus-miner sealing ``` NAME: diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index 8a52005f2..c03a8e342 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] VERSION: - 1.13.2-rc7 + 1.13.3-dev COMMANDS: run Start lotus worker @@ -44,6 +44,8 @@ OPTIONS: --unseal enable unsealing (32G sectors: 1 core, 128GiB Memory) (default: true) --precommit2 enable precommit2 (32G sectors: all cores, 96GiB Memory) (default: true) --commit enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap) (default: true) + --replica-update enable replica update (default: true) + --prove-replica-update2 enable prove replica update 2 (default: true) --parallel-fetch-limit value maximum fetch operations to run in parallel (default: 5) --timeout value used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m") --help, -h show help (default: false) diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index af46cc83b..606921f87 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.13.2-rc7 + 1.13.3-dev COMMANDS: daemon Start a lotus daemon process @@ -425,6 +425,7 @@ COMMANDS: stat Print information about a locally stored file (piece size, etc) RETRIEVAL: find Find data in the network + retrieval-ask Get a miner's retrieval ask retrieve Retrieve data from network cat Show data from network ls List object links @@ -535,6 +536,23 @@ OPTIONS: ``` +### lotus client retrieval-ask +``` +NAME: + lotus client retrieval-ask - Get a miner's retrieval ask + +USAGE: + lotus client retrieval-ask [command options] [minerAddress] [data CID] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --size value data size in bytes (default: 0) + --help, -h show help (default: false) + +``` + ### lotus client retrieve ``` NAME: diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml index d402f65ed..55ddbb054 100644 --- a/documentation/en/default-lotus-miner-config.toml +++ b/documentation/en/default-lotus-miner-config.toml @@ -207,6 +207,17 @@ # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGE #SimultaneousTransfersForStorage = 20 + # The maximum number of simultaneous data transfers from any single client + # for storage deals. + # Unset by default (0), and values higher than SimultaneousTransfersForStorage + # will have no effect; i.e. the total number of simultaneous data transfers + # across all storage clients is bound by SimultaneousTransfersForStorage + # regardless of this number. + # + # type: uint64 + # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGEPERCLIENT + #SimultaneousTransfersForStoragePerClient = 0 + # The maximum number of parallel online data transfers for retrieval deals # # type: uint64 @@ -413,6 +424,12 @@ # env var: LOTUS_STORAGE_ALLOWUNSEAL #AllowUnseal = true + # env var: LOTUS_STORAGE_ALLOWREPLICAUPDATE + #AllowReplicaUpdate = true + + # env var: LOTUS_STORAGE_ALLOWPROVEREPLICAUPDATE2 + #AllowProveReplicaUpdate2 = true + # env var: LOTUS_STORAGE_RESOURCEFILTERING #ResourceFiltering = "hardware" diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 791238933..52d80081b 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 7912389334e347bbb2eac0520c836830875c39de +Subproject commit 52d80081bfdd8a30bc44bcfe44cb0f299615b9f3 diff --git a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index 61aceadaf..e3939d3d1 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -24,6 +24,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" + "github.com/detailyang/go-fallocate" commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/lotus/extern/sector-storage/fr32" @@ -253,6 +254,23 @@ func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, err return pieceCID, werr() } +func (sb *Sealer) tryDecodeUpdatedReplica(ctx context.Context, sector storage.SectorRef, commD cid.Cid, unsealedPath string) (bool, error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage) + if xerrors.Is(err, storiface.ErrSectorNotFound) { + return false, nil + } else if err != nil { + return false, xerrors.Errorf("reading updated replica: %w", err) + } + defer done() + + // Sector data stored in replica update + updateProof, err := sector.ProofType.RegisteredUpdateProof() + if err != nil { + return false, err + } + return true, ffi.SectorUpdate.DecodeFrom(updateProof, unsealedPath, paths.Update, paths.Sealed, paths.Cache, commD) +} + func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { ssize, err := sector.ProofType.SectorSize() if err != nil { @@ -303,6 +321,16 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, off return nil } + // If piece data stored in updated replica decode whole sector + decoded, err := sb.tryDecodeUpdatedReplica(ctx, sector, commd, unsealedPath.Unsealed) + if err != nil { + return xerrors.Errorf("decoding sector from replica: %w", err) + } + if decoded { + return pf.MarkAllocated(0, maxPieceSize) + } + + // Piece data sealed in sector srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathStorage) if err != nil { return xerrors.Errorf("acquire sealed sector paths: %w", err) @@ -639,6 +667,108 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, pha return ffi.SealCommitPhase2(phase1Out, sector.ID.Number, sector.ID.Miner) } +func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { + empty := storage.ReplicaUpdateOut{} + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) + if err != nil { + return empty, xerrors.Errorf("failed to acquire sector paths: %w", err) + } + defer done() + + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + + s, err := os.Stat(paths.Sealed) + if err != nil { + return empty, err + } + sealedSize := s.Size() + + u, err := os.OpenFile(paths.Update, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec + if err != nil { + return empty, xerrors.Errorf("ensuring updated replica file exists: %w", err) + } + if err := fallocate.Fallocate(u, 0, sealedSize); err != nil { + return empty, xerrors.Errorf("allocating space for replica update file: %w", err) + } + + if err := u.Close(); err != nil { + return empty, err + } + + if err := os.Mkdir(paths.UpdateCache, 0755); err != nil { // nolint + if os.IsExist(err) { + log.Warnf("existing cache in %s; removing", paths.Cache) + + if err := os.RemoveAll(paths.UpdateCache); err != nil { + return empty, xerrors.Errorf("remove existing sector cache from %s (sector %d): %w", paths.Cache, sector, err) + } + + if err := os.Mkdir(paths.UpdateCache, 0755); err != nil { // nolint:gosec + return empty, xerrors.Errorf("mkdir cache path after cleanup: %w", err) + } + } else { + return empty, err + } + } + sealed, unsealed, err := ffi.SectorUpdate.EncodeInto(updateProofType, paths.Update, paths.UpdateCache, paths.Sealed, paths.Cache, paths.Unsealed, pieces) + if err != nil { + return empty, xerrors.Errorf("failed to update replica %d with new deal data: %w", sector.ID.Number, err) + } + return storage.ReplicaUpdateOut{NewSealed: sealed, NewUnsealed: unsealed}, nil +} + +func (sb *Sealer) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdateCache|storiface.FTUpdate, storiface.FTNone, storiface.PathSealing) + if err != nil { + return nil, xerrors.Errorf("failed to acquire sector paths: %w", err) + } + defer done() + + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + + vanillaProofs, err := ffi.SectorUpdate.GenerateUpdateVanillaProofs(updateProofType, sectorKey, newSealed, newUnsealed, paths.Update, paths.UpdateCache, paths.Sealed, paths.Cache) + if err != nil { + return nil, xerrors.Errorf("failed to generate proof of replica update for sector %d: %w", sector.ID.Number, err) + } + return vanillaProofs, nil +} + +func (sb *Sealer) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) { + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + return ffi.SectorUpdate.GenerateUpdateProofWithVanilla(updateProofType, sectorKey, newSealed, newUnsealed, vanillaProofs) +} + +func (sb *Sealer) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTSealed, storiface.PathSealing) + defer done() + if err != nil { + return xerrors.Errorf("failed to acquire sector paths: %w", err) + } + + s, err := os.Stat(paths.Update) + if err != nil { + return xerrors.Errorf("measuring update file size: %w", err) + } + sealedSize := s.Size() + e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec + if err != nil { + return xerrors.Errorf("ensuring sector key file exists: %w", err) + } + if err := fallocate.Fallocate(e, 0, sealedSize); err != nil { + return xerrors.Errorf("allocating space for sector key file: %w", err) + } + if err := e.Close(); err != nil { + return err + } + + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + return ffi.SectorUpdate.RemoveData(updateProofType, paths.Sealed, paths.Cache, paths.Update, paths.UpdateCache, paths.Unsealed, commD) +} + +func (sb *Sealer) ReleaseSealed(ctx context.Context, sector storage.SectorRef) error { + return xerrors.Errorf("not supported at this layer") +} + func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { ssize, err := sector.ProofType.SectorSize() if err != nil { @@ -723,6 +853,14 @@ func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, return xerrors.Errorf("not supported at this layer") } +func (sb *Sealer) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error { + return xerrors.Errorf("not supported at this layer") +} + +func (sb *Sealer) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error { + return xerrors.Errorf("not supported at this layer") +} + func (sb *Sealer) Remove(ctx context.Context, sector storage.SectorRef) error { return xerrors.Errorf("not supported at this layer") // happens in localworker } diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go index 509efe532..cf8978464 100644 --- a/extern/sector-storage/ffiwrapper/sealer_test.go +++ b/extern/sector-storage/ffiwrapper/sealer_test.go @@ -19,6 +19,7 @@ import ( proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -180,16 +181,16 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7} - sis := make([]proof2.SectorInfo, len(seals)) + xsis := make([]proof7.ExtendedSectorInfo, len(seals)) for i, s := range seals { - sis[i] = proof2.SectorInfo{ + xsis[i] = proof7.ExtendedSectorInfo{ SealProof: s.ref.ProofType, SectorNumber: s.ref.ID.Number, SealedCID: s.cids.Sealed, } } - proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, sis, randomness) + proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, xsis, randomness) if len(skipped) > 0 { require.Error(t, err) require.EqualValues(t, skipped, skp) @@ -200,7 +201,16 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { t.Fatalf("%+v", err) } - ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), proof2.WindowPoStVerifyInfo{ + sis := make([]proof7.SectorInfo, len(seals)) + for i, xsi := range xsis { + sis[i] = proof7.SectorInfo{ + SealProof: xsi.SealProof, + SectorNumber: xsi.SectorNumber, + SealedCID: xsi.SealedCID, + } + } + + ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), proof7.WindowPoStVerifyInfo{ Randomness: randomness, Proofs: proofs, ChallengedSectors: sis, diff --git a/extern/sector-storage/ffiwrapper/types.go b/extern/sector-storage/ffiwrapper/types.go index a5b2fdf1f..b8d9e90f1 100644 --- a/extern/sector-storage/ffiwrapper/types.go +++ b/extern/sector-storage/ffiwrapper/types.go @@ -4,7 +4,7 @@ import ( "context" "io" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -34,10 +34,11 @@ type Storage interface { } type Verifier interface { - VerifySeal(proof5.SealVerifyInfo) (bool, error) - VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) - VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) - VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) + VerifySeal(proof.SealVerifyInfo) (bool, error) + VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) + VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) + VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) + VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) } @@ -46,7 +47,7 @@ type Verifier interface { type Prover interface { // TODO: move GenerateWinningPoStSectorChallenge from the Verifier interface to here - AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) + AggregateSealProofs(aggregateInfo proof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) } type SectorProvider interface { diff --git a/extern/sector-storage/ffiwrapper/verifier_cgo.go b/extern/sector-storage/ffiwrapper/verifier_cgo.go index ff35ddc1f..6adda05c9 100644 --- a/extern/sector-storage/ffiwrapper/verifier_cgo.go +++ b/extern/sector-storage/ffiwrapper/verifier_cgo.go @@ -11,15 +11,16 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + ffiproof "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) { +func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { randomness[31] &= 0x3f - privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? + privsectors, skipped, done, err := sb.pubExtendedSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? if err != nil { return nil, err } @@ -31,12 +32,13 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, return ffi.GenerateWinningPoSt(minerID, privsectors, randomness) } -func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) { +func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) { randomness[31] &= 0x3f - privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) + privsectors, skipped, done, err := sb.pubExtendedSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) if err != nil { return nil, nil, xerrors.Errorf("gathering sector info: %w", err) } + defer done() if len(skipped) > 0 { @@ -52,11 +54,10 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s Number: f, }) } - return proof, faultyIDs, err } -func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof5.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { +func (sb *Sealer) pubExtendedSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { fmap := map[abi.SectorNumber]struct{}{} for _, fault := range faults { fmap[fault] = struct{}{} @@ -80,14 +81,32 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn ID: abi.SectorID{Miner: mid, Number: s.SectorNumber}, ProofType: s.SealProof, } - - paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage) - if err != nil { - log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err) - skipped = append(skipped, sid.ID) - continue + proveUpdate := s.SectorKey != nil + var cache string + var sealed string + if proveUpdate { + log.Debugf("Posting over updated sector for sector id: %d", s.SectorNumber) + paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTUpdateCache|storiface.FTUpdate, 0, storiface.PathStorage) + if err != nil { + log.Warnw("failed to acquire FTUpdateCache and FTUpdate of sector, skipping", "sector", sid.ID, "error", err) + skipped = append(skipped, sid.ID) + continue + } + doneFuncs = append(doneFuncs, d) + cache = paths.UpdateCache + sealed = paths.Update + } else { + log.Debugf("Posting over sector key sector for sector id: %d", s.SectorNumber) + paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage) + if err != nil { + log.Warnw("failed to acquire FTCache and FTSealed of sector, skipping", "sector", sid.ID, "error", err) + skipped = append(skipped, sid.ID) + continue + } + doneFuncs = append(doneFuncs, d) + cache = paths.Cache + sealed = paths.Sealed } - doneFuncs = append(doneFuncs, d) postProofType, err := rpt(s.SealProof) if err != nil { @@ -95,11 +114,16 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn return ffi.SortedPrivateSectorInfo{}, nil, nil, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) } + ffiInfo := ffiproof.SectorInfo{ + SealProof: s.SealProof, + SectorNumber: s.SectorNumber, + SealedCID: s.SealedCID, + } out = append(out, ffi.PrivateSectorInfo{ - CacheDirPath: paths.Cache, + CacheDirPath: cache, PoStProofType: postProofType, - SealedSectorPath: paths.Sealed, - SectorInfo: s, + SealedSectorPath: sealed, + SectorInfo: ffiInfo, }) } @@ -112,15 +136,19 @@ type proofVerifier struct{} var ProofVerifier = proofVerifier{} -func (proofVerifier) VerifySeal(info proof5.SealVerifyInfo) (bool, error) { +func (proofVerifier) VerifySeal(info proof.SealVerifyInfo) (bool, error) { return ffi.VerifySeal(info) } -func (proofVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { +func (proofVerifier) VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) { return ffi.VerifyAggregateSeals(aggregate) } -func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) { + return ffi.SectorUpdate.VerifyUpdateProof(update) +} + +func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") defer span.End() @@ -128,7 +156,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningP return ffi.VerifyWinningPoSt(info) } -func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWindowPoSt") defer span.End() diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index fb081ee5d..ecabf0398 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -98,11 +98,13 @@ type SealerConfig struct { ParallelFetchLimit int // Local worker config - AllowAddPiece bool - AllowPreCommit1 bool - AllowPreCommit2 bool - AllowCommit bool - AllowUnseal bool + AllowAddPiece bool + AllowPreCommit1 bool + AllowPreCommit2 bool + AllowCommit bool + AllowUnseal bool + AllowReplicaUpdate bool + AllowProveReplicaUpdate2 bool // ResourceFiltering instructs the system which resource filtering strategy // to use when evaluating tasks against this worker. An empty value defaults @@ -144,7 +146,7 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store go m.sched.runSched() localTasks := []sealtasks.TaskType{ - sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFetch, } if sc.AllowAddPiece { localTasks = append(localTasks, sealtasks.TTAddPiece) @@ -161,6 +163,12 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store if sc.AllowUnseal { localTasks = append(localTasks, sealtasks.TTUnseal) } + if sc.AllowReplicaUpdate { + localTasks = append(localTasks, sealtasks.TTReplicaUpdate) + } + if sc.AllowProveReplicaUpdate2 { + localTasks = append(localTasks, sealtasks.TTProveReplicaUpdate2) + } wcfg := WorkerConfig{ IgnoreResourceFiltering: sc.ResourceFiltering == ResourceFilteringDisabled, @@ -573,11 +581,89 @@ func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, return nil } +func (m *Manager) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + return m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true, nil) +} + +func (m *Manager) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTUpdateCache|storiface.FTUpdate); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + if err := m.storage.Remove(ctx, sector.ID, storiface.FTUpdateCache, true, nil); err != nil { + return xerrors.Errorf("removing update cache: %w", err) + } + if err := m.storage.Remove(ctx, sector.ID, storiface.FTUpdate, true, nil); err != nil { + return xerrors.Errorf("removing update: %w", err) + } + return nil +} + +func (m *Manager) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTRegenSectorKey, sector, commD) + if err != nil { + return xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + _, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + } + + if wait { // already in progress + waitRes() + return waitErr + } + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTSealed|storiface.FTCache); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + // NOTE: We set allowFetch to false in so that we always execute on a worker + // with direct access to the data. We want to do that because this step is + // generally very cheap / fast, and transferring data is not worth the effort + selector := newExistingSelector(m.index, sector.ID, storiface.FTUnsealed|storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTCache, true) + + err = m.sched.Schedule(ctx, sector, sealtasks.TTRegenSectorKey, selector, m.schedFetch(sector, storiface.FTUpdate|storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { + err := m.startWork(ctx, w, wk)(w.GenerateSectorKeyFromData(ctx, sector, commD)) + if err != nil { + return err + } + + waitRes() + return nil + }) + if err != nil { + return err + } + + return waitErr +} + func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache); err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } @@ -592,10 +678,161 @@ func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error { if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true, nil); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) } + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUpdate, true, nil); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) + } + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUpdateCache, true, nil); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) + } return err } +func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.ReplicaUpdateOut, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + log.Errorf("manager is doing replica update") + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTReplicaUpdate, sector, pieces) + if err != nil { + return storage.ReplicaUpdateOut{}, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = xerrors.Errorf("waitWork: %w", werr) + return + } + if p != nil { + out = p.(storage.ReplicaUpdateOut) + } + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache); err != nil { + return storage.ReplicaUpdateOut{}, xerrors.Errorf("acquiring sector lock: %w", err) + } + + selector := newAllocSelector(m.index, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) + + err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { + log.Errorf("scheduled work for replica update") + err := m.startWork(ctx, w, wk)(w.ReplicaUpdate(ctx, sector, pieces)) + if err != nil { + return xerrors.Errorf("startWork: %w", err) + } + + waitRes() + return nil + }) + if err != nil { + return storage.ReplicaUpdateOut{}, xerrors.Errorf("Schedule: %w", err) + } + return out, waitErr +} + +func (m *Manager) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (out storage.ReplicaVanillaProofs, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTProveReplicaUpdate1, sector, sectorKey, newSealed, newUnsealed) + if err != nil { + return nil, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + if p != nil { + out = p.(storage.ReplicaVanillaProofs) + } + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTUpdate|storiface.FTCache|storiface.FTUpdateCache, storiface.FTNone); err != nil { + return nil, xerrors.Errorf("acquiring sector lock: %w", err) + } + + selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTSealed|storiface.FTCache, true) + + err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate1, selector, m.schedFetch(sector, storiface.FTSealed, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { + + err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed)) + if err != nil { + return err + } + + waitRes() + return nil + }) + if err != nil { + return nil, err + } + + return out, waitErr +} + +func (m *Manager) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (out storage.ReplicaUpdateProof, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTProveReplicaUpdate2, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + if err != nil { + return nil, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + if p != nil { + out = p.(storage.ReplicaUpdateProof) + } + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + selector := newTaskSelector() + + err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate2, selector, schedNop, func(ctx context.Context, w Worker) error { + err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs)) + if err != nil { + return err + } + + waitRes() + return nil + }) + + if err != nil { + return nil, err + } + + return out, waitErr +} + func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { return m.returnResult(ctx, callID, pi, err) } @@ -624,6 +861,22 @@ func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.Ca return m.returnResult(ctx, callID, nil, err) } +func (m *Manager) ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error { + return m.returnResult(ctx, callID, out, err) +} + +func (m *Manager) ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, out storage.ReplicaVanillaProofs, err *storiface.CallError) error { + return m.returnResult(ctx, callID, out, err) +} + +func (m *Manager) ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error { + return m.returnResult(ctx, callID, proof, err) +} + +func (m *Manager) ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + return m.returnResult(ctx, callID, nil, err) +} + func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return m.returnResult(ctx, callID, nil, err) } diff --git a/extern/sector-storage/manager_calltracker.go b/extern/sector-storage/manager_calltracker.go index 332a08817..7c8703e89 100644 --- a/extern/sector-storage/manager_calltracker.go +++ b/extern/sector-storage/manager_calltracker.go @@ -385,7 +385,6 @@ func (m *Manager) returnResult(ctx context.Context, callID storiface.CallID, r i if ok { return xerrors.Errorf("result for call %v already reported", wid) } - m.results[wid] = res err := m.work.Get(wid).Mutate(func(ws *WorkState) error { diff --git a/extern/sector-storage/manager_test.go b/extern/sector-storage/manager_test.go index 4a8ca5f22..04ab0c5ce 100644 --- a/extern/sector-storage/manager_test.go +++ b/extern/sector-storage/manager_test.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -17,10 +18,12 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -65,7 +68,12 @@ func newTestStorage(t *testing.T) *testStorage { } func (t testStorage) cleanup() { + noCleanup := os.Getenv("LOTUS_TEST_NO_CLEANUP") != "" for _, path := range t.StoragePaths { + if noCleanup { + fmt.Printf("Not cleaning up test storage at %s\n", path) + continue + } if err := os.RemoveAll(path.Path); err != nil { fmt.Println("Cleanup error:", err) } @@ -162,6 +170,150 @@ func TestSimple(t *testing.T) { require.NoError(t, err) } +type Reader struct{} + +func (Reader) Read(out []byte) (int, error) { + for i := range out { + out[i] = 0 + } + return len(out), nil +} + +type NullReader struct { + *io.LimitedReader +} + +func NewNullReader(size abi.UnpaddedPieceSize) io.Reader { + return &NullReader{(io.LimitReader(&Reader{}, int64(size))).(*io.LimitedReader)} +} + +func (m NullReader) NullBytes() int64 { + return m.N +} + +func TestSnapDeals(t *testing.T) { + logging.SetAllLoggers(logging.LevelWarn) + ctx := context.Background() + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, datastore.NewMapDatastore()) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit1, sealtasks.TTCommit2, sealtasks.TTFinalize, + sealtasks.TTFetch, sealtasks.TTReplicaUpdate, sealtasks.TTProveReplicaUpdate1, sealtasks.TTProveReplicaUpdate2, sealtasks.TTUnseal, + sealtasks.TTRegenSectorKey, + } + wds := datastore.NewMapDatastore() + + w := NewLocalWorker(WorkerConfig{TaskTypes: localTasks}, stor, lstor, idx, m, statestore.New(wds)) + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + proofType := abi.RegisteredSealProof_StackedDrg2KiBV1 + ptStr := os.Getenv("LOTUS_TEST_SNAP_DEALS_PROOF_TYPE") + switch ptStr { + case "2k": + case "8M": + proofType = abi.RegisteredSealProof_StackedDrg8MiBV1 + case "512M": + proofType = abi.RegisteredSealProof_StackedDrg512MiBV1 + case "32G": + proofType = abi.RegisteredSealProof_StackedDrg32GiBV1 + case "64G": + proofType = abi.RegisteredSealProof_StackedDrg64GiBV1 + default: + log.Warn("Unspecified proof type, make sure to set LOTUS_TEST_SNAP_DEALS_PROOF_TYPE to '2k', '8M', '512M', '32G' or '64G'") + log.Warn("Continuing test with 2k sectors") + } + + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: proofType, + } + ss, err := proofType.SectorSize() + require.NoError(t, err) + + unpaddedSectorSize := abi.PaddedPieceSize(ss).Unpadded() + + // Pack sector with no pieces + p0, err := m.AddPiece(ctx, sid, nil, unpaddedSectorSize, NewNullReader(unpaddedSectorSize)) + require.NoError(t, err) + ccPieces := []abi.PieceInfo{p0} + + // Precommit and Seal a CC sector + fmt.Printf("PC1\n") + ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9} + pc1Out, err := m.SealPreCommit1(ctx, sid, ticket, ccPieces) + require.NoError(t, err) + fmt.Printf("PC2\n") + pc2Out, err := m.SealPreCommit2(ctx, sid, pc1Out) + require.NoError(t, err) + + // Now do a snap deals replica update + sectorKey := pc2Out.Sealed + + // Two pieces each half the size of the sector + unpaddedPieceSize := unpaddedSectorSize / 2 + p1, err := m.AddPiece(ctx, sid, nil, unpaddedPieceSize, strings.NewReader(strings.Repeat("k", int(unpaddedPieceSize)))) + require.NoError(t, err) + require.Equal(t, unpaddedPieceSize.Padded(), p1.Size) + + p2, err := m.AddPiece(ctx, sid, []abi.UnpaddedPieceSize{p1.Size.Unpadded()}, unpaddedPieceSize, strings.NewReader(strings.Repeat("j", int(unpaddedPieceSize)))) + require.NoError(t, err) + require.Equal(t, unpaddedPieceSize.Padded(), p1.Size) + + pieces := []abi.PieceInfo{p1, p2} + fmt.Printf("RU\n") + startRU := time.Now() + out, err := m.ReplicaUpdate(ctx, sid, pieces) + require.NoError(t, err) + fmt.Printf("RU duration (%s): %s\n", ss.ShortString(), time.Since(startRU)) + + updateProofType, err := sid.ProofType.RegisteredUpdateProof() + require.NoError(t, err) + require.NotNil(t, out) + fmt.Printf("PR1\n") + startPR1 := time.Now() + vanillaProofs, err := m.ProveReplicaUpdate1(ctx, sid, sectorKey, out.NewSealed, out.NewUnsealed) + require.NoError(t, err) + require.NotNil(t, vanillaProofs) + fmt.Printf("PR1 duration (%s): %s\n", ss.ShortString(), time.Since(startPR1)) + fmt.Printf("PR2\n") + startPR2 := time.Now() + proof, err := m.ProveReplicaUpdate2(ctx, sid, sectorKey, out.NewSealed, out.NewUnsealed, vanillaProofs) + require.NoError(t, err) + require.NotNil(t, proof) + fmt.Printf("PR2 duration (%s): %s\n", ss.ShortString(), time.Since(startPR2)) + + vInfo := proof7.ReplicaUpdateInfo{ + Proof: proof, + UpdateProofType: updateProofType, + OldSealedSectorCID: sectorKey, + NewSealedSectorCID: out.NewSealed, + NewUnsealedSectorCID: out.NewUnsealed, + } + pass, err := ffiwrapper.ProofVerifier.VerifyReplicaUpdate(vInfo) + require.NoError(t, err) + assert.True(t, pass) + + fmt.Printf("Decode\n") + // Remove unsealed data and decode for retrieval + require.NoError(t, m.FinalizeSector(ctx, sid, nil)) + startDecode := time.Now() + require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed)) + fmt.Printf("Decode duration (%s): %s\n", ss.ShortString(), time.Since(startDecode)) + + // Remove just the first piece and decode for retrieval + require.NoError(t, m.FinalizeSector(ctx, sid, []storage.Range{{Offset: p1.Size.Unpadded(), Size: p2.Size.Unpadded()}})) + require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed)) + + fmt.Printf("GSK\n") + require.NoError(t, m.ReleaseSectorKey(ctx, sid)) + startGSK := time.Now() + require.NoError(t, m.GenerateSectorKeyFromData(ctx, sid, out.NewUnsealed)) + fmt.Printf("GSK duration (%s): %s\n", ss.ShortString(), time.Since(startGSK)) + +} + func TestRedoPC1(t *testing.T) { logging.SetAllLoggers(logging.LevelDebug) diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index 8eaed54f6..c99af89e7 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -10,7 +10,7 @@ import ( "math/rand" "sync" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/dagstore/mount" ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper" @@ -37,7 +37,7 @@ type SectorMgr struct { } type mockVerifProver struct { - aggregates map[string]proof5.AggregateSealVerifyProofAndInfos // used for logging bad verifies + aggregates map[string]proof.AggregateSealVerifyProofAndInfos // used for logging bad verifies } func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr { @@ -262,6 +262,28 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid storage.SectorRef, ph return out[:], nil } +func (mgr *SectorMgr) ReplicaUpdate(ctx context.Context, sid storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { + out := storage.ReplicaUpdateOut{} + return out, nil +} + +func (mgr *SectorMgr) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { + out := make([][]byte, 0) + return out, nil +} + +func (mgr *SectorMgr) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) { + return make([]byte, 0), nil +} + +func (mgr *SectorMgr) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + return nil +} + +func (mgr *SectorMgr) ReleaseSealed(ctx context.Context, sid storage.SectorRef) error { + return nil +} + // Test Instrumentation Methods func (mgr *SectorMgr) MarkFailed(sid storage.SectorRef, failed bool) error { @@ -312,14 +334,23 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } } -func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) { +func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, xSectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { mgr.lk.Lock() defer mgr.lk.Unlock() + sectorInfo := make([]proof.SectorInfo, len(xSectorInfo)) + for i, xssi := range xSectorInfo { + sectorInfo[i] = proof.SectorInfo{ + SealProof: xssi.SealProof, + SectorNumber: xssi.SectorNumber, + SealedCID: xssi.SealedCID, + } + } + return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil } -func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) { +func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, xSectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) { mgr.lk.Lock() defer mgr.lk.Unlock() @@ -327,22 +358,22 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI return nil, nil, xerrors.Errorf("failed to post (mock)") } - si := make([]proof5.SectorInfo, 0, len(sectorInfo)) + si := make([]proof.ExtendedSectorInfo, 0, len(xSectorInfo)) var skipped []abi.SectorID var err error - for _, info := range sectorInfo { + for _, xsi := range xSectorInfo { sid := abi.SectorID{ Miner: minerID, - Number: info.SectorNumber, + Number: xsi.SectorNumber, } _, found := mgr.sectors[sid] if found && !mgr.sectors[sid].failed && !mgr.sectors[sid].corrupted { - si = append(si, info) + si = append(si, xsi) } else { skipped = append(skipped, sid) err = xerrors.Errorf("skipped some sectors") @@ -353,10 +384,19 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI return nil, skipped, err } - return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil + sectorInfo := make([]proof.SectorInfo, len(si)) + for i, xssi := range si { + sectorInfo[i] = proof.SectorInfo{ + SealProof: xssi.SealProof, + SectorNumber: xssi.SectorNumber, + SealedCID: xssi.SealedCID, + } + } + + return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil } -func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) []byte { +func generateFakePoStProof(sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) []byte { randomness[31] &= 0x3f hasher := sha256.New() @@ -371,13 +411,13 @@ func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRa } -func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof5.PoStProof { +func generateFakePoSt(sectorInfo []proof.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof.PoStProof { wp, err := rpt(sectorInfo[0].SealProof) if err != nil { panic(err) } - return []proof5.PoStProof{ + return []proof.PoStProof{ { PoStProof: wp, ProofBytes: generateFakePoStProof(sectorInfo, randomness), @@ -441,6 +481,14 @@ func (mgr *SectorMgr) ReleaseUnsealed(ctx context.Context, sector storage.Sector return nil } +func (mgr *SectorMgr) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error { + return nil +} + +func (mgr *SectorMgr) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error { + return nil +} + func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) error { mgr.lk.Lock() defer mgr.lk.Unlock() @@ -467,6 +515,8 @@ func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStPr return bad, nil } +var _ storiface.WorkerReturn = &SectorMgr{} + func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { panic("not supported") } @@ -511,7 +561,23 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, panic("not supported") } -func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { +func (mgr *SectorMgr) ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, out storage.ReplicaVanillaProofs, err *storiface.CallError) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateProof, err *storiface.CallError) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + panic("not supported") +} + +func (m mockVerifProver) VerifySeal(svi proof.SealVerifyInfo) (bool, error) { plen, err := svi.SealProof.ProofSize() if err != nil { return false, err @@ -532,7 +598,7 @@ func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { return true, nil } -func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { +func (m mockVerifProver) VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) { out := make([]byte, m.aggLen(len(aggregate.Infos))) for pi, svi := range aggregate.Infos { for i := 0; i < 32; i++ { @@ -558,7 +624,11 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri return ok, nil } -func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { +func (m mockVerifProver) VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) { + return true, nil +} + +func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { out := make([]byte, m.aggLen(len(aggregateInfo.Infos))) // todo: figure out more real length for pi, proof := range proofs { for i := range proof[:32] { @@ -600,12 +670,12 @@ func (m mockVerifProver) aggLen(nproofs int) int { } } -func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f return true, nil } -func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { if len(info.Proofs) != 1 { return false, xerrors.Errorf("expected 1 proof entry") } @@ -628,7 +698,7 @@ func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context, } var MockVerifier = mockVerifProver{ - aggregates: map[string]proof5.AggregateSealVerifyProofAndInfos{}, + aggregates: map[string]proof.AggregateSealVerifyProofAndInfos{}, } var MockProver = MockVerifier diff --git a/extern/sector-storage/partialfile/partialfile.go b/extern/sector-storage/partialfile/partialfile.go index 529e889ea..ffc3935ac 100644 --- a/extern/sector-storage/partialfile/partialfile.go +++ b/extern/sector-storage/partialfile/partialfile.go @@ -71,7 +71,7 @@ func CreatePartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialF err := fallocate.Fallocate(f, 0, int64(maxPieceSize)) if errno, ok := err.(syscall.Errno); ok { if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS { - log.Warnf("could not allocated space, ignoring: %v", errno) + log.Warnf("could not allocate space, ignoring: %v", errno) err = nil // log and ignore } } diff --git a/extern/sector-storage/sched_test.go b/extern/sector-storage/sched_test.go index 667fabb66..29e791ba3 100644 --- a/extern/sector-storage/sched_test.go +++ b/extern/sector-storage/sched_test.go @@ -102,6 +102,22 @@ func (s *schedTestWorker) AddPiece(ctx context.Context, sector storage.SectorRef panic("implement me") } +func (s *schedTestWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, peices []abi.PieceInfo) (storiface.CallID, error) { + panic("implement me") +} + +func (s *schedTestWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + panic("implement me") +} + +func (s *schedTestWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + panic("implement me") +} + +func (s *schedTestWorker) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) { + panic("implement me") +} + func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { panic("implement me") } diff --git a/extern/sector-storage/sealtasks/task.go b/extern/sector-storage/sealtasks/task.go index 6d341a4b3..f6104878b 100644 --- a/extern/sector-storage/sealtasks/task.go +++ b/extern/sector-storage/sealtasks/task.go @@ -13,17 +13,26 @@ const ( TTFetch TaskType = "seal/v0/fetch" TTUnseal TaskType = "seal/v0/unseal" + + TTReplicaUpdate TaskType = "seal/v0/replicaupdate" + TTProveReplicaUpdate1 TaskType = "seal/v0/provereplicaupdate/1" + TTProveReplicaUpdate2 TaskType = "seal/v0/provereplicaupdate/2" + TTRegenSectorKey TaskType = "seal/v0/regensectorkey" ) var order = map[TaskType]int{ - TTAddPiece: 6, // least priority - TTPreCommit1: 5, - TTPreCommit2: 4, - TTCommit2: 3, - TTCommit1: 2, - TTUnseal: 1, - TTFetch: -1, - TTFinalize: -2, // most priority + TTRegenSectorKey: 10, // least priority + TTAddPiece: 9, + TTReplicaUpdate: 8, + TTProveReplicaUpdate2: 7, + TTProveReplicaUpdate1: 6, + TTPreCommit1: 5, + TTPreCommit2: 4, + TTCommit2: 3, + TTCommit1: 2, + TTUnseal: 1, + TTFetch: -1, + TTFinalize: -2, // most priority } var shortNames = map[TaskType]string{ @@ -38,6 +47,11 @@ var shortNames = map[TaskType]string{ TTFetch: "GET", TTUnseal: "UNS", + + TTReplicaUpdate: "RU", + TTProveReplicaUpdate1: "PR1", + TTProveReplicaUpdate2: "PR2", + TTRegenSectorKey: "GSK", } func (a TaskType) MuchLess(b TaskType) (bool, bool) { diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index 12cb26a56..a90cdf0b9 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -76,6 +76,7 @@ type SectorIndex interface { // part of storage-miner api // atomically acquire locks on all sector file types. close ctx to unlock StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) + StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) StorageList(ctx context.Context) (map[ID][]Decl, error) } diff --git a/extern/sector-storage/stores/index_locks.go b/extern/sector-storage/stores/index_locks.go index 3a5ff940e..ade437c6b 100644 --- a/extern/sector-storage/stores/index_locks.go +++ b/extern/sector-storage/stores/index_locks.go @@ -2,6 +2,7 @@ package stores import ( "context" + "sort" "sync" "golang.org/x/xerrors" @@ -154,3 +155,32 @@ func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read func (i *indexLocks) StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) { return i.lockWith(ctx, (*sectorLock).tryLockSafe, sector, read, write) } + +func (i *indexLocks) StorageGetLocks(context.Context) (storiface.SectorLocks, error) { + i.lk.Lock() + defer i.lk.Unlock() + + out := storiface.SectorLocks{ + Locks: []storiface.SectorLock{}, + } + + for id, lock := range i.locks { + l := storiface.SectorLock{Sector: id} + + for t, b := range lock.w.All() { + if b { + l.Write[t]++ + } + } + + copy(l.Read[:], lock.r[:]) + + out.Locks = append(out.Locks, l) + } + + sort.Slice(out.Locks, func(i, j int) bool { + return out.Locks[i].Sector.Number < out.Locks[j].Sector.Number + }) + + return out, nil +} diff --git a/extern/sector-storage/stores/mocks/index.go b/extern/sector-storage/stores/mocks/index.go index be0eaf967..268148536 100644 --- a/extern/sector-storage/stores/mocks/index.go +++ b/extern/sector-storage/stores/mocks/index.go @@ -110,6 +110,21 @@ func (mr *MockSectorIndexMockRecorder) StorageFindSector(arg0, arg1, arg2, arg3, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageFindSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageFindSector), arg0, arg1, arg2, arg3, arg4) } +// StorageGetLocks mocks base method. +func (m *MockSectorIndex) StorageGetLocks(arg0 context.Context) (storiface.SectorLocks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageGetLocks", arg0) + ret0, _ := ret[0].(storiface.SectorLocks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageGetLocks indicates an expected call of StorageGetLocks. +func (mr *MockSectorIndexMockRecorder) StorageGetLocks(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageGetLocks", reflect.TypeOf((*MockSectorIndex)(nil).StorageGetLocks), arg0) +} + // StorageInfo mocks base method. func (m *MockSectorIndex) StorageInfo(arg0 context.Context, arg1 stores.ID) (stores.StorageInfo, error) { m.ctrl.T.Helper() diff --git a/extern/sector-storage/storiface/filetype.go b/extern/sector-storage/storiface/filetype.go index 2e0999022..83fcadc90 100644 --- a/extern/sector-storage/storiface/filetype.go +++ b/extern/sector-storage/storiface/filetype.go @@ -12,11 +12,13 @@ const ( FTUnsealed SectorFileType = 1 << iota FTSealed FTCache + FTUpdate + FTUpdateCache FileTypes = iota ) -var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache} +var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTUpdate, FTUpdateCache} const ( FTNone SectorFileType = 0 @@ -25,15 +27,21 @@ const ( const FSOverheadDen = 10 var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads - FTUnsealed: FSOverheadDen, - FTSealed: FSOverheadDen, - FTCache: 141, // 11 layers + D(2x ssize) + C + R + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, + FTUpdate: FSOverheadDen, + FTUpdateCache: FSOverheadDen * 2, + FTCache: 141, // 11 layers + D(2x ssize) + C + R' } +// sector size * disk / fs overhead. FSOverheadDen is like the unit of sector size + var FsOverheadFinalized = map[SectorFileType]int{ - FTUnsealed: FSOverheadDen, - FTSealed: FSOverheadDen, - FTCache: 2, + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, + FTUpdate: FSOverheadDen * 2, // XXX: we should clear the update cache on Finalize??? + FTUpdateCache: FSOverheadDen, + FTCache: 2, } type SectorFileType int @@ -46,6 +54,10 @@ func (t SectorFileType) String() string { return "sealed" case FTCache: return "cache" + case FTUpdate: + return "update" + case FTUpdateCache: + return "update-cache" default: return fmt.Sprintf("", t) } @@ -104,9 +116,11 @@ func (t SectorFileType) All() [FileTypes]bool { type SectorPaths struct { ID abi.SectorID - Unsealed string - Sealed string - Cache string + Unsealed string + Sealed string + Cache string + Update string + UpdateCache string } func ParseSectorID(baseName string) (abi.SectorID, error) { @@ -139,6 +153,10 @@ func PathByType(sps SectorPaths, fileType SectorFileType) string { return sps.Sealed case FTCache: return sps.Cache + case FTUpdate: + return sps.Update + case FTUpdateCache: + return sps.UpdateCache } panic("requested unknown path type") @@ -152,5 +170,9 @@ func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) { sps.Sealed = p case FTCache: sps.Cache = p + case FTUpdate: + sps.Update = p + case FTUpdateCache: + sps.UpdateCache = p } } diff --git a/extern/sector-storage/storiface/storage.go b/extern/sector-storage/storiface/storage.go index e836002d5..624b71d77 100644 --- a/extern/sector-storage/storiface/storage.go +++ b/extern/sector-storage/storiface/storage.go @@ -1,5 +1,7 @@ package storiface +import "github.com/filecoin-project/go-state-types/abi" + type PathType string const ( @@ -13,3 +15,17 @@ const ( AcquireMove AcquireMode = "move" AcquireCopy AcquireMode = "copy" ) + +type Refs struct { + RefCount [FileTypes]uint +} + +type SectorLock struct { + Sector abi.SectorID + Write [FileTypes]uint + Read [FileTypes]uint +} + +type SectorLocks struct { + Locks []SectorLock +} diff --git a/extern/sector-storage/storiface/worker.go b/extern/sector-storage/storiface/worker.go index 5889701d0..8bb6a256a 100644 --- a/extern/sector-storage/storiface/worker.go +++ b/extern/sector-storage/storiface/worker.go @@ -121,6 +121,10 @@ type WorkerCalls interface { SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error) + ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (CallID, error) + ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (CallID, error) + ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (CallID, error) + GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (CallID, error) MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (CallID, error) UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error) Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (CallID, error) @@ -174,6 +178,10 @@ type WorkerReturn interface { ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err *CallError) error ReturnFinalizeSector(ctx context.Context, callID CallID, err *CallError) error ReturnReleaseUnsealed(ctx context.Context, callID CallID, err *CallError) error + ReturnReplicaUpdate(ctx context.Context, callID CallID, out storage.ReplicaUpdateOut, err *CallError) error + ReturnProveReplicaUpdate1(ctx context.Context, callID CallID, proofs storage.ReplicaVanillaProofs, err *CallError) error + ReturnProveReplicaUpdate2(ctx context.Context, callID CallID, proof storage.ReplicaUpdateProof, err *CallError) error + ReturnGenerateSectorKeyFromData(ctx context.Context, callID CallID, err *CallError) error ReturnMoveStorage(ctx context.Context, callID CallID, err *CallError) error ReturnUnsealPiece(ctx context.Context, callID CallID, err *CallError) error ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err *CallError) error diff --git a/extern/sector-storage/teststorage_test.go b/extern/sector-storage/teststorage_test.go index 72b27b154..cb15184be 100644 --- a/extern/sector-storage/teststorage_test.go +++ b/extern/sector-storage/teststorage_test.go @@ -7,7 +7,7 @@ import ( "github.com/ipfs/go-cid" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -23,11 +23,11 @@ type testExec struct { apch chan chan apres } -func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { +func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { panic("implement me") } -func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) { +func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) { panic("implement me") } @@ -55,10 +55,38 @@ func (t *testExec) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef panic("implement me") } +func (t *testExec) ReleaseSealed(ctx context.Context, sector storage.SectorRef) error { + panic("implement me") +} + +func (t *testExec) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error { + panic("implement me") +} + +func (t *testExec) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error { + panic("implement me") +} + func (t *testExec) Remove(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } +func (t *testExec) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { + panic("implement me") +} + +func (t *testExec) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { + panic("implement me") +} + +func (t *testExec) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) { + panic("implement me") +} + +func (t *testExec) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + panic("implement me") +} + func (t *testExec) NewSector(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } diff --git a/extern/sector-storage/testworker_test.go b/extern/sector-storage/testworker_test.go index 81b1daee3..dd23278ae 100644 --- a/extern/sector-storage/testworker_test.go +++ b/extern/sector-storage/testworker_test.go @@ -7,6 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" "github.com/google/uuid" + cid "github.com/ipfs/go-cid" "github.com/filecoin-project/lotus/extern/sector-storage/mock" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" @@ -67,6 +68,33 @@ func (t *testWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pie }) } +func (t *testWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + out, err := t.mockSeal.ReplicaUpdate(ctx, sector, pieces) + if err := t.ret.ReturnReplicaUpdate(ctx, ci, out, toCallError(err)); err != nil { + log.Error(err) + } + }) +} + +func (t *testWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + vanillaProofs, err := t.mockSeal.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed) + if err := t.ret.ReturnProveReplicaUpdate1(ctx, ci, vanillaProofs, toCallError(err)); err != nil { + log.Error(err) + } + }) +} + +func (t *testWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + proof, err := t.mockSeal.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + if err := t.ret.ReturnProveReplicaUpdate2(ctx, ci, proof, toCallError(err)); err != nil { + log.Error(err) + } + }) +} + func (t *testWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { return t.asyncCall(sector, func(ci storiface.CallID) { t.pc1s++ diff --git a/extern/sector-storage/worker_local.go b/extern/sector-storage/worker_local.go index 3545c50c0..a5f5a0b9d 100644 --- a/extern/sector-storage/worker_local.go +++ b/extern/sector-storage/worker_local.go @@ -28,7 +28,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache} +var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache} type WorkerConfig struct { TaskTypes []sealtasks.TaskType @@ -148,7 +148,6 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor } sid := storiface.PathByType(storageIDs, fileType) - if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector.ID, fileType, l.op == storiface.AcquireMove); err != nil { log.Errorf("declare sector error: %+v", err) } @@ -163,16 +162,20 @@ func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) { type ReturnType string const ( - AddPiece ReturnType = "AddPiece" - SealPreCommit1 ReturnType = "SealPreCommit1" - SealPreCommit2 ReturnType = "SealPreCommit2" - SealCommit1 ReturnType = "SealCommit1" - SealCommit2 ReturnType = "SealCommit2" - FinalizeSector ReturnType = "FinalizeSector" - ReleaseUnsealed ReturnType = "ReleaseUnsealed" - MoveStorage ReturnType = "MoveStorage" - UnsealPiece ReturnType = "UnsealPiece" - Fetch ReturnType = "Fetch" + AddPiece ReturnType = "AddPiece" + SealPreCommit1 ReturnType = "SealPreCommit1" + SealPreCommit2 ReturnType = "SealPreCommit2" + SealCommit1 ReturnType = "SealCommit1" + SealCommit2 ReturnType = "SealCommit2" + FinalizeSector ReturnType = "FinalizeSector" + ReplicaUpdate ReturnType = "ReplicaUpdate" + ProveReplicaUpdate1 ReturnType = "ProveReplicaUpdate1" + ProveReplicaUpdate2 ReturnType = "ProveReplicaUpdate2" + GenerateSectorKey ReturnType = "GenerateSectorKey" + ReleaseUnsealed ReturnType = "ReleaseUnsealed" + MoveStorage ReturnType = "MoveStorage" + UnsealPiece ReturnType = "UnsealPiece" + Fetch ReturnType = "Fetch" ) // in: func(WorkerReturn, context.Context, CallID, err string) @@ -210,16 +213,20 @@ func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.Wor } var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error{ - AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece), - SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), - SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), - SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), - SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2), - FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector), - ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), - MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), - UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), - Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), + AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece), + SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), + SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), + SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), + SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2), + FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector), + ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), + ReplicaUpdate: rfunc(storiface.WorkerReturn.ReturnReplicaUpdate), + ProveReplicaUpdate1: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate1), + ProveReplicaUpdate2: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate2), + GenerateSectorKey: rfunc(storiface.WorkerReturn.ReturnGenerateSectorKeyFromData), + MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), + UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), + Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), } func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) { @@ -243,7 +250,6 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, r } res, err := work(ctx, ci) - if err != nil { rb, err := json.Marshal(res) if err != nil { @@ -261,7 +267,6 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, r } } }() - return ci, nil } @@ -385,6 +390,51 @@ func (l *LocalWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, }) } +func (l *LocalWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, ReplicaUpdate, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + sealerOut, err := sb.ReplicaUpdate(ctx, sector, pieces) + return sealerOut, err + }) +} + +func (l *LocalWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, ProveReplicaUpdate1, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed) + }) +} + +func (l *LocalWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, ProveReplicaUpdate2, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + }) +} + +func (l *LocalWorker) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, GenerateSectorKey, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return nil, sb.GenerateSectorKeyFromData(ctx, sector, commD) + }) +} + func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { sb, err := l.executor() if err != nil { diff --git a/extern/sector-storage/worker_tracked.go b/extern/sector-storage/worker_tracked.go index 7a88d9bd4..a1c647422 100644 --- a/extern/sector-storage/worker_tracked.go +++ b/extern/sector-storage/worker_tracked.go @@ -98,7 +98,6 @@ func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid stori wt.lk.Lock() delete(wt.prepared, prepID) } - callID, err := cb() if err != nil { return callID, err @@ -198,4 +197,22 @@ func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, i return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, id, sealtasks.TTUnseal, func() (storiface.CallID, error) { return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid) }) } +func (t *trackedWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTReplicaUpdate, func() (storiface.CallID, error) { + return t.Worker.ReplicaUpdate(ctx, sector, pieces) + }) +} + +func (t *trackedWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTProveReplicaUpdate1, func() (storiface.CallID, error) { + return t.Worker.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed) + }) +} + +func (t *trackedWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTProveReplicaUpdate2, func() (storiface.CallID, error) { + return t.Worker.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + }) +} + var _ Worker = &trackedWorker{} diff --git a/extern/storage-sealing/cbor_gen.go b/extern/storage-sealing/cbor_gen.go index 1dfaf54a5..c1e2b08fa 100644 --- a/extern/storage-sealing/cbor_gen.go +++ b/extern/storage-sealing/cbor_gen.go @@ -143,7 +143,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{184, 26}); err != nil { + if _, err := w.Write([]byte{184, 32}); err != nil { return err } @@ -573,6 +573,137 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } + // t.CCUpdate (bool) (bool) + if len("CCUpdate") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CCUpdate\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CCUpdate"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CCUpdate")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.CCUpdate); err != nil { + return err + } + + // t.CCPieces ([]sealing.Piece) (slice) + if len("CCPieces") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CCPieces\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CCPieces"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CCPieces")); err != nil { + return err + } + + if len(t.CCPieces) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.CCPieces was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.CCPieces))); err != nil { + return err + } + for _, v := range t.CCPieces { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.UpdateSealed (cid.Cid) (struct) + if len("UpdateSealed") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UpdateSealed\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UpdateSealed"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UpdateSealed")); err != nil { + return err + } + + if t.UpdateSealed == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.UpdateSealed); err != nil { + return xerrors.Errorf("failed to write cid field t.UpdateSealed: %w", err) + } + } + + // t.UpdateUnsealed (cid.Cid) (struct) + if len("UpdateUnsealed") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UpdateUnsealed\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UpdateUnsealed"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UpdateUnsealed")); err != nil { + return err + } + + if t.UpdateUnsealed == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.UpdateUnsealed); err != nil { + return xerrors.Errorf("failed to write cid field t.UpdateUnsealed: %w", err) + } + } + + // t.ReplicaUpdateProof (storage.ReplicaUpdateProof) (slice) + if len("ReplicaUpdateProof") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ReplicaUpdateProof\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReplicaUpdateProof"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ReplicaUpdateProof")); err != nil { + return err + } + + if len(t.ReplicaUpdateProof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.ReplicaUpdateProof was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ReplicaUpdateProof))); err != nil { + return err + } + + if _, err := w.Write(t.ReplicaUpdateProof[:]); err != nil { + return err + } + + // t.ReplicaUpdateMessage (cid.Cid) (struct) + if len("ReplicaUpdateMessage") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ReplicaUpdateMessage\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReplicaUpdateMessage"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ReplicaUpdateMessage")); err != nil { + return err + } + + if t.ReplicaUpdateMessage == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.ReplicaUpdateMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.ReplicaUpdateMessage: %w", err) + } + } + // t.FaultReportMsg (cid.Cid) (struct) if len("FaultReportMsg") > cbg.MaxLength { return xerrors.Errorf("Value in field \"FaultReportMsg\" was too long") @@ -1166,6 +1297,145 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error { } t.InvalidProofs = uint64(extra) + } + // t.CCUpdate (bool) (bool) + case "CCUpdate": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.CCUpdate = false + case 21: + t.CCUpdate = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.CCPieces ([]sealing.Piece) (slice) + case "CCPieces": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.CCPieces: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.CCPieces = make([]Piece, extra) + } + + for i := 0; i < int(extra); i++ { + + var v Piece + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.CCPieces[i] = v + } + + // t.UpdateSealed (cid.Cid) (struct) + case "UpdateSealed": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.UpdateSealed: %w", err) + } + + t.UpdateSealed = &c + } + + } + // t.UpdateUnsealed (cid.Cid) (struct) + case "UpdateUnsealed": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.UpdateUnsealed: %w", err) + } + + t.UpdateUnsealed = &c + } + + } + // t.ReplicaUpdateProof (storage.ReplicaUpdateProof) (slice) + case "ReplicaUpdateProof": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.ReplicaUpdateProof: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.ReplicaUpdateProof = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.ReplicaUpdateProof[:]); err != nil { + return err + } + // t.ReplicaUpdateMessage (cid.Cid) (struct) + case "ReplicaUpdateMessage": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ReplicaUpdateMessage: %w", err) + } + + t.ReplicaUpdateMessage = &c + } + } // t.FaultReportMsg (cid.Cid) (struct) case "FaultReportMsg": diff --git a/extern/storage-sealing/checks.go b/extern/storage-sealing/checks.go index 74a791fcb..42425e782 100644 --- a/extern/storage-sealing/checks.go +++ b/extern/storage-sealing/checks.go @@ -35,6 +35,9 @@ type ErrInvalidProof struct{ error } type ErrNoPrecommit struct{ error } type ErrCommitWaitFailed struct{ error } +type ErrBadRU struct{ error } +type ErrBadPR struct{ error } + func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error { tok, height, err := api.ChainHead(ctx) if err != nil { @@ -187,3 +190,32 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, return nil } + +// check that sector info is good after running a replica update +func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, api SealingAPI) error { + + if err := checkPieces(ctx, maddr, si, api); err != nil { + return err + } + if !si.CCUpdate { + return xerrors.Errorf("replica update on sector not marked for update") + } + + commD, err := api.StateComputeDataCommitment(ctx, maddr, si.SectorType, si.dealIDs(), tok) + if err != nil { + return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)} + } + if si.UpdateUnsealed == nil || !commD.Equals(*si.UpdateUnsealed) { + return &ErrBadRU{xerrors.Errorf("on chain CommD differs from sector: %s != %s", commD, si.CommD)} + } + + if si.UpdateSealed == nil { + return &ErrBadRU{xerrors.Errorf("nil sealed cid")} + } + if si.ReplicaUpdateProof == nil { + return ErrBadPR{xerrors.Errorf("nil PR2 proof")} + } + + return nil + +} diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index 10bec7e0b..83874e907 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -133,6 +133,44 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorFinalizeFailed{}, FinalizeFailed), ), + // Snap deals + SnapDealsWaitDeals: planOne( + on(SectorAddPiece{}, SnapDealsAddPiece), + on(SectorStartPacking{}, SnapDealsPacking), + ), + SnapDealsAddPiece: planOne( + on(SectorPieceAdded{}, SnapDealsWaitDeals), + apply(SectorStartPacking{}), + apply(SectorAddPiece{}), + on(SectorAddPieceFailed{}, SnapDealsAddPieceFailed), + ), + SnapDealsPacking: planOne( + on(SectorPacked{}, UpdateReplica), + ), + UpdateReplica: planOne( + on(SectorReplicaUpdate{}, ProveReplicaUpdate), + on(SectorUpdateReplicaFailed{}, ReplicaUpdateFailed), + on(SectorDealsExpired{}, SnapDealsDealsExpired), + on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), + ), + ProveReplicaUpdate: planOne( + on(SectorProveReplicaUpdate{}, SubmitReplicaUpdate), + on(SectorProveReplicaUpdateFailed{}, ReplicaUpdateFailed), + on(SectorDealsExpired{}, SnapDealsDealsExpired), + on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), + ), + SubmitReplicaUpdate: planOne( + on(SectorReplicaUpdateSubmitted{}, ReplicaUpdateWait), + on(SectorSubmitReplicaUpdateFailed{}, ReplicaUpdateFailed), + ), + ReplicaUpdateWait: planOne( + on(SectorReplicaUpdateLanded{}, FinalizeReplicaUpdate), + on(SectorSubmitReplicaUpdateFailed{}, ReplicaUpdateFailed), + on(SectorAbortUpgrade{}, AbortUpgrade), + ), + FinalizeReplicaUpdate: planOne( + on(SectorFinalized{}, Proving), + ), // Sealing errors AddPieceFailed: planOne( @@ -188,11 +226,37 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto onReturning(SectorUpdateDealIDs{}), ), + // Snap Deals Errors + SnapDealsAddPieceFailed: planOne( + on(SectorRetryWaitDeals{}, SnapDealsWaitDeals), + apply(SectorStartPacking{}), + apply(SectorAddPiece{}), + ), + SnapDealsDealsExpired: planOne( + on(SectorAbortUpgrade{}, AbortUpgrade), + ), + SnapDealsRecoverDealIDs: planOne( + on(SectorUpdateDealIDs{}, SubmitReplicaUpdate), + on(SectorAbortUpgrade{}, AbortUpgrade), + ), + AbortUpgrade: planOneOrIgnore( + on(SectorRevertUpgradeToProving{}, Proving), + ), + ReplicaUpdateFailed: planOne( + on(SectorRetrySubmitReplicaUpdateWait{}, ReplicaUpdateWait), + on(SectorRetrySubmitReplicaUpdate{}, SubmitReplicaUpdate), + on(SectorRetryReplicaUpdate{}, UpdateReplica), + on(SectorRetryProveReplicaUpdate{}, ProveReplicaUpdate), + on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), + on(SectorDealsExpired{}, SnapDealsDealsExpired), + ), + // Post-seal Proving: planOne( on(SectorFaultReported{}, FaultReported), on(SectorFaulty{}, Faulty), + on(SectorStartCCUpdate{}, SnapDealsWaitDeals), ), Terminating: planOne( on(SectorTerminating{}, TerminateWait), @@ -209,7 +273,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto TerminateFailed: planOne( // SectorTerminating (global) ), - Removing: planOne( + Removing: planOneOrIgnore( on(SectorRemoved{}, Removed), on(SectorRemoveFailed{}, RemoveFailed), ), @@ -355,13 +419,6 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta log.Errorw("update sector stats", "error", err) } - // todo: drop this, use Context iface everywhere - wrapCtx := func(f func(Context, SectorInfo) error) func(statemachine.Context, SectorInfo) error { - return func(ctx statemachine.Context, info SectorInfo) error { - return f(&ctx, info) - } - } - switch state.State { // Happy path case Empty: @@ -403,6 +460,24 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta case FinalizeSector: return m.handleFinalizeSector, processed, nil + // Snap deals updates + case SnapDealsWaitDeals: + return m.handleWaitDeals, processed, nil + case SnapDealsAddPiece: + return m.handleAddPiece, processed, nil + case SnapDealsPacking: + return m.handlePacking, processed, nil + case UpdateReplica: + return m.handleReplicaUpdate, processed, nil + case ProveReplicaUpdate: + return m.handleProveReplicaUpdate, processed, nil + case SubmitReplicaUpdate: + return m.handleSubmitReplicaUpdate, processed, nil + case ReplicaUpdateWait: + return m.handleReplicaUpdateWait, processed, nil + case FinalizeReplicaUpdate: + return m.handleFinalizeReplicaUpdate, processed, nil + // Handled failure modes case AddPieceFailed: return m.handleAddPieceFailed, processed, nil @@ -426,7 +501,20 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta case DealsExpired: return m.handleDealsExpired, processed, nil case RecoverDealIDs: - return wrapCtx(m.HandleRecoverDealIDs), processed, nil + return m.HandleRecoverDealIDs, processed, nil + + // Snap Deals failure modes + case SnapDealsAddPieceFailed: + return m.handleAddPieceFailed, processed, nil + + case SnapDealsDealsExpired: + return m.handleDealsExpiredSnapDeals, processed, nil + case SnapDealsRecoverDealIDs: + return m.handleSnapDealsRecoverDealIDs, processed, nil + case ReplicaUpdateFailed: + return m.handleSubmitReplicaUpdateFailed, processed, nil + case AbortUpgrade: + return m.handleAbortUpgrade, processed, nil // Post-seal case Proving: @@ -642,3 +730,16 @@ func planOne(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err e return uint64(len(events)), nil } } + +// planOne but ignores unhandled states without erroring, this prevents the need to handle all possible events creating +// error during forced override +func planOneOrIgnore(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err error))) func(events []statemachine.Event, state *SectorInfo) (uint64, error) { + f := planOne(ts...) + return func(events []statemachine.Event, state *SectorInfo) (uint64, error) { + cnt, err := f(events, state) + if err != nil { + log.Warnf("planOneOrIgnore: ignoring error from planOne: %s", err) + } + return cnt, nil + } +} diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go index 650a81799..395c4b94a 100644 --- a/extern/storage-sealing/fsm_events.go +++ b/extern/storage-sealing/fsm_events.go @@ -295,6 +295,46 @@ type SectorFinalizeFailed struct{ error } func (evt SectorFinalizeFailed) FormatError(xerrors.Printer) (next error) { return evt.error } func (evt SectorFinalizeFailed) apply(*SectorInfo) {} +// Snap deals // CC update path + +type SectorStartCCUpdate struct{} + +func (evt SectorStartCCUpdate) apply(state *SectorInfo) { + state.CCUpdate = true + // Clear filler piece but remember in case of abort + state.CCPieces = state.Pieces + state.Pieces = nil +} + +type SectorReplicaUpdate struct { + Out storage.ReplicaUpdateOut +} + +func (evt SectorReplicaUpdate) apply(state *SectorInfo) { + state.UpdateSealed = &evt.Out.NewSealed + state.UpdateUnsealed = &evt.Out.NewUnsealed +} + +type SectorProveReplicaUpdate struct { + Proof storage.ReplicaUpdateProof +} + +func (evt SectorProveReplicaUpdate) apply(state *SectorInfo) { + state.ReplicaUpdateProof = evt.Proof +} + +type SectorReplicaUpdateSubmitted struct { + Message cid.Cid +} + +func (evt SectorReplicaUpdateSubmitted) apply(state *SectorInfo) { + state.ReplicaUpdateMessage = &evt.Message +} + +type SectorReplicaUpdateLanded struct{} + +func (evt SectorReplicaUpdateLanded) apply(state *SectorInfo) {} + // Failed state recovery type SectorRetrySealPreCommit1 struct{} @@ -351,6 +391,60 @@ func (evt SectorUpdateDealIDs) apply(state *SectorInfo) { } } +// Snap Deals failure and recovery + +type SectorRetryReplicaUpdate struct{} + +func (evt SectorRetryReplicaUpdate) apply(state *SectorInfo) {} + +type SectorRetryProveReplicaUpdate struct{} + +func (evt SectorRetryProveReplicaUpdate) apply(state *SectorInfo) {} + +type SectorUpdateReplicaFailed struct{ error } + +func (evt SectorUpdateReplicaFailed) FormatError(xerrors.Printer) (next error) { return evt.error } +func (evt SectorUpdateReplicaFailed) apply(state *SectorInfo) {} + +type SectorProveReplicaUpdateFailed struct{ error } + +func (evt SectorProveReplicaUpdateFailed) FormatError(xerrors.Printer) (next error) { + return evt.error +} +func (evt SectorProveReplicaUpdateFailed) apply(state *SectorInfo) {} + +type SectorAbortUpgrade struct{ error } + +func (evt SectorAbortUpgrade) apply(state *SectorInfo) {} +func (evt SectorAbortUpgrade) FormatError(xerrors.Printer) (next error) { + return evt.error +} + +type SectorRevertUpgradeToProving struct{} + +func (evt SectorRevertUpgradeToProving) apply(state *SectorInfo) { + // cleanup sector state so that it is back in proving + state.CCUpdate = false + state.UpdateSealed = nil + state.UpdateUnsealed = nil + state.ReplicaUpdateProof = nil + state.ReplicaUpdateMessage = nil + state.Pieces = state.CCPieces + state.CCPieces = nil +} + +type SectorRetrySubmitReplicaUpdateWait struct{} + +func (evt SectorRetrySubmitReplicaUpdateWait) apply(state *SectorInfo) {} + +type SectorRetrySubmitReplicaUpdate struct{} + +func (evt SectorRetrySubmitReplicaUpdate) apply(state *SectorInfo) {} + +type SectorSubmitReplicaUpdateFailed struct{} + +func (evt SectorSubmitReplicaUpdateFailed) apply(state *SectorInfo) {} + // Faults type SectorFaulty struct{} diff --git a/extern/storage-sealing/input.go b/extern/storage-sealing/input.go index 60c3a79e2..f3259f0cc 100644 --- a/extern/storage-sealing/input.go +++ b/extern/storage-sealing/input.go @@ -59,6 +59,8 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e return ctx.Send(SectorAddPiece{}) }, + number: sector.SectorNumber, + ccUpdate: sector.CCUpdate, } } else { // make sure we're only accounting for pieces which were correctly added @@ -329,6 +331,17 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err } +func (m *Sealing) MatchPendingPiecesToOpenSectors(ctx context.Context) error { + sp, err := m.currentSealProof(ctx) + if err != nil { + return xerrors.Errorf("failed to get current seal proof: %w", err) + } + log.Debug("pieces to sector matching waiting for lock") + m.inputLk.Lock() + defer m.inputLk.Unlock() + return m.updateInput(ctx, sp) +} + // called with m.inputLk func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) error { ssize, err := sp.SectorSize() @@ -356,8 +369,33 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e toAssign[proposalCid] = struct{}{} + memo := make(map[abi.SectorNumber]abi.ChainEpoch) + expF := func(sn abi.SectorNumber) (abi.ChainEpoch, error) { + if exp, ok := memo[sn]; ok { + return exp, nil + } + onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, sn, TipSetToken{}) + if err != nil { + return 0, err + } + memo[sn] = onChainInfo.Expiration + return onChainInfo.Expiration, nil + } + for id, sector := range m.openSectors { avail := abi.PaddedPieceSize(ssize).Unpadded() - sector.used + // check that sector lifetime is long enough to fit deal using latest expiration from on chain + + ok, err := sector.dealFitsInLifetime(piece.deal.DealProposal.EndEpoch, expF) + if err != nil { + log.Errorf("failed to check expiration for cc Update sector %d", sector.number) + continue + } + if !ok { + exp, _ := expF(sector.number) + log.Infof("CC update sector %d cannot fit deal, expiration %d before deal end epoch %d", id, exp, piece.deal.DealProposal.EndEpoch) + continue + } if piece.size <= avail { // (note: if we have enough space for the piece, we also have enough space for inter-piece padding) matches = append(matches, match{ @@ -416,6 +454,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e } if len(toAssign) > 0 { + log.Errorf("we are trying to create a new sector with open sectors %v", m.openSectors) if err := m.tryCreateDealSector(ctx, sp); err != nil { log.Errorw("Failed to create a new sector for deals", "error", err) } diff --git a/extern/storage-sealing/mocks/api.go b/extern/storage-sealing/mocks/api.go index cc8561dc7..95c222ecd 100644 --- a/extern/storage-sealing/mocks/api.go +++ b/extern/storage-sealing/mocks/api.go @@ -213,6 +213,21 @@ func (mr *MockSealingAPIMockRecorder) StateMarketStorageDealProposal(arg0, arg1, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDealProposal", reflect.TypeOf((*MockSealingAPI)(nil).StateMarketStorageDealProposal), arg0, arg1, arg2) } +// StateMinerActiveSectors mocks base method. +func (m *MockSealingAPI) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) ([]*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2) + ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors. +func (mr *MockSealingAPIMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockSealingAPI)(nil).StateMinerActiveSectors), arg0, arg1, arg2) +} + // StateMinerAvailableBalance mocks base method. func (m *MockSealingAPI) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) { m.ctrl.T.Helper() diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index 583bed052..81f6b38e9 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -63,6 +63,7 @@ type SealingAPI interface { StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error) + StateMinerActiveSectors(context.Context, address.Address, TipSetToken) ([]*miner.SectorOnChainInfo, error) StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error) StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) @@ -121,11 +122,24 @@ type Sealing struct { } type openSector struct { - used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors + used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors + number abi.SectorNumber + ccUpdate bool maybeAccept func(cid.Cid) error // called with inputLk } +func (o *openSector) dealFitsInLifetime(dealEnd abi.ChainEpoch, expF func(sn abi.SectorNumber) (abi.ChainEpoch, error)) (bool, error) { + if !o.ccUpdate { + return true, nil + } + expiration, err := expF(o.number) + if err != nil { + return false, err + } + return expiration >= dealEnd, nil +} + type pendingPiece struct { size abi.UnpaddedPieceSize deal api.PieceDealInfo diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go index b606de5ae..ba6df7ff4 100644 --- a/extern/storage-sealing/sector_state.go +++ b/extern/storage-sealing/sector_state.go @@ -3,50 +3,65 @@ package sealing type SectorState string var ExistSectorStateList = map[SectorState]struct{}{ - Empty: {}, - WaitDeals: {}, - Packing: {}, - AddPiece: {}, - AddPieceFailed: {}, - GetTicket: {}, - PreCommit1: {}, - PreCommit2: {}, - PreCommitting: {}, - PreCommitWait: {}, - SubmitPreCommitBatch: {}, - PreCommitBatchWait: {}, - WaitSeed: {}, - Committing: {}, - CommitFinalize: {}, - CommitFinalizeFailed: {}, - SubmitCommit: {}, - CommitWait: {}, - SubmitCommitAggregate: {}, - CommitAggregateWait: {}, - FinalizeSector: {}, - Proving: {}, - FailedUnrecoverable: {}, - SealPreCommit1Failed: {}, - SealPreCommit2Failed: {}, - PreCommitFailed: {}, - ComputeProofFailed: {}, - CommitFailed: {}, - PackingFailed: {}, - FinalizeFailed: {}, - DealsExpired: {}, - RecoverDealIDs: {}, - Faulty: {}, - FaultReported: {}, - FaultedFinal: {}, - Terminating: {}, - TerminateWait: {}, - TerminateFinality: {}, - TerminateFailed: {}, - Removing: {}, - RemoveFailed: {}, - Removed: {}, + Empty: {}, + WaitDeals: {}, + Packing: {}, + AddPiece: {}, + AddPieceFailed: {}, + GetTicket: {}, + PreCommit1: {}, + PreCommit2: {}, + PreCommitting: {}, + PreCommitWait: {}, + SubmitPreCommitBatch: {}, + PreCommitBatchWait: {}, + WaitSeed: {}, + Committing: {}, + CommitFinalize: {}, + CommitFinalizeFailed: {}, + SubmitCommit: {}, + CommitWait: {}, + SubmitCommitAggregate: {}, + CommitAggregateWait: {}, + FinalizeSector: {}, + Proving: {}, + FailedUnrecoverable: {}, + SealPreCommit1Failed: {}, + SealPreCommit2Failed: {}, + PreCommitFailed: {}, + ComputeProofFailed: {}, + CommitFailed: {}, + PackingFailed: {}, + FinalizeFailed: {}, + DealsExpired: {}, + RecoverDealIDs: {}, + Faulty: {}, + FaultReported: {}, + FaultedFinal: {}, + Terminating: {}, + TerminateWait: {}, + TerminateFinality: {}, + TerminateFailed: {}, + Removing: {}, + RemoveFailed: {}, + Removed: {}, + SnapDealsWaitDeals: {}, + SnapDealsAddPiece: {}, + SnapDealsPacking: {}, + UpdateReplica: {}, + ProveReplicaUpdate: {}, + SubmitReplicaUpdate: {}, + ReplicaUpdateWait: {}, + FinalizeReplicaUpdate: {}, + SnapDealsAddPieceFailed: {}, + SnapDealsDealsExpired: {}, + SnapDealsRecoverDealIDs: {}, + ReplicaUpdateFailed: {}, + AbortUpgrade: {}, } +// cmd/lotus-miner/info.go defines CLI colors corresponding to these states +// update files there when adding new states const ( UndefinedSectorState SectorState = "" @@ -79,6 +94,17 @@ const ( FinalizeSector SectorState = "FinalizeSector" Proving SectorState = "Proving" + + // snap deals / cc update + SnapDealsWaitDeals SectorState = "SnapDealsWaitDeals" + SnapDealsAddPiece SectorState = "SnapDealsAddPiece" + SnapDealsPacking SectorState = "SnapDealsPacking" + UpdateReplica SectorState = "UpdateReplica" + ProveReplicaUpdate SectorState = "ProveReplicaUpdate" + SubmitReplicaUpdate SectorState = "SubmitReplicaUpdate" + ReplicaUpdateWait SectorState = "ReplicaUpdateWait" + FinalizeReplicaUpdate SectorState = "FinalizeReplicaUpdate" + // error modes FailedUnrecoverable SectorState = "FailedUnrecoverable" AddPieceFailed SectorState = "AddPieceFailed" @@ -92,6 +118,13 @@ const ( DealsExpired SectorState = "DealsExpired" RecoverDealIDs SectorState = "RecoverDealIDs" + // snap deals error modes + SnapDealsAddPieceFailed SectorState = "SnapDealsAddPieceFailed" + SnapDealsDealsExpired SectorState = "SnapDealsDealsExpired" + SnapDealsRecoverDealIDs SectorState = "SnapDealsRecoverDealIDs" + AbortUpgrade SectorState = "AbortUpgrade" + ReplicaUpdateFailed SectorState = "ReplicaUpdateFailed" + Faulty SectorState = "Faulty" // sector is corrupted or gone for some reason FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain FaultedFinal SectorState = "FaultedFinal" // fault declared on chain @@ -108,11 +141,11 @@ const ( func toStatState(st SectorState, finEarly bool) statSectorState { switch st { - case UndefinedSectorState, Empty, WaitDeals, AddPiece, AddPieceFailed: + case UndefinedSectorState, Empty, WaitDeals, AddPiece, AddPieceFailed, SnapDealsWaitDeals, SnapDealsAddPiece: return sstStaging - case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, FinalizeSector: + case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, FinalizeSector, SnapDealsPacking, UpdateReplica, ProveReplicaUpdate, FinalizeReplicaUpdate: return sstSealing - case SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait: + case SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, SubmitReplicaUpdate, ReplicaUpdateWait: if finEarly { // we use statSectorState for throttling storage use. With FinalizeEarly // we can consider sectors in states after CommitFinalize as finalized, so diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index 0c88cc384..0d7c08ce5 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -1,11 +1,13 @@ package sealing import ( + "context" "time" "github.com/hashicorp/go-multierror" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -181,6 +183,67 @@ func (m *Sealing) handleComputeProofFailed(ctx statemachine.Context, sector Sect return ctx.Send(SectorRetryComputeProof{}) } +func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sector SectorInfo) error { + if sector.ReplicaUpdateMessage != nil { + mw, err := m.Api.StateSearchMsg(ctx.Context(), *sector.ReplicaUpdateMessage) + if err != nil { + // API error + if err := failedCooldown(ctx, sector); err != nil { + return err + } + + return ctx.Send(SectorRetrySubmitReplicaUpdateWait{}) + } + + if mw == nil { + return ctx.Send(SectorRetrySubmitReplicaUpdateWait{}) + } + + switch mw.Receipt.ExitCode { + case exitcode.Ok: + return ctx.Send(SectorRetrySubmitReplicaUpdateWait{}) + case exitcode.SysErrOutOfGas: + return ctx.Send(SectorRetrySubmitReplicaUpdate{}) + default: + // something else went wrong + } + } + + tok, _, err := m.Api.ChainHead(ctx.Context()) + if err != nil { + log.Errorf("handleCommitting: api error, not proceeding: %+v", err) + return nil + } + + if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil { + switch err.(type) { + case *ErrApi: + log.Errorf("handleSubmitReplicaUpdateFailed: api error, not proceeding: %+v", err) + return nil + case *ErrBadRU: + log.Errorf("bad replica update: %+v", err) + return ctx.Send(SectorRetryReplicaUpdate{}) + case *ErrBadPR: + log.Errorf("bad PR1: +%v", err) + return ctx.Send(SectorRetryProveReplicaUpdate{}) + + case *ErrInvalidDeals: + return ctx.Send(SectorInvalidDealIDs{}) + case *ErrExpiredDeals: + return ctx.Send(SectorDealsExpired{xerrors.Errorf("expired dealIDs in sector: %w", err)}) + default: + log.Errorf("sanity check error, not proceeding: +%v", err) + return xerrors.Errorf("checkPieces sanity check error: %w", err) + } + } + + if err := failedCooldown(ctx, sector); err != nil { + return err + } + + return ctx.Send(SectorRetrySubmitReplicaUpdate{}) +} + func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo) error { tok, _, err := m.Api.ChainHead(ctx.Context()) if err != nil { @@ -319,61 +382,40 @@ func (m *Sealing) handleDealsExpired(ctx statemachine.Context, sector SectorInfo return ctx.Send(SectorRemove{}) } -func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error { - tok, height, err := m.Api.ChainHead(ctx.Context()) +func (m *Sealing) handleDealsExpiredSnapDeals(ctx statemachine.Context, sector SectorInfo) error { + if !sector.CCUpdate { + // Should be impossible + return xerrors.Errorf("should never reach SnapDealsDealsExpired as a non-CCUpdate sector") + } + + return ctx.Send(SectorAbortUpgrade{xerrors.Errorf("one of upgrade deals expired")}) +} + +func (m *Sealing) handleAbortUpgrade(ctx statemachine.Context, sector SectorInfo) error { + if !sector.CCUpdate { + return xerrors.Errorf("should never reach AbortUpgrade as a non-CCUpdate sector") + } + + // Remove snap deals replica if any + if err := m.sealer.ReleaseReplicaUpgrade(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil { + return xerrors.Errorf("removing CC update files from sector storage") + } + return ctx.Send(SectorRevertUpgradeToProving{}) +} + +// failWith is a mutator or global mutator +func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, sector SectorInfo, failWith interface{}) error { + toFix, paddingPieces, err := recoveryPiecesToFix(ctx.Context(), m.Api, sector, m.maddr) if err != nil { - return xerrors.Errorf("getting chain head: %w", err) + return err } - - var toFix []int - paddingPieces := 0 - - for i, p := range sector.Pieces { - // if no deal is associated with the piece, ensure that we added it as - // filler (i.e. ensure that it has a zero PieceCID) - if p.DealInfo == nil { - exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded()) - if !p.Piece.PieceCID.Equals(exp) { - return xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID) - } - paddingPieces++ - continue - } - - proposal, err := m.Api.StateMarketStorageDealProposal(ctx.Context(), p.DealInfo.DealID, tok) - if err != nil { - log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err) - toFix = append(toFix, i) - continue - } - - if proposal.Provider != m.maddr { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.Provider, m.maddr) - toFix = append(toFix, i) - continue - } - - if proposal.PieceCID != p.Piece.PieceCID { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID) - toFix = append(toFix, i) - continue - } - - if p.Piece.Size != proposal.PieceSize { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize) - toFix = append(toFix, i) - continue - } - - if height >= proposal.StartEpoch { - // TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces - // (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval) - return xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height) - } + tok, _, err := m.Api.ChainHead(ctx.Context()) + if err != nil { + return err } - failed := map[int]error{} updates := map[int]abi.DealID{} + for _, i := range toFix { p := sector.Pieces[i] @@ -381,7 +423,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error { // TODO: check if we are in an early enough state try to remove this piece log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID) // Not much to do here (and this can only happen for old spacerace sectors) - return ctx.Send(SectorRemove{}) + return ctx.Send(failWith) } var dp *market.DealProposal @@ -416,7 +458,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error { if len(failed)+paddingPieces == len(sector.Pieces) { log.Errorf("removing sector %d: all deals expired or unrecoverable: %+v", sector.SectorNumber, merr) - return ctx.Send(SectorRemove{}) + return ctx.Send(failWith) } // todo: try to remove bad pieces (hard; see the todo above) @@ -424,9 +466,73 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error { // for now removing sectors is probably better than having them stuck in RecoverDealIDs // and expire anyways log.Errorf("removing sector %d: deals expired or unrecoverable: %+v", sector.SectorNumber, merr) - return ctx.Send(SectorRemove{}) + return ctx.Send(failWith) } // Not much to do here, we can't go back in time to commit this sector return ctx.Send(SectorUpdateDealIDs{Updates: updates}) } + +func (m *Sealing) HandleRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error { + return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorRemove{}) +} + +func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error { + return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{}) +} + +func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) { + tok, height, err := api.ChainHead(ctx) + if err != nil { + return nil, 0, xerrors.Errorf("getting chain head: %w", err) + } + + var toFix []int + paddingPieces := 0 + + for i, p := range sector.Pieces { + // if no deal is associated with the piece, ensure that we added it as + // filler (i.e. ensure that it has a zero PieceCID) + if p.DealInfo == nil { + exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded()) + if !p.Piece.PieceCID.Equals(exp) { + return nil, 0, xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID) + } + paddingPieces++ + continue + } + + proposal, err := api.StateMarketStorageDealProposal(ctx, p.DealInfo.DealID, tok) + if err != nil { + log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err) + toFix = append(toFix, i) + continue + } + + if proposal.Provider != maddr { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.Provider, maddr) + toFix = append(toFix, i) + continue + } + + if proposal.PieceCID != p.Piece.PieceCID { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID) + toFix = append(toFix, i) + continue + } + + if p.Piece.Size != proposal.PieceSize { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize) + toFix = append(toFix, i) + continue + } + + if height >= proposal.StartEpoch { + // TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces + // (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval) + return nil, 0, xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height) + } + } + + return toFix, paddingPieces, nil +} diff --git a/extern/storage-sealing/states_failed_test.go b/extern/storage-sealing/states_failed_test.go index 22c245afd..86f69b11f 100644 --- a/extern/storage-sealing/states_failed_test.go +++ b/extern/storage-sealing/states_failed_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/filecoin-project/go-state-types/network" + statemachine "github.com/filecoin-project/go-statemachine" market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" @@ -25,6 +26,7 @@ import ( ) func TestStateRecoverDealIDs(t *testing.T) { + t.Skip("Bring this back when we can correctly mock a state machine context: Issue #7867") mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() @@ -40,7 +42,7 @@ func TestStateRecoverDealIDs(t *testing.T) { sctx := mocks.NewMockContext(mockCtrl) sctx.EXPECT().Context().AnyTimes().Return(ctx) - api.EXPECT().ChainHead(ctx).Times(1).Return(nil, abi.ChainEpoch(10), nil) + api.EXPECT().ChainHead(ctx).Times(2).Return(nil, abi.ChainEpoch(10), nil) var dealId abi.DealID = 12 dealProposal := market.DealProposal{ @@ -70,7 +72,9 @@ func TestStateRecoverDealIDs(t *testing.T) { sctx.EXPECT().Send(sealing.SectorRemove{}).Return(nil) - err := fakeSealing.HandleRecoverDealIDs(sctx, sealing.SectorInfo{ + // TODO sctx should satisfy an interface so it can be useable for mocking. This will fail because we are passing in an empty context now to get this to build. + // https://github.com/filecoin-project/lotus/issues/7867 + err := fakeSealing.HandleRecoverDealIDs(statemachine.Context{}, sealing.SectorInfo{ Pieces: []sealing.Piece{ { DealInfo: &api2.PieceDealInfo{ diff --git a/extern/storage-sealing/states_replica_update.go b/extern/storage-sealing/states_replica_update.go new file mode 100644 index 000000000..28c5ede0b --- /dev/null +++ b/extern/storage-sealing/states_replica_update.go @@ -0,0 +1,209 @@ +package sealing + +import ( + "bytes" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + statemachine "github.com/filecoin-project/go-statemachine" + api "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "golang.org/x/xerrors" +) + +func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { + if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state + switch err.(type) { + case *ErrApi: + log.Errorf("handleReplicaUpdate: api error, not proceeding: %+v", err) + return nil + case *ErrInvalidDeals: + log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err) + return ctx.Send(SectorInvalidDealIDs{}) + case *ErrExpiredDeals: // Probably not much we can do here, maybe re-pack the sector? + return ctx.Send(SectorDealsExpired{xerrors.Errorf("expired dealIDs in sector: %w", err)}) + default: + return xerrors.Errorf("checkPieces sanity check error: %w", err) + } + } + out, err := m.sealer.ReplicaUpdate(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.pieceInfos()) + if err != nil { + return ctx.Send(SectorUpdateReplicaFailed{xerrors.Errorf("replica update failed: %w", err)}) + } + return ctx.Send(SectorReplicaUpdate{ + Out: out, + }) +} + +func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { + if sector.UpdateSealed == nil || sector.UpdateUnsealed == nil { + return xerrors.Errorf("invalid sector %d with nil UpdateSealed or UpdateUnsealed output", sector.SectorNumber) + } + if sector.CommR == nil { + return xerrors.Errorf("invalid sector %d with nil CommR", sector.SectorNumber) + } + vanillaProofs, err := m.sealer.ProveReplicaUpdate1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed) + if err != nil { + return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (1) failed: %w", err)}) + } + + proof, err := m.sealer.ProveReplicaUpdate2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed, vanillaProofs) + if err != nil { + return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (2) failed: %w", err)}) + + } + return ctx.Send(SectorProveReplicaUpdate{ + Proof: proof, + }) +} + +func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { + + tok, _, err := m.Api.ChainHead(ctx.Context()) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) + return nil + } + + if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil { + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + sl, err := m.Api.StateSectorPartition(ctx.Context(), m.maddr, sector.SectorNumber, tok) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) + return nil + } + updateProof, err := sector.SectorType.RegisteredUpdateProof() + if err != nil { + log.Errorf("failed to get update proof type from seal proof: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + enc := new(bytes.Buffer) + params := &miner.ProveReplicaUpdatesParams{ + Updates: []miner.ReplicaUpdate{ + { + SectorID: sector.SectorNumber, + Deadline: sl.Deadline, + Partition: sl.Partition, + NewSealedSectorCID: *sector.UpdateSealed, + Deals: sector.dealIDs(), + UpdateProofType: updateProof, + ReplicaProof: sector.ReplicaUpdateProof, + }, + }, + } + if err := params.MarshalCBOR(enc); err != nil { + log.Errorf("failed to serialize update replica params: %w", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting config: %w", err) + } + + onChainInfo, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) + return nil + } + sp, err := m.currentSealProof(ctx.Context()) + if err != nil { + log.Errorf("sealer failed to return current seal proof not proceeding: %+v", err) + return nil + } + virtualPCI := miner.SectorPreCommitInfo{ + SealProof: sp, + SectorNumber: sector.SectorNumber, + SealedCID: *sector.UpdateSealed, + //SealRandEpoch: 0, + DealIDs: sector.dealIDs(), + Expiration: onChainInfo.Expiration, + //ReplaceCapacity: false, + //ReplaceSectorDeadline: 0, + //ReplaceSectorPartition: 0, + //ReplaceSectorNumber: 0, + } + + collateral, err := m.Api.StateMinerInitialPledgeCollateral(ctx.Context(), m.maddr, virtualPCI, tok) + if err != nil { + return xerrors.Errorf("getting initial pledge collateral: %w", err) + } + + collateral = big.Sub(collateral, onChainInfo.InitialPledge) + if collateral.LessThan(big.Zero()) { + collateral = big.Zero() + } + + collateral, err = collateralSendAmount(ctx.Context(), m.Api, m.maddr, cfg, collateral) + if err != nil { + log.Errorf("collateral send amount failed not proceeding: %+v", err) + return nil + } + + goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee)) + + mi, err := m.Api.StateMinerInfo(ctx.Context(), m.maddr, tok) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) + return nil + } + + from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral) + if err != nil { + log.Errorf("no good address to send replica update message from: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveReplicaUpdates, big.Zero(), big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid}) +} + +func (m *Sealing) handleReplicaUpdateWait(ctx statemachine.Context, sector SectorInfo) error { + if sector.ReplicaUpdateMessage == nil { + log.Errorf("handleReplicaUpdateWait: no replica update message cid recorded") + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + mw, err := m.Api.StateWaitMsg(ctx.Context(), *sector.ReplicaUpdateMessage) + if err != nil { + log.Errorf("handleReplicaUpdateWait: failed to wait for message: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + switch mw.Receipt.ExitCode { + case exitcode.Ok: + //expected + case exitcode.SysErrInsufficientFunds: + fallthrough + case exitcode.SysErrOutOfGas: + log.Errorf("gas estimator was wrong or out of funds") + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + default: + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + si, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, mw.TipSetTok) + if err != nil { + log.Errorf("api err failed to get sector info: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + if si == nil { + log.Errorf("api err sector not found") + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + if !si.SealedCID.Equals(*sector.UpdateSealed) { + log.Errorf("mismatch of expected onchain sealed cid after replica update, expected %s got %s", sector.UpdateSealed, si.SealedCID) + return ctx.Send(SectorAbortUpgrade{}) + } + return ctx.Send(SectorReplicaUpdateLanded{}) +} + +func (m *Sealing) handleFinalizeReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { + return ctx.Send(SectorFinalized{}) +} diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index c6cd0bb49..2258250f4 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -280,8 +280,8 @@ func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) } // TODO: We should probably invoke this method in most (if not all) state transition failures after handlePreCommitting -func (m *Sealing) remarkForUpgrade(sid abi.SectorNumber) { - err := m.MarkForUpgrade(sid) +func (m *Sealing) remarkForUpgrade(ctx context.Context, sid abi.SectorNumber) { + err := m.MarkForUpgrade(ctx, sid) if err != nil { log.Errorf("error re-marking sector %d as for upgrade: %+v", sid, err) } @@ -424,7 +424,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes()) if err != nil { if params.ReplaceCapacity { - m.remarkForUpgrade(params.ReplaceSectorNumber) + m.remarkForUpgrade(ctx.Context(), params.ReplaceSectorNumber) } return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)}) } diff --git a/extern/storage-sealing/types.go b/extern/storage-sealing/types.go index aeb378f29..db53f43d3 100644 --- a/extern/storage-sealing/types.go +++ b/extern/storage-sealing/types.go @@ -73,7 +73,7 @@ type SectorInfo struct { // PreCommit2 CommD *cid.Cid - CommR *cid.Cid + CommR *cid.Cid // SectorKey Proof []byte PreCommitInfo *miner.SectorPreCommitInfo @@ -91,6 +91,14 @@ type SectorInfo struct { CommitMessage *cid.Cid InvalidProofs uint64 // failed proof computations (doesn't validate with proof inputs; can't compute) + // CCUpdate + CCUpdate bool + CCPieces []Piece + UpdateSealed *cid.Cid + UpdateUnsealed *cid.Cid + ReplicaUpdateProof storage.ReplicaUpdateProof + ReplicaUpdateMessage *cid.Cid + // Faults FaultReportMsg *cid.Cid diff --git a/extern/storage-sealing/upgrade_queue.go b/extern/storage-sealing/upgrade_queue.go index 02db41fde..1aacc9c08 100644 --- a/extern/storage-sealing/upgrade_queue.go +++ b/extern/storage-sealing/upgrade_queue.go @@ -4,6 +4,7 @@ import ( "context" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" "golang.org/x/xerrors" @@ -18,7 +19,8 @@ func (m *Sealing) IsMarkedForUpgrade(id abi.SectorNumber) bool { return found } -func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error { +func (m *Sealing) MarkForUpgrade(ctx context.Context, id abi.SectorNumber) error { + m.upgradeLk.Lock() defer m.upgradeLk.Unlock() @@ -27,6 +29,37 @@ func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error { return xerrors.Errorf("sector %d already marked for upgrade", id) } + si, err := m.GetSectorInfo(id) + if err != nil { + return xerrors.Errorf("getting sector info: %w", err) + } + if si.State != Proving { + return xerrors.Errorf("can't mark sectors not in the 'Proving' state for upgrade") + } + if len(si.Pieces) != 1 { + return xerrors.Errorf("not a committed-capacity sector, expected 1 piece") + } + if si.Pieces[0].DealInfo != nil { + return xerrors.Errorf("not a committed-capacity sector, has deals") + } + + m.toUpgrade[id] = struct{}{} + + return nil +} + +func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) error { + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting storage config: %w", err) + } + + curStaging := m.stats.curStaging() + if cfg.MaxWaitDealsSectors > 0 && curStaging >= cfg.MaxWaitDealsSectors { + return xerrors.Errorf("already waiting for deals in %d >= %d (cfg.MaxWaitDealsSectors) sectors, no free resources to wait for deals in another", + curStaging, cfg.MaxWaitDealsSectors) + } + si, err := m.GetSectorInfo(id) if err != nil { return xerrors.Errorf("getting sector info: %w", err) @@ -44,11 +77,37 @@ func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error { return xerrors.Errorf("not a committed-capacity sector, has deals") } - // TODO: more checks to match actor constraints + tok, head, err := m.Api.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("couldnt get chain head: %w", err) + } + onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, id, tok) + if err != nil { + return xerrors.Errorf("failed to read sector on chain info: %w", err) + } - m.toUpgrade[id] = struct{}{} + active, err := m.Api.StateMinerActiveSectors(ctx, m.maddr, tok) + if err != nil { + return xerrors.Errorf("failed to check active sectors: %w", err) + } + // Ensure the upgraded sector is active + var found bool + for _, si := range active { + if si.SectorNumber == id { + found = true + break + } + } + if !found { + return xerrors.Errorf("cannot mark inactive sector for upgrade") + } - return nil + if onChainInfo.Expiration-head < market7.DealMinDuration { + return xerrors.Errorf("pointless to upgrade sector %d, expiration %d is less than a min deal duration away from current epoch."+ + "Upgrade expiration before marking for upgrade", id, onChainInfo.Expiration) + } + + return m.sectors.Send(uint64(id), SectorStartCCUpdate{}) } func (m *Sealing) tryUpgradeSector(ctx context.Context, params *miner.SectorPreCommitInfo) big.Int { diff --git a/gen/inlinegen-data.json b/gen/inlinegen-data.json index e26b1b28f..ef97db651 100644 --- a/gen/inlinegen-data.json +++ b/gen/inlinegen-data.json @@ -1,7 +1,7 @@ { - "actorVersions": [0, 2, 3, 4, 5, 6], - "latestActorsVersion": 6, + "actorVersions": [0, 2, 3, 4, 5, 6, 7], + "latestActorsVersion": 7, - "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], - "latestNetworkVersion": 14 + "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + "latestNetworkVersion": 15 } diff --git a/go.mod b/go.mod index 15586dc89..97c8134f4 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/filecoin-project/lotus go 1.16 require ( - contrib.go.opencensus.io/exporter/jaeger v0.2.1 contrib.go.opencensus.io/exporter/prometheus v0.4.0 github.com/BurntSushi/toml v0.4.1 github.com/GeertJohan/go.rice v1.0.2 @@ -14,48 +13,50 @@ require ( github.com/alecthomas/jsonschema v0.0.0-20200530073317-71f438968921 github.com/buger/goterm v1.0.3 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e - github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 github.com/coreos/go-systemd/v22 v22.3.2 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e - github.com/dgraph-io/badger/v2 v2.2007.2 + github.com/dgraph-io/badger/v2 v2.2007.3 + github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/docker/go-units v0.4.0 - github.com/drand/drand v1.2.1 - github.com/drand/kyber v1.1.4 + github.com/drand/drand v1.3.0 + github.com/drand/kyber v1.1.7 github.com/dustin/go-humanize v1.0.0 github.com/elastic/go-sysinfo v1.7.0 github.com/elastic/gosigar v0.14.1 github.com/etclabscore/go-openrpc-reflect v0.0.36 github.com/fatih/color v1.13.0 - github.com/filecoin-project/dagstore v0.4.3 + github.com/filecoin-project/dagstore v0.4.4 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f github.com/filecoin-project/go-address v0.0.6 github.com/filecoin-project/go-bitfield v0.2.4 github.com/filecoin-project/go-cbor-util v0.0.1 - github.com/filecoin-project/go-commp-utils v0.1.2 + github.com/filecoin-project/go-commp-utils v0.1.3 github.com/filecoin-project/go-crypto v0.0.1 - github.com/filecoin-project/go-data-transfer v1.11.4 + github.com/filecoin-project/go-data-transfer v1.12.1 github.com/filecoin-project/go-fil-commcid v0.1.0 github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 - github.com/filecoin-project/go-fil-markets v1.13.4 + github.com/filecoin-project/go-fil-markets v1.14.1 github.com/filecoin-project/go-jsonrpc v0.1.5 github.com/filecoin-project/go-padreader v0.0.1 - github.com/filecoin-project/go-paramfetch v0.0.2 + github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 github.com/filecoin-project/go-state-types v0.1.1 github.com/filecoin-project/go-statemachine v1.0.1 - github.com/filecoin-project/go-statestore v0.1.1 - github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b + github.com/filecoin-project/go-statestore v0.2.0 + github.com/filecoin-project/go-storedcounter v0.1.0 github.com/filecoin-project/specs-actors v0.9.14 - github.com/filecoin-project/specs-actors/v2 v2.3.5 + github.com/filecoin-project/specs-actors/v2 v2.3.6 github.com/filecoin-project/specs-actors/v3 v3.1.1 github.com/filecoin-project/specs-actors/v4 v4.0.1 github.com/filecoin-project/specs-actors/v5 v5.0.4 github.com/filecoin-project/specs-actors/v6 v6.0.1 - github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 + github.com/filecoin-project/specs-actors/v7 v7.0.0-20211230214648-aeae366b083a + github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 github.com/filecoin-project/test-vectors/schema v0.0.5 github.com/gbrlsnchs/jwt/v3 v3.0.1 github.com/gdamore/tcell/v2 v2.2.0 github.com/go-kit/kit v0.12.0 + github.com/golang/glog v1.0.0 // indirect github.com/golang/mock v1.6.0 github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.7.4 @@ -67,60 +68,60 @@ require ( github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab github.com/ipfs/bbloom v0.0.4 - github.com/ipfs/go-bitswap v0.3.4 + github.com/ipfs/go-bitswap v0.5.1 github.com/ipfs/go-block-format v0.0.3 - github.com/ipfs/go-blockservice v0.1.7 + github.com/ipfs/go-blockservice v0.2.1 github.com/ipfs/go-cid v0.1.0 github.com/ipfs/go-cidutil v0.0.2 - github.com/ipfs/go-datastore v0.4.6 - github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e - github.com/ipfs/go-ds-leveldb v0.4.2 - github.com/ipfs/go-ds-measure v0.1.0 - github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 + github.com/ipfs/go-datastore v0.5.1 + github.com/ipfs/go-ds-badger2 v0.1.2 + github.com/ipfs/go-ds-leveldb v0.5.0 + github.com/ipfs/go-ds-measure v0.2.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.10.6 - github.com/ipfs/go-ipfs-blockstore v1.0.4 + github.com/ipfs/go-graphsync v0.11.5 + github.com/ipfs/go-ipfs-blockstore v1.1.2 github.com/ipfs/go-ipfs-blocksutil v0.0.1 github.com/ipfs/go-ipfs-chunker v0.0.5 - github.com/ipfs/go-ipfs-ds-help v1.0.0 - github.com/ipfs/go-ipfs-exchange-interface v0.0.1 - github.com/ipfs/go-ipfs-exchange-offline v0.0.1 + github.com/ipfs/go-ipfs-ds-help v1.1.0 + github.com/ipfs/go-ipfs-exchange-interface v0.1.0 + github.com/ipfs/go-ipfs-exchange-offline v0.1.1 github.com/ipfs/go-ipfs-files v0.0.9 github.com/ipfs/go-ipfs-http-client v0.0.6 - github.com/ipfs/go-ipfs-routing v0.1.0 + github.com/ipfs/go-ipfs-routing v0.2.1 github.com/ipfs/go-ipfs-util v0.0.2 - github.com/ipfs/go-ipld-cbor v0.0.5 + github.com/ipfs/go-ipld-cbor v0.0.6 github.com/ipfs/go-ipld-format v0.2.0 github.com/ipfs/go-ipld-legacy v0.1.1 // indirect - github.com/ipfs/go-log/v2 v2.3.0 - github.com/ipfs/go-merkledag v0.4.1 + github.com/ipfs/go-log/v2 v2.4.0 + github.com/ipfs/go-merkledag v0.5.1 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-metrics-prometheus v0.0.2 github.com/ipfs/go-path v0.0.7 github.com/ipfs/go-unixfs v0.2.6 github.com/ipfs/interface-go-ipfs-core v0.4.0 - github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 - github.com/ipld/go-car/v2 v2.1.0 + github.com/ipld/go-car v0.3.3 + github.com/ipld/go-car/v2 v2.1.1 github.com/ipld/go-codec-dagpb v1.3.0 - github.com/ipld/go-ipld-prime v0.14.2 + github.com/ipld/go-ipld-prime v0.14.3 github.com/ipld/go-ipld-selector-text-lite v0.0.1 + github.com/jonboulle/clockwork v0.2.2 // indirect github.com/kelseyhightower/envconfig v1.4.0 github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 - github.com/libp2p/go-libp2p v0.15.0 - github.com/libp2p/go-libp2p-connmgr v0.2.4 - github.com/libp2p/go-libp2p-core v0.9.0 - github.com/libp2p/go-libp2p-discovery v0.5.1 - github.com/libp2p/go-libp2p-kad-dht v0.13.0 - github.com/libp2p/go-libp2p-noise v0.2.2 - github.com/libp2p/go-libp2p-peerstore v0.3.0 - github.com/libp2p/go-libp2p-pubsub v0.5.6 - github.com/libp2p/go-libp2p-quic-transport v0.11.2 + github.com/libp2p/go-libp2p v0.17.0 + github.com/libp2p/go-libp2p-connmgr v0.3.0 + github.com/libp2p/go-libp2p-core v0.13.0 + github.com/libp2p/go-libp2p-discovery v0.6.0 + github.com/libp2p/go-libp2p-kad-dht v0.15.0 + github.com/libp2p/go-libp2p-noise v0.3.0 + github.com/libp2p/go-libp2p-peerstore v0.6.0 + github.com/libp2p/go-libp2p-pubsub v0.6.0 + github.com/libp2p/go-libp2p-quic-transport v0.15.2 github.com/libp2p/go-libp2p-record v0.1.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 - github.com/libp2p/go-libp2p-swarm v0.5.3 - github.com/libp2p/go-libp2p-tls v0.2.0 - github.com/libp2p/go-libp2p-yamux v0.5.4 + github.com/libp2p/go-libp2p-swarm v0.9.0 + github.com/libp2p/go-libp2p-tls v0.3.1 + github.com/libp2p/go-libp2p-yamux v0.7.0 github.com/libp2p/go-maddr-filter v0.1.0 github.com/mattn/go-isatty v0.0.14 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 @@ -136,10 +137,11 @@ require ( github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e github.com/prometheus/client_golang v1.11.0 github.com/raulk/clock v1.1.0 - github.com/raulk/go-watchdog v1.0.1 + github.com/raulk/go-watchdog v1.2.0 github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 github.com/stretchr/testify v1.7.0 github.com/syndtr/goleveldb v1.0.0 + github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect github.com/urfave/cli/v2 v2.2.0 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8 @@ -148,6 +150,10 @@ require ( github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 go.opencensus.io v0.23.0 + go.opentelemetry.io/otel v1.3.0 + go.opentelemetry.io/otel/bridge/opencensus v0.25.0 + go.opentelemetry.io/otel/exporters/jaeger v1.2.0 + go.opentelemetry.io/otel/sdk v1.2.0 go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 go.uber.org/multierr v1.7.0 @@ -157,7 +163,7 @@ require ( golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20211209171907-798191bca915 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac - golang.org/x/tools v0.1.5 + golang.org/x/tools v0.1.7 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible @@ -167,3 +173,7 @@ require ( replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi replace github.com/filecoin-project/test-vectors => ./extern/test-vectors + +//replace github.com/filecoin-project/specs-actors/v7 => /Users/zenground0/pl/repos/specs-actors + +// replace github.com/filecon-project/specs-storage => /Users/zenground0/pl/repos/specs-storage diff --git a/go.sum b/go.sum index 12fe4eabf..36302c0d7 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= -contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= @@ -117,11 +115,11 @@ github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVj github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.2.0 h1:9Re3G2TWxkE06LdMWMpcY6KV81GLXMGiYpPYUPkFAws= +github.com/benbjohnson/clock v1.2.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -159,8 +157,6 @@ github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRt github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 h1:JLaf/iINcLyjwbtTsCJjc6rtlASgHeIJPrB6QmwURnA= -github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -185,15 +181,6 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 h1:Cb2pZUCFXlLA8i7My+wrN51D41GeuhYOKa1dJeZt6NY= -github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3 h1:2+dpIJzYMSbLi0587YXpi8tOJT52qCOI/1I0UNThc/I= -github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw= @@ -232,7 +219,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= -github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= @@ -244,13 +230,12 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= -github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= -github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= -github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/badger/v2 v2.2007.3 h1:Sl9tQWz92WCbVSe8pj04Tkqlm2boW+KAxd+XSs58SQI= +github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= @@ -258,12 +243,13 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand v1.2.1 h1:KB7z+69YbnQ5z22AH/LMi0ObDR8DzYmrkS6vZXTR9jI= -github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g= +github.com/drand/drand v1.3.0 h1:k/w/PtHzmlU6OmfoAqgirWyrJ4FZH8ESlJrsKF20UkM= +github.com/drand/drand v1.3.0/go.mod h1:D6kAVlxufq1gi71YCGfzN455JrXF4Q272ZJEG975fzo= github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber v1.1.4 h1:YvKM03QWGvLrdTnYmxxP5iURAX+Gdb6qRDUOgg8i60Q= github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= +github.com/drand/kyber v1.1.7 h1:YnOshFoGYSOdhf4K8BiDw4XL/l6caL92vsodAsVQbJI= +github.com/drand/kyber v1.1.7/go.mod h1:UkHLsI4W6+jT5PvNxmc0cvQAgppjTUpX+XCsN9TXmRo= github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= github.com/drand/kyber-bls12381 v0.2.1 h1:/d5/YAdaCmHpYjF1NZevOEcKGaq6LBbyvkCTIdGqDjs= github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= @@ -302,9 +288,9 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/filecoin-project/dagstore v0.4.2/go.mod h1:WY5OoLfnwISCk6eASSF927KKPqLPIlTwmG1qHpA08KY= -github.com/filecoin-project/dagstore v0.4.3 h1:yeFl6+2BRY1gOVp/hrZuFa24s7LY0Qqkqx/Gh8lidZs= -github.com/filecoin-project/dagstore v0.4.3/go.mod h1:dm/91AO5UaDd3bABFjg/5fmRH99vvpS7g1mykqvz6KQ= +github.com/filecoin-project/dagstore v0.4.3-0.20211211192320-72b849e131d2/go.mod h1:tlV8C11UljvFq3WWlMh2oMViEaVaPb6uT8eL/YQgDfk= +github.com/filecoin-project/dagstore v0.4.4 h1:luolWahhzp3ulRsapGKE7raoLE3n2cFkQUJjPyqUmF4= +github.com/filecoin-project/dagstore v0.4.4/go.mod h1:7BlOvaTJrFJ1Qemt5jHlLJ4VhDIuSIzGS0IwO/0AXPA= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-address v0.0.6 h1:DWQtj38ax+ogHwyH3VULRIoT8E6loyXqsk/p81xoY7M= @@ -321,26 +307,24 @@ github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQj github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= -github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= -github.com/filecoin-project/go-commp-utils v0.1.2 h1:SKLRuGdx/6WlolaWKaUzzUYWGGePuARyO4guxOPxvt4= -github.com/filecoin-project/go-commp-utils v0.1.2/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= +github.com/filecoin-project/go-commp-utils v0.1.3 h1:rTxbkNXZU7FLgdkBk8RsQIEOuPONHykEoX3xGk41Fkw= +github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9ANQrY3fDFoXdqyX04J+dWpK30= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.11.4 h1:jKvlx0/C8HSyLRn/G1P9TjtfBtFU9jbCvCVFmWbyYVQ= -github.com/filecoin-project/go-data-transfer v1.11.4/go.mod h1:2MitLI0ebCkLlPKM7NRggP/t9d+gCcREUKkCKqWRCwU= -github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= -github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-data-transfer v1.12.0/go.mod h1:tDrD2jLU2TpVhd+5B8iqBp0fQRV4lP80WZccKXugjYc= +github.com/filecoin-project/go-data-transfer v1.12.1 h1:gAznAZKySVs2FS6T/vDq7R3f0DewLnxeROe0oOE6bZU= +github.com/filecoin-project/go-data-transfer v1.12.1/go.mod h1:j3HL645YiQFxcM+q7uPlGApILSqeweDABNgZQP7pDYU= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff h1:2bG2ggVZ/rInd/YqUfRj4A5siGuYOPxxuD4I8nYLJF0= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= -github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.13.4 h1:NAu+ACelR2mYsj+yJ4iLu8FGqWK50OnU5VF8axkLsSc= -github.com/filecoin-project/go-fil-markets v1.13.4/go.mod h1:aANjXD2XMHWnT2zWpyGWLsWLC24C4mHm0gRm85OpPWE= +github.com/filecoin-project/go-fil-markets v1.14.1 h1:Bx+TSbkAN8K97Hpjgu+MpeRFbXIKH/fNpNp1ZGAEH3I= +github.com/filecoin-project/go-fil-markets v1.14.1/go.mod h1:vXOHH3q2+zLk929W+lIq3etuDFTyJJ8nG2DwGHG2R1E= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -350,15 +334,13 @@ github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AG github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= github.com/filecoin-project/go-jsonrpc v0.1.5 h1:ckxqZ09ivBAVf5CSmxxrqqNHC7PJm3GYGtYKiNQ+vGk= github.com/filecoin-project/go-jsonrpc v0.1.5/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= -github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= -github.com/filecoin-project/go-paramfetch v0.0.2 h1:a6W3Ij6CKhwHYYlx+5mqvBIyw4CabZH2ojdEaoAZ6/g= -github.com/filecoin-project/go-paramfetch v0.0.2/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= +github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 h1:+nripp+UI/rhl01w9Gs4V0XDGaVPYPMGU/D/gNVLue0= +github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= -github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= @@ -370,36 +352,36 @@ github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/g github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw= github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/c3OROw/kXVNSTZk= -github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= -github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= -github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNdofHZoGPjfNaAo5Q= +github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= +github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus= +github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY= github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= -github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= -github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= -github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v2 v2.3.6 h1:UxnWTfQd7JsOae39/aHCK0m1IBjdcyymCJfqxuSkn+g= +github.com/filecoin-project/specs-actors/v2 v2.3.6/go.mod h1:DJMpxVRXvev9t8P0XWA26RmTzN+MHiL9IlItVLT0zUc= github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= -github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= github.com/filecoin-project/specs-actors/v5 v5.0.4 h1:OY7BdxJWlUfUFXWV/kpNBYGXNPasDIedf42T3sGx08s= github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4UvT/lTLInCJ3JwOWZbX8Ipwq4= +github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew= github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211230214648-aeae366b083a h1:MS1mtAhZh0iSE7OxP1bb6+UNyYKsxg8n51FpHlX1d54= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211230214648-aeae366b083a/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 h1:oUYOvF7EvdXS0Zmk9mNkaB6Bu0l+WXBYPzVodKMiLug= +github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -420,9 +402,6 @@ github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdk github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= -github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= @@ -444,6 +423,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1 h1:DX7uPQ4WgAWfoh+NGGlbJQswnYIVvz0SRlLS3rPZQDA= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0 h1:j4LrlVXgrbIWO83mmQUnK0Hi+YnbD+vzrE1z/EphbFE= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= @@ -489,6 +473,8 @@ github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -526,7 +512,6 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -594,7 +579,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -674,20 +658,16 @@ github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyq github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.3.4 h1:AhJhRrG8xkxh6x87b4wWs+4U4y3DVB3doI8yFNqgQME= -github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= +github.com/ipfs/go-bitswap v0.5.1 h1:721YAEDBnLIrvcIMkCHCdqp34hA8jwL9yKMkyJpSpco= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.5/go.mod h1:yLk8lBJCBRWRqerqCSVi3cE/Dncdt3vGC/PJMVKhLTY= -github.com/ipfs/go-blockservice v0.1.7 h1:yVe9te0M7ow8i+PPkx03YFSpxqzXx594d6h+34D6qMg= -github.com/ipfs/go-blockservice v0.1.7/go.mod h1:GmS+BAt4hrwBKkzE11AFDQUrnvqjwFatGS2MY7wOjEM= +github.com/ipfs/go-blockservice v0.2.1 h1:NJ4j/cwEfIg60rzAWcCIxRtOwbf6ZPK49MewNxObCPQ= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -697,7 +677,6 @@ github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67Fexh github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.0.8-0.20210716091050-de6c03deae1c/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cid v0.1.0 h1:YN33LQulcRHjfom/i25yoOZR4Telp1Hr/2RU3d0PnC0= github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo= @@ -706,15 +685,15 @@ github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAK github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.4.6 h1:zU2cmweykxJ+ziXnA2cPtsLe8rdR/vrthOipLPuf6kc= -github.com/ipfs/go-datastore v0.4.6/go.mod h1:XSipLSc64rFKSFRFGo1ecQl+WhYce3K7frtpHkyPFUc= +github.com/ipfs/go-datastore v0.4.7-0.20211013204805-28a3721c2e66/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -722,41 +701,33 @@ github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaH github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger v0.2.6/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger v0.2.7 h1:ju5REfIm+v+wgVnQ19xGLYPHYHbYLR6qJfmMbCDSK1I= github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger2 v0.1.0/go.mod h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e h1:Xi1nil8K2lBOorBS6Ys7+hmUCzH8fr3U9ipdL/IrcEI= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= +github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-badger2 v0.1.2 h1:sQc2q1gaXrv8YFNeUtxil0neuyDf9hnVHfLsi7lpXfE= +github.com/ipfs/go-ds-badger2 v0.1.2/go.mod h1:3FtQmDv6fMubygEfU43bsFelYpIiXX/XEYA54l9eCwg= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= -github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 h1:W3YMLEvOXqdW+sYMiguhWP6txJwQvIQqhvpU8yAMGQs= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= -github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= -github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ds-measure v0.2.0 h1:sG4goQe0KDTccHMyT45CY1XyUbxe5VwTKpg2LjApYyQ= +github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9UvVh8V3JxE= +github.com/ipfs/go-filestore v1.1.0 h1:Pu4tLBi1bucu6/HU9llaOmb9yLFk/sgP+pW764zNDoE= +github.com/ipfs/go-filestore v1.1.0/go.mod h1:6e1/5Y6NvLuCRdmda/KA4GUhXJQ3Uat6vcWm2DJfxc8= github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= -github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= -github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.10.0/go.mod h1:cKIshzTaa5rCZjryH5xmSKZVGX9uk1wvwGvz2WEha5Y= -github.com/ipfs/go-graphsync v0.10.6 h1:GkYan4EoDslceHaqYo/hxktWtuZ7VmsyRXLdSmoCcBQ= -github.com/ipfs/go-graphsync v0.10.6/go.mod h1:tQMjWNDD/vSz80YLT/VvzrUmy58aF9lR1uCwSLzjWzI= -github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= +github.com/ipfs/go-graphsync v0.11.0/go.mod h1:wC+c8vGVjAHthsVIl8LKr37cUra2GOaMYcQNNmMxDqE= +github.com/ipfs/go-graphsync v0.11.5 h1:WA5hVxGBtcal6L6nqubKiqRolaZxbexOK3GumGFJRR4= +github.com/ipfs/go-graphsync v0.11.5/go.mod h1:+/sZqRwRCQRrV7NCzgBtufmr5QGpUE98XSa7NlsztmM= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v0.1.6/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= -github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.4 h1:DZdeya9Vu4ttvlGheQPGrj6kWehXnYZRFCp9EsZQ1hI= -github.com/ipfs/go-ipfs-blockstore v1.0.4/go.mod h1:uL7/gTJ8QIZ3MtA3dWf+s1a0U3fJy2fcEZAsovpRp+w= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.1.0/go.mod h1:5QDUApRqpgPcfGstCxYeMnjt/DYQtXXdJVCvxHHuWVk= +github.com/ipfs/go-ipfs-blockstore v1.1.1/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= +github.com/ipfs/go-ipfs-blockstore v1.1.2 h1:WCXoZcMYnvOTmlpX+RSSnhVN0uCmbWTeepTGX5lgiXw= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= @@ -771,12 +742,15 @@ github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1I github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1 h1:mEiXWdbMN6C7vtDG21Fphx8TGCbZPpQnz/496w/PL4g= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= @@ -790,17 +764,19 @@ github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7 github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= -github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= +github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= @@ -827,15 +803,15 @@ github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHn github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0 h1:31Re/cPqFHpsRHgyVwjWADPoF0otB1WrjTy8ZFYwEZU= github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= +github.com/ipfs/go-log/v2 v2.4.0 h1:iR/2o9PGWanVJrBgIH5Ff8mPGOwpqLaPIAFqSnsdlzk= +github.com/ipfs/go-log/v2 v2.4.0/go.mod h1:nPZnh7Cj7lwS3LpRU5Mwr2ol1c2gXIEXuF6aywqrtmo= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.4.1 h1:CEEQZnwRkszN06oezuasHwDD823Xcr4p4zluUN9vXqs= -github.com/ipfs/go-merkledag v0.4.1/go.mod h1:56biPaS6e+IS0eXkEt6A8tG+BUQaEIFqDqJuFfQDBoE= +github.com/ipfs/go-merkledag v0.5.1 h1:tr17GPP5XtPhvPPiWtu20tSGZiZDuTaJRXBLcr79Umk= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-prometheus v0.0.2 h1:9i2iljLg12S78OhC6UAiXi176xvQGiZaGVF1CUVdE+s= @@ -844,10 +820,9 @@ github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho= github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= -github.com/ipfs/go-peertaskqueue v0.6.0 h1:BT1/PuNViVomiz1PnnP5+WmKsTNHrxIDvkZrkj4JhOg= -github.com/ipfs/go-peertaskqueue v0.6.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.1 h1:7PLjon3RZwRQMgOTvYccZ+mjzkmds/7YzSWKFlBAypE= +github.com/ipfs/go-peertaskqueue v0.7.1/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= @@ -862,33 +837,28 @@ github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdm github.com/ipfs/iptb-plugins v0.3.0 h1:C1rpq1o5lUZtaAOkLIox5akh6ba4uk/3RwWc6ttVxw0= github.com/ipfs/iptb-plugins v0.3.0/go.mod h1:5QtOvckeIw4bY86gSH4fgh3p3gCSMn3FmIKr4gaBncA= github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= -github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= -github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= -github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 h1:8JMSJ0k71fU9lIUrpVwEdoX4KoxiTEX8cZG97v/hTDw= -github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823/go.mod h1:jSlTph+i/q1jLFoiKKeN69KGG0fXpwrcD0izu5C1Tpo= -github.com/ipld/go-car/v2 v2.0.0-beta1.0.20210721090610-5a9d1b217d25/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.0.2/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.1.0 h1:t8R/WXUSkfu1K1gpPk76mytCxsEdMjGcMIgpOq3/Cnw= -github.com/ipld/go-car/v2 v2.1.0/go.mod h1:Xr6GwkDhv8dtOtgHzOynAkIOg0t0YiPc5DxBPppWqZA= +github.com/ipld/go-car v0.3.3-0.20211210032800-e6f244225a16/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car v0.3.3 h1:D6y+jvg9h2ZSv7GLUMWUwg5VTLy1E7Ak+uQw5orOg3I= +github.com/ipld/go-car v0.3.3/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car/v2 v2.1.1-0.20211211000942-be2525f6bf2d/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.1.1 h1:saaKz4nC0AdfCGHLYKeXLGn8ivoPC54fyS55uyOLKwA= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= github.com/ipld/go-codec-dagpb v1.3.0 h1:czTcaoAuNNyIYWs6Qe01DJ+sEX7B+1Z0LcXjSatMGe8= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= -github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= -github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.12.3-0.20210930132912-0b3aef3ca569/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= -github.com/ipld/go-ipld-prime v0.14.2 h1:P5fO2usnisXwrN/1sR5exCgEvINg/w/27EuYPKB/zx8= -github.com/ipld/go-ipld-prime v0.14.2/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= +github.com/ipld/go-ipld-prime v0.14.3-0.20211207234443-319145880958/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.14.3 h1:cGUmxSws2IHurn00/iLMDapeXsnf9+FyAtYVy8G/JsQ= +github.com/ipld/go-ipld-prime v0.14.3/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= -github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= @@ -898,7 +868,6 @@ github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= @@ -920,8 +889,9 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1 h1:qBCV/RLV02TSfQa7tFmxTihnG+u+7JXByOkhlkR5rmQ= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -964,7 +934,6 @@ github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.8/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -997,8 +966,9 @@ github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40J github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-conn-security-multistream v0.3.0 h1:9UCIKlBL1hC9u7nkMXpD1nkc/T53PKMAn3/k9ivBAVc= +github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= @@ -1012,53 +982,48 @@ github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68 github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= github.com/libp2p/go-libp2p v0.3.1/go.mod h1:e6bwxbdYH1HqWTz8faTChKGR0BjPc8p+6SyP8GTTR7Y= github.com/libp2p/go-libp2p v0.4.0/go.mod h1:9EsEIf9p2UDuwtPd0DwJsAl0qXVxgAnuDGRvHbfATfI= -github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= -github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= -github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= -github.com/libp2p/go-libp2p v0.14.0/go.mod h1:dsQrWLAoIn+GkHPN/U+yypizkHiB9tnv79Os+kSgQ4Q= +github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM= -github.com/libp2p/go-libp2p v0.15.0 h1:jbMbdmtizfpvl1+oQuGJzfGhttAtuxUCavF3enwFncg= -github.com/libp2p/go-libp2p v0.15.0/go.mod h1:8Ljmwon0cZZYKrOCjFeLwQEK8bqR42dOheUZ1kSKhP0= -github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= +github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= +github.com/libp2p/go-libp2p v0.17.0 h1:8l4GV401OSd4dFRyHDtIT/mEzdh/aQGoFC8xshYgm5M= +github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= +github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E= +github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o= +github.com/libp2p/go-libp2p-autonat v0.7.0 h1:rCP5s+A2dlhM1Xd66wurE0k7S7pPmM0D+FlqqSBXxks= +github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= -github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-blankhost v0.3.0 h1:kTnLArltMabZlzY63pgGDA4kkUcLkBFSM98zBssn/IY= +github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.3/go.mod h1:Xqh2TjSy8DD5iV2cCOMzdynd6h8OTBGoV1AWbWor3qM= github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= -github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= -github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= -github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= -github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= +github.com/libp2p/go-libp2p-connmgr v0.3.0 h1:yerFXrYa0oxpuVsLlndwm/bLulouHYDcvFrY/4H4fx8= +github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= @@ -1088,8 +1053,12 @@ github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJB github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= -github.com/libp2p/go-libp2p-core v0.9.0 h1:t97Mv0LIBZlP2FXVRNKKVzHJCIjbIWGxYptGId4+htU= github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= +github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.13.0 h1:IFG/s8dN6JN2OTrXX9eq2wNU/Zlz2KLdwZUp5FplgXI= +github.com/libp2p/go-libp2p-core v0.13.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= @@ -1098,10 +1067,9 @@ github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2T github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-discovery v0.5.1 h1:CJylx+h2+4+s68GvrM4pGNyfNhOYviWBPtVv5PA7sfo= -github.com/libp2p/go-libp2p-discovery v0.5.1/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-discovery v0.6.0 h1:1XdPmhMJr8Tmj/yUfkJMIi8mgwWrLUsCB3bMxdT+DSo= +github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= @@ -1109,8 +1077,8 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= -github.com/libp2p/go-libp2p-kad-dht v0.13.0 h1:qBNYzee8BVS6RkD8ukIAGRG6LmVz8+kkeponyI7W+yA= -github.com/libp2p/go-libp2p-kad-dht v0.13.0/go.mod h1:NkGf28RNhPrcsGYWJHm6EH8ULkiJ2qxsWmpE7VTL3LI= +github.com/libp2p/go-libp2p-kad-dht v0.15.0 h1:Ke+Oj78gX5UDXnA6HBdrgvi+fStJxgYTDa51U0TsCLo= +github.com/libp2p/go-libp2p-kad-dht v0.15.0/go.mod h1:rZtPxYu1TnHHz6n1RggdGrxUX/tA1C2/Wiw3ZMUDrU0= github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= @@ -1129,17 +1097,17 @@ github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aD github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-nat v0.1.0 h1:vigUi2MEN+fwghe5ijpScxtbbDz+L/6y8XwlzYOJgSY= +github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-noise v0.2.2 h1:MRt5XGfYziDXIUy2udtMWfPmzZqUDYoC1FZoKnqPzwk= -github.com/libp2p/go-libp2p-noise v0.2.2/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= +github.com/libp2p/go-libp2p-noise v0.3.0 h1:NCVH7evhVt9njbTQshzT7N1S3Q6fjj9M11FCgfH5+cA= +github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= @@ -1151,26 +1119,26 @@ github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVd github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= -github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= -github.com/libp2p/go-libp2p-peerstore v0.3.0 h1:wp/G0+37+GLr7tu+wE+4GWNrA3uxKg6IPRigIMSS5oQ= -github.com/libp2p/go-libp2p-peerstore v0.3.0/go.mod h1:fNX9WlOENMvdx/YD7YO/5Hkrn8+lQIk5A39BHa1HIrM= +github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= +github.com/libp2p/go-libp2p-peerstore v0.6.0 h1:HJminhQSGISBIRb93N6WK3t6Fa8OOTnHd/VBjL4mY5A= +github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= -github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.5.6 h1:YkO3gG9J1mQBEMRrM5obiG3JD0L8RcrzIpoeLeiYqH8= -github.com/libp2p/go-libp2p-pubsub v0.5.6/go.mod h1:gVOzwebXVdSMDQBTfH8ACO5EJ4SQrvsHqCmYsCZpD0E= +github.com/libp2p/go-libp2p-pubsub v0.6.0 h1:98+RXuEWW17U6cAijK1yaTf6mw/B+n5yPA421z+dlo0= +github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= -github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-quic-transport v0.11.2 h1:p1YQDZRHH4Cv2LPtHubqlQ9ggz4CKng/REZuXZbZMhM= github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ= +github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= +github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-quic-transport v0.15.2 h1:wHBEceRy+1/8Ec8dAIyr+/P7L2YefIGprPVy5LrMM+k= +github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -1191,14 +1159,13 @@ github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evl github.com/libp2p/go-libp2p-swarm v0.2.1/go.mod h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+74Jjio7xGvsTgU= github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y= -github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= -github.com/libp2p/go-libp2p-swarm v0.5.3 h1:hsYaD/y6+kZff1o1Mc56NcuwSg80lIphTS/zDk3mO4M= github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= +github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= +github.com/libp2p/go-libp2p-swarm v0.9.0 h1:LdWjHDVjPMYt3NCG2EHcQiIP8XzA8BHhHz8ZLAYol2Y= +github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1208,22 +1175,26 @@ github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= -github.com/libp2p/go-libp2p-testing v0.4.2 h1:IOiA5mMigi+eEjf4J+B7fepDhsjtsoWA9QbsCqbNp5U= github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.6.0 h1:tV/wz6mS1VoAYA/5DGTiyzw9TJ+eXMCMvzU5VPLJSgg= +github.com/libp2p/go-libp2p-testing v0.6.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-tls v0.2.0 h1:N8i5wPiHudA+02sfW85R2nUbybPm7agjAywZc6pd3xA= -github.com/libp2p/go-libp2p-tls v0.2.0/go.mod h1:twrp2Ci4lE2GYspA1AnlYm+boYjqVruxDKJJj7s6xrc= +github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.3.1 h1:lsE2zYte+rZCEOHF72J1Fg3XK3dGQyKvI6i5ehJfEp0= +github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 h1:SHt3g0FslnqIkEWF25YOB8UCOCTpGAVvHRWQYJ+veiI= github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= +github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0 h1:GfMCU+2aGGEm1zW3UcOz6wYSn8tXQalFfVfcww99i5A= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo= github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= @@ -1235,10 +1206,10 @@ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhL github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= -github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= -github.com/libp2p/go-libp2p-yamux v0.5.3/go.mod h1:Vy3TMonBAfTMXHWopsMc8iX/XGRYrRlpUaMzaeuHV/s= -github.com/libp2p/go-libp2p-yamux v0.5.4 h1:/UOPtT/6DHPtr3TtKXBHa6g0Le0szYuI33Xc/Xpd7fQ= github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= +github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= +github.com/libp2p/go-libp2p-yamux v0.7.0 h1:bVXHbTj/XH4uBBsPrg26BlDABk5WYRlssY73P0SjhPc= +github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -1255,12 +1226,14 @@ github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-msgio v0.1.0 h1:8Q7g/528ivAlfXTFWvWhVjTE8XG8sDTkRUKPYh9+5Q8= +github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= @@ -1273,13 +1246,15 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport v0.1.0 h1:0ooKOx2iwyIkf339WCZ2HN3ujTDbkK0PjC7JVoP1AiM= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-reuseport-transport v0.0.5 h1:lJzi+vSYbyJj2faPKLxNGWEIBcaV/uJmyvsUxXy2mLw= github.com/libp2p/go-reuseport-transport v0.0.5/go.mod h1:TC62hhPc8qs5c/RoXDZG6YmjK+/YWUPC0yYmeUecbjc= +github.com/libp2p/go-reuseport-transport v0.1.0 h1:C3PHeHjmnz8m6f0uydObj02tMEoi7CyD1zuN7xQT8gc= +github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= @@ -1294,11 +1269,11 @@ github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19 github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= +github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= -github.com/libp2p/go-tcp-transport v0.2.8 h1:aLjX+Nkz+kIz3uA56WtlGKRSAnKDvnqKmv1qF4EyyE4= -github.com/libp2p/go-tcp-transport v0.2.8/go.mod h1:64rSfVidkYPLqbzpcN2IwHY4pmgirp67h++hZ/rcndQ= +github.com/libp2p/go-tcp-transport v0.4.0 h1:VDyg4j6en3OuXf90gfDQh5Sy9KowO9udnd0OU8PP6zg= +github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= @@ -1306,7 +1281,6 @@ github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= github.com/libp2p/go-ws-transport v0.5.0 h1:cO6x4P0v6PfxbKnxmf5cY2Ny4OPDGYkUqNvZzp/zdlo= github.com/libp2p/go-ws-transport v0.5.0/go.mod h1:I2juo1dNTbl8BKSBYo98XY85kU2xds1iamArLvl8kNg= @@ -1316,23 +1290,22 @@ github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/go-yamux/v2 v2.1.1/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/go-yamux/v2 v2.2.0 h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU= github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/zeroconf/v2 v2.0.0/go.mod h1:J85R/d9joD8u8F9aHM8pBXygtG9W02enEwS+wWeL6yo= +github.com/libp2p/go-yamux/v2 v2.3.0 h1:luRV68GS1vqqr6EFUjtu1kr51d+IbW0gSowu8emYWAI= +github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs= +github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= -github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.21.2 h1:8LqqL7nBQFDUINadW0fHV/xSaCQJgmJC0Gv+qUnjd78= github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q= +github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.24.0 h1:ToR7SIIEdrgOhgVTHvPgdVRJfgVy+N0wQAagH7L4d5g= +github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= @@ -1348,19 +1321,17 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-15 v0.1.5 h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk= github.com/marten-seemann/qtls-go1-15 v0.1.5/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-16 v0.1.4 h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco= github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= -github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1 h1:/rpmWuGvceLwwWuaKPdjpR4JJEUH0tq64/I3hvzaNLM= github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-17 v0.1.0 h1:P9ggrs5xtwiqXv/FHNwntmuLMNq3KaSIG93AtAZ48xk= +github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -1400,7 +1371,6 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= @@ -1481,12 +1451,9 @@ github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysj github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= -github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4= -github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.2.1-0.20210714093213-b2b5bd6fe68b/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61 h1:ZrUuMKNgJ52qHPoQ+bx0h0uBfcWmN7Px+4uKSZeesiI= github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= @@ -1504,7 +1471,6 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= @@ -1545,7 +1511,6 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= @@ -1556,7 +1521,6 @@ github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= @@ -1619,7 +1583,6 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= @@ -1656,7 +1619,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -1666,8 +1628,8 @@ github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= -github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= -github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= +github.com/raulk/go-watchdog v1.2.0 h1:konN75pw2BMmZ+AfuAm5rtFsWcJpKF3m02rKituuXNo= +github.com/raulk/go-watchdog v1.2.0/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1765,13 +1727,13 @@ github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5J github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6VSaahyKadf4WtSsJIgne6A1WLOAGM8A= github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -1828,7 +1790,6 @@ github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIf github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= @@ -1877,6 +1838,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= @@ -1906,13 +1868,34 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel/bridge/opencensus v0.25.0 h1:18Ww8TpCEGes12HZJzB2nEbUglvMLzPxqgZypsrKiNc= +go.opentelemetry.io/otel/bridge/opencensus v0.25.0/go.mod h1:dkZDdaNwLlIutxK2Kc2m3jwW2M1ISaNf8/rOYVwuVHs= +go.opentelemetry.io/otel/exporters/jaeger v1.2.0 h1:C/5Egj3MJBXRJi22cSl07suqPqtZLnLFmH//OxETUEc= +go.opentelemetry.io/otel/exporters/jaeger v1.2.0/go.mod h1:KJLFbEMKTNPIfOxcg/WikIozEoKcPgJRz3Ce1vLlM8E= +go.opentelemetry.io/otel/internal/metric v0.25.0 h1:w/7RXe16WdPylaIXDgcYM6t/q0K5lXgSdZOEbIEyliE= +go.opentelemetry.io/otel/internal/metric v0.25.0/go.mod h1:Nhuw26QSX7d6n4duoqAFi5KOQR4AuzyMcl5eXOgwxtc= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v0.25.0 h1:7cXOnCADUsR3+EOqxPaSKwhEuNu0gz/56dRN1hpIdKw= +go.opentelemetry.io/otel/metric v0.25.0/go.mod h1:E884FSpQfnJOMMUaq+05IWlJ4rjZpk2s/F1Ju+TEEm8= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk/export/metric v0.25.0 h1:6UjAFmVB5Fza3K5qUJpYWGrk8QMPIqlSnya5FI46VBY= +go.opentelemetry.io/otel/sdk/export/metric v0.25.0/go.mod h1:Ej7NOa+WpN49EIcr1HMUYRvxXXCCnQCg2+ovdt2z8Pk= +go.opentelemetry.io/otel/sdk/metric v0.25.0 h1:J+Ta+4IAA5W9AdWhGQLfciEpavBqqSkBzTDeYvJLFNU= +go.opentelemetry.io/otel/sdk/metric v0.25.0/go.mod h1:G4xzj4LvC6xDDSsVXpvRVclQCbofGGg4ZU2VKKtDRfg= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= +go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1976,11 +1959,9 @@ golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2013,7 +1994,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013 h1:Jp57DBw4K7mimZNA3F9f7CndVcUt4kJjmyJf2rzJHoI= @@ -2044,7 +2024,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2062,7 +2041,6 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -2079,7 +2057,6 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2091,10 +2068,8 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -2110,6 +2085,7 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= @@ -2200,9 +2176,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2227,6 +2201,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2234,6 +2209,7 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211209171907-798191bca915 h1:P+8mCzuEpyszAT6T42q0sxU+eveBAF/cJ2Kp0x6/8+0= @@ -2318,16 +2294,15 @@ golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210225150353-54dc8c5edb56/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2356,7 +2331,6 @@ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2398,7 +2372,6 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= diff --git a/itests/ccupgrade_test.go b/itests/ccupgrade_test.go index c5b380835..b63e96d24 100644 --- a/itests/ccupgrade_test.go +++ b/itests/ccupgrade_test.go @@ -6,10 +6,14 @@ import ( "testing" "time" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -28,37 +32,43 @@ func TestCCUpgrade(t *testing.T) { } } -func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) { +func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) *kit.TestFullNode { ctx := context.Background() - blockTime := 5 * time.Millisecond + blockTime := 1 * time.Millisecond - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.LatestActorsAt(upgradeHeight)) - ens.InterconnectAll().BeginMining(blockTime) + client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15)) + ens.InterconnectAll().BeginMiningMustPost(blockTime) maddr, err := miner.ActorAddress(ctx) if err != nil { t.Fatal(err) } - CC := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1) - Upgraded := CC + 1 + CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1) + fmt.Printf("CCUpgrade: %d\n", CCUpgrade) + // wait for deadline 0 to pass so that committing starts after post on preseals + // this gives max time for post to complete minimizing chances of timeout + // waitForDeadline(ctx, t, 1, client, maddr) miner.PledgeSectors(ctx, 1, 0, nil) - sl, err := miner.SectorsList(ctx) require.NoError(t, err) require.Len(t, sl, 1, "expected 1 sector") - require.Equal(t, CC, sl[0], "unexpected sector number") - + require.Equal(t, CCUpgrade, sl[0], "unexpected sector number") { - si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK) + si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK) require.NoError(t, err) require.Less(t, 50000, int(si.Expiration)) } + waitForSectorActive(ctx, t, CCUpgrade, client, maddr) - err = miner.SectorMarkForUpgrade(ctx, sl[0]) + err = miner.SectorMarkForUpgrade(ctx, sl[0], true) require.NoError(t, err) + sl, err = miner.SectorsList(ctx) + require.NoError(t, err) + require.Len(t, sl, 1, "expected 1 sector") + dh := kit.NewDealHarness(t, client, miner, miner) deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{ Rseed: 6, @@ -67,37 +77,112 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) { outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false) kit.AssertFilesEqual(t, inPath, outPath) - // Validate upgrade - - { - exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK) - if err != nil { - require.Contains(t, err.Error(), "failed to find sector 3") // already cleaned up - } else { - require.NoError(t, err) - require.NotNil(t, exp) - require.Greater(t, 50000, int(exp.OnTime)) - } - } - { - exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK) - require.NoError(t, err) - require.Less(t, 50000, int(exp.OnTime)) - } - - dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) + status, err := miner.SectorsStatus(ctx, CCUpgrade, true) require.NoError(t, err) + assert.Equal(t, 1, len(status.Deals)) + return client +} - // Sector should expire. +func waitForDeadline(ctx context.Context, t *testing.T, waitIdx uint64, node *kit.TestFullNode, maddr address.Address) { for { - // Wait for the sector to expire. - status, err := miner.SectorsStatus(ctx, CC, true) + ts, err := node.ChainHead(ctx) require.NoError(t, err) - if status.OnTime == 0 && status.Early == 0 { - break + dl, err := node.StateMinerProvingDeadline(ctx, maddr, ts.Key()) + require.NoError(t, err) + if dl.Index == waitIdx { + return } - t.Log("waiting for sector to expire") - // wait one deadline per loop. - time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blockTime) } } + +func waitForSectorActive(ctx context.Context, t *testing.T, sn abi.SectorNumber, node *kit.TestFullNode, maddr address.Address) { + for { + active, err := node.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + for _, si := range active { + if si.SectorNumber == sn { + fmt.Printf("ACTIVE\n") + return + } + } + + time.Sleep(time.Second) + } +} + +func waitForSectorStartUpgrade(ctx context.Context, t *testing.T, sn abi.SectorNumber, miner *kit.TestMiner) { + for { + si, err := miner.StorageMiner.SectorsStatus(ctx, sn, false) + require.NoError(t, err) + if si.State != api.SectorState("Proving") { + t.Logf("Done proving sector in state: %s", si.State) + return + } + + } +} + +func TestCCUpgradeAndPoSt(t *testing.T) { + kit.QuietMiningLogs() + t.Run("upgrade and then post", func(t *testing.T) { + ctx := context.Background() + n := runTestCCUpgrade(t, 100) + ts, err := n.ChainHead(ctx) + require.NoError(t, err) + start := ts.Height() + // wait for a full proving period + t.Log("waiting for chain") + + n.WaitTillChain(ctx, func(ts *types.TipSet) bool { + if ts.Height() > start+abi.ChainEpoch(2880) { + return true + } + return false + }) + }) +} + +func TestTooManyMarkedForUpgrade(t *testing.T) { + kit.QuietMiningLogs() + + ctx := context.Background() + blockTime := 1 * time.Millisecond + + client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15)) + ens.InterconnectAll().BeginMiningMustPost(blockTime) + + maddr, err := miner.ActorAddress(ctx) + if err != nil { + t.Fatal(err) + } + + CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1) + waitForDeadline(ctx, t, 1, client, maddr) + miner.PledgeSectors(ctx, 3, 0, nil) + + sl, err := miner.SectorsList(ctx) + require.NoError(t, err) + require.Len(t, sl, 3, "expected 3 sectors") + + { + si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK) + require.NoError(t, err) + require.Less(t, 50000, int(si.Expiration)) + } + + waitForSectorActive(ctx, t, CCUpgrade, client, maddr) + waitForSectorActive(ctx, t, CCUpgrade+1, client, maddr) + waitForSectorActive(ctx, t, CCUpgrade+2, client, maddr) + + err = miner.SectorMarkForUpgrade(ctx, CCUpgrade, true) + require.NoError(t, err) + err = miner.SectorMarkForUpgrade(ctx, CCUpgrade+1, true) + require.NoError(t, err) + + waitForSectorStartUpgrade(ctx, t, CCUpgrade, miner) + waitForSectorStartUpgrade(ctx, t, CCUpgrade+1, miner) + + err = miner.SectorMarkForUpgrade(ctx, CCUpgrade+2, true) + require.Error(t, err) + assert.Contains(t, err.Error(), "no free resources to wait for deals") +} diff --git a/itests/deals_concurrent_test.go b/itests/deals_concurrent_test.go index c0458e8d1..49a8bb008 100644 --- a/itests/deals_concurrent_test.go +++ b/itests/deals_concurrent_test.go @@ -139,7 +139,7 @@ func TestSimultanenousTransferLimit(t *testing.T) { ) runTest := func(t *testing.T) { client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts( - node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle, graphsyncThrottle))), + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle, 0, graphsyncThrottle))), node.Override(new(dtypes.Graphsync), modules.Graphsync(graphsyncThrottle, graphsyncThrottle)), )) ens.InterconnectAll().BeginMining(250 * time.Millisecond) diff --git a/itests/kit/blockminer.go b/itests/kit/blockminer.go index 2c9bd47c6..91ddc2e26 100644 --- a/itests/kit/blockminer.go +++ b/itests/kit/blockminer.go @@ -1,13 +1,18 @@ package kit import ( + "bytes" "context" "sync" "sync/atomic" "testing" "time" + "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + aminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/miner" "github.com/stretchr/testify/require" ) @@ -30,6 +35,176 @@ func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner { } } +type partitionTracker struct { + partitions []api.Partition + posted bitfield.BitField +} + +func newPartitionTracker(ctx context.Context, dlIdx uint64, bm *BlockMiner) *partitionTracker { + dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK) + require.NoError(bm.t, err) + dl := dlines[dlIdx] + + parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK) + require.NoError(bm.t, err) + return &partitionTracker{ + partitions: parts, + posted: dl.PostSubmissions, + } +} + +func (p *partitionTracker) count(t *testing.T) uint64 { + pCnt, err := p.posted.Count() + require.NoError(t, err) + return pCnt +} + +func (p *partitionTracker) done(t *testing.T) bool { + return uint64(len(p.partitions)) == p.count(t) +} + +func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, smsg *types.SignedMessage) (ret bool) { + defer func() { + ret = p.done(t) + }() + msg := smsg.Message + if !(msg.To == bm.miner.ActorAddr) { + return + } + if msg.Method != aminer.Methods.SubmitWindowedPoSt { + return + } + params := aminer.SubmitWindowedPoStParams{} + require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(msg.Params))) + for _, part := range params.Partitions { + p.posted.Set(part.Index) + } + return +} + +// Like MineBlocks but refuses to mine until the window post scheduler has wdpost messages in the mempool +// and everything shuts down if a post fails. It also enforces that every block mined succeeds +func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) { + + time.Sleep(3 * time.Second) + + // wrap context in a cancellable context. + ctx, bm.cancel = context.WithCancel(ctx) + bm.wg.Add(1) + go func() { + defer bm.wg.Done() + + activeDeadlines := make(map[int]struct{}) + _ = activeDeadlines + ts, err := bm.miner.FullNode.ChainHead(ctx) + require.NoError(bm.t, err) + wait := make(chan bool) + chg, err := bm.miner.FullNode.ChainNotify(ctx) + require.NoError(bm.t, err) + // read current out + curr := <-chg + require.Equal(bm.t, ts.Height(), curr[0].Val.Height()) + for { + select { + case <-time.After(blocktime): + case <-ctx.Done(): + return + } + nulls := atomic.SwapInt64(&bm.nextNulls, 0) + require.Equal(bm.t, int64(0), nulls, "Injecting > 0 null blocks while `MustPost` mining is currently unsupported") + + // Wake up and figure out if we are at the end of an active deadline + ts, err := bm.miner.FullNode.ChainHead(ctx) + require.NoError(bm.t, err) + tsk := ts.Key() + + dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, tsk) + require.NoError(bm.t, err) + if ts.Height()+1 == dlinfo.Last() { // Last epoch in dline, we need to check that miner has posted + + tracker := newPartitionTracker(ctx, dlinfo.Index, bm) + if !tracker.done(bm.t) { // need to wait for post + bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t)) + poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) + require.NoError(bm.t, err) + + // First check pending messages we'll mine this epoch + msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK) + require.NoError(bm.t, err) + for _, msg := range msgs { + tracker.recordIfPost(bm.t, bm, msg) + } + + // post not yet in mpool, wait for it + if !tracker.done(bm.t) { + bm.t.Logf("post missing from mpool, block mining suspended until it arrives") + POOL: + for { + bm.t.Logf("mpool event wait loop at block height %d, ts: %s", ts.Height(), ts.Key()) + select { + case <-ctx.Done(): + return + case evt := <-poolEvts: + bm.t.Logf("pool event: %d", evt.Type) + if evt.Type == api.MpoolAdd { + bm.t.Logf("incoming message %v", evt.Message) + if tracker.recordIfPost(bm.t, bm, evt.Message) { + break POOL + } + } + } + } + bm.t.Logf("done waiting on mpool") + } + } + } + + var target abi.ChainEpoch + reportSuccessFn := func(success bool, epoch abi.ChainEpoch, err error) { + require.NoError(bm.t, err) + target = epoch + wait <- success + } + + var success bool + for i := int64(0); !success; i++ { + err = bm.miner.MineOne(ctx, miner.MineReq{ + InjectNulls: abi.ChainEpoch(nulls + i), + Done: reportSuccessFn, + }) + success = <-wait + } + + // Wait until it shows up on the given full nodes ChainHead + // TODO this replicates a flaky condition from MineUntil, + // it would be better to use api to wait for sync, + // but currently this is a bit difficult + // and flaky failure is easy to debug and retry + nloops := 200 + for i := 0; i < nloops; i++ { + ts, err := bm.miner.FullNode.ChainHead(ctx) + require.NoError(bm.t, err) + + if ts.Height() == target { + break + } + + require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node") + time.Sleep(time.Millisecond * 10) + } + + switch { + case err == nil: // wrap around + case ctx.Err() != nil: // context fired. + return + default: // log error + bm.t.Error(err) + } + } + }() + +} + func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) { time.Sleep(time.Second) diff --git a/itests/kit/client.go b/itests/kit/client.go index c9f8946ec..4c20e37c1 100644 --- a/itests/kit/client.go +++ b/itests/kit/client.go @@ -101,9 +101,14 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode *TestFullNode) time.Sleep(time.Second) } + // client retrieval-ask --size=1 + out = clientCLI.RunCmd("client", "retrieval-ask", "--size=1", minerAddr.String(), dataCid.String()) + require.Regexp(t, regexp.MustCompile("Ask:"), out) + fmt.Println("retrieval ask:\n", out) + // Retrieve the first file from the Miner // client retrieve - tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-Client") + tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-client") require.NoError(t, err) path := filepath.Join(tmpdir, "outfile.dat") out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path) diff --git a/itests/kit/deals.go b/itests/kit/deals.go index 651c15901..f8de14d62 100644 --- a/itests/kit/deals.go +++ b/itests/kit/deals.go @@ -104,8 +104,9 @@ func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealPa // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this time.Sleep(time.Second) + fmt.Printf("WAIT DEAL SEALEDS START\n") dh.WaitDealSealed(ctx, deal, false, false, nil) - + fmt.Printf("WAIT DEAL SEALEDS END\n") return deal, res, path } @@ -176,6 +177,7 @@ loop: cb() } } + fmt.Printf("WAIT DEAL SEALED LOOP BROKEN\n") } // WaitDealSealedQuiet waits until the deal is sealed, without logging anything. @@ -290,12 +292,11 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) { func (dh *DealHarness) StartSealingWaiting(ctx context.Context) { snums, err := dh.main.SectorsList(ctx) require.NoError(dh.t, err) - for _, snum := range snums { si, err := dh.main.SectorsStatus(ctx, snum, false) require.NoError(dh.t, err) - dh.t.Logf("Sector state: %s", si.State) + dh.t.Logf("Sector state <%d>-[%d]:, %s", snum, si.SealProof, si.State) if si.State == api.SectorState(sealing.WaitDeals) { require.NoError(dh.t, dh.main.SectorStartSealing(ctx, snum)) } @@ -373,7 +374,7 @@ consumeEvents: func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, file *os.File) (out *os.File) { bserv := dstest.Bserv() - ch, err := car.LoadCar(bserv.Blockstore(), file) + ch, err := car.LoadCar(ctx, bserv.Blockstore(), file) require.NoError(dh.t, err) b, err := bserv.GetBlock(ctx, ch.Roots[0]) diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index 90a614645..dfd3d8cd7 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -487,7 +487,7 @@ func (n *Ensemble) Start() *Ensemble { ds, err := lr.Datastore(context.TODO(), "/metadata") require.NoError(n.t, err) - err = ds.Put(datastore.NewKey("miner-address"), m.ActorAddr.Bytes()) + err = ds.Put(ctx, datastore.NewKey("miner-address"), m.ActorAddr.Bytes()) require.NoError(n.t, err) nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix)) @@ -675,6 +675,43 @@ func (n *Ensemble) Connect(from api.Net, to ...api.Net) *Ensemble { return n } +func (n *Ensemble) BeginMiningMustPost(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner { + ctx := context.Background() + + // wait one second to make sure that nodes are connected and have handshaken. + // TODO make this deterministic by listening to identify events on the + // libp2p eventbus instead (or something else). + time.Sleep(1 * time.Second) + + var bms []*BlockMiner + if len(miners) == 0 { + // no miners have been provided explicitly, instantiate block miners + // for all active miners that aren't still mining. + for _, m := range n.active.miners { + if _, ok := n.active.bms[m]; ok { + continue // skip, already have a block miner + } + miners = append(miners, m) + } + } + + if len(miners) > 1 { + n.t.Fatalf("Only one active miner for MustPost, but have %d", len(miners)) + } + + for _, m := range miners { + bm := NewBlockMiner(n.t, m) + bm.MineBlocksMustPost(ctx, blocktime) + n.t.Cleanup(bm.Stop) + + bms = append(bms, bm) + + n.active.bms[m] = bm + } + + return bms +} + // BeginMining kicks off mining for the specified miners. If nil or 0-length, // it will kick off mining for all enrolled and active miners. It also adds a // cleanup function to stop all mining operations on test teardown. diff --git a/itests/kit/ensemble_opts_nv.go b/itests/kit/ensemble_opts_nv.go index 0d7d87e6a..45ed51443 100644 --- a/itests/kit/ensemble_opts_nv.go +++ b/itests/kit/ensemble_opts_nv.go @@ -49,12 +49,12 @@ func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt { }) /* inline-gen start */ return UpgradeSchedule(stmgr.Upgrade{ - Network: network.Version13, + Network: network.Version14, Height: -1, }, stmgr.Upgrade{ - Network: network.Version14, + Network: network.Version15, Height: upgradeHeight, - Migration: filcns.UpgradeActorsV6, + Migration: filcns.UpgradeActorsV7, }) /* inline-gen end */ } diff --git a/itests/kit/files.go b/itests/kit/files.go index 9babac941..c78352afe 100644 --- a/itests/kit/files.go +++ b/itests/kit/files.go @@ -83,7 +83,7 @@ func CreateRandomCARv1(t *testing.T, rseed, size int) (carV1FilePath string, ori require.NoError(t, car.WriteCar(ctx, dagSvc, []cid.Cid{root}, tmp)) _, err = tmp.Seek(0, io.SeekStart) require.NoError(t, err) - hd, _, err := car.ReadHeader(bufio.NewReader(tmp)) + hd, err := car.ReadHeader(bufio.NewReader(tmp)) require.NoError(t, err) require.EqualValues(t, 1, hd.Version) require.Len(t, hd.Roots, 1) diff --git a/itests/self_sent_txn_test.go b/itests/self_sent_txn_test.go new file mode 100644 index 000000000..b5ec2c0dc --- /dev/null +++ b/itests/self_sent_txn_test.go @@ -0,0 +1,101 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +// these tests check that the versioned code in vm.transfer is functioning correctly across versions! +// we reordered the checks to make sure that a transaction with too much money in it sent to yourself will fail instead of succeeding as a noop +// more info in this PR! https://github.com/filecoin-project/lotus/pull/7637 +func TestSelfSentTxnV15(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version15)) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + bal, err := client15.WalletBalance(ctx, client15.DefaultKey.Address) + require.NoError(t, err) + + // send self half of account balance + msgHalfBal := &types.Message{ + From: client15.DefaultKey.Address, + To: client15.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + smHalfBal, err := client15.MpoolPushMessage(ctx, msgHalfBal, nil) + require.NoError(t, err) + mLookup, err := client15.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + msgOverBal := &types.Message{ + From: client15.DefaultKey.Address, + To: client15.DefaultKey.Address, + Value: big.Mul(big.NewInt(2), bal), + GasLimit: 10000000000, + GasPremium: big.NewInt(10000000000), + GasFeeCap: big.NewInt(100000000000), + Nonce: 1, + } + smOverBal, err := client15.WalletSignMessage(ctx, client15.DefaultKey.Address, msgOverBal) + require.NoError(t, err) + smcid, err := client15.MpoolPush(ctx, smOverBal) + require.NoError(t, err) + mLookup, err = client15.StateWaitMsg(ctx, smcid, 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.SysErrInsufficientFunds, mLookup.Receipt.ExitCode) +} + +func TestSelfSentTxnV14(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client14, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version14)) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + bal, err := client14.WalletBalance(ctx, client14.DefaultKey.Address) + require.NoError(t, err) + + // send self half of account balance + msgHalfBal := &types.Message{ + From: client14.DefaultKey.Address, + To: client14.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + smHalfBal, err := client14.MpoolPushMessage(ctx, msgHalfBal, nil) + require.NoError(t, err) + mLookup, err := client14.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + msgOverBal := &types.Message{ + From: client14.DefaultKey.Address, + To: client14.DefaultKey.Address, + Value: big.Mul(big.NewInt(2), bal), + GasLimit: 10000000000, + GasPremium: big.NewInt(10000000000), + GasFeeCap: big.NewInt(100000000000), + Nonce: 1, + } + smOverBal, err := client14.WalletSignMessage(ctx, client14.DefaultKey.Address, msgOverBal) + require.NoError(t, err) + smcid, err := client14.MpoolPush(ctx, smOverBal) + require.NoError(t, err) + mLookup, err = client14.StateWaitMsg(ctx, smcid, 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) +} diff --git a/lib/backupds/backupds_test.go b/lib/backupds/backupds_test.go index f7bc36e22..c681491e3 100644 --- a/lib/backupds/backupds_test.go +++ b/lib/backupds/backupds_test.go @@ -2,6 +2,7 @@ package backupds import ( "bytes" + "context" "fmt" "io/ioutil" "os" @@ -17,14 +18,14 @@ const valSize = 512 << 10 func putVals(t *testing.T, ds datastore.Datastore, start, end int) { for i := start; i < end; i++ { - err := ds.Put(datastore.NewKey(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize)))) + err := ds.Put(context.TODO(), datastore.NewKey(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize)))) require.NoError(t, err) } } func checkVals(t *testing.T, ds datastore.Datastore, start, end int, exist bool) { for i := start; i < end; i++ { - v, err := ds.Get(datastore.NewKey(fmt.Sprintf("%d", i))) + v, err := ds.Get(context.TODO(), datastore.NewKey(fmt.Sprintf("%d", i))) if exist { require.NoError(t, err) expect := []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize))) @@ -44,7 +45,7 @@ func TestNoLogRestore(t *testing.T) { require.NoError(t, err) var bup bytes.Buffer - require.NoError(t, bds.Backup(&bup)) + require.NoError(t, bds.Backup(context.TODO(), &bup)) putVals(t, ds1, 10, 20) diff --git a/lib/backupds/datastore.go b/lib/backupds/datastore.go index 350988a50..f0ece10ba 100644 --- a/lib/backupds/datastore.go +++ b/lib/backupds/datastore.go @@ -1,6 +1,7 @@ package backupds import ( + "context" "crypto/sha256" "io" "sync" @@ -52,7 +53,7 @@ func Wrap(child datastore.Batching, logdir string) (*Datastore, error) { // Writes a datastore dump into the provided writer as // [array(*) of [key, value] tuples, checksum] -func (d *Datastore) Backup(out io.Writer) error { +func (d *Datastore) Backup(ctx context.Context, out io.Writer) error { scratch := make([]byte, 9) if err := cbg.WriteMajorTypeHeaderBuf(scratch, out, cbg.MajArray, 2); err != nil { @@ -75,7 +76,7 @@ func (d *Datastore) Backup(out io.Writer) error { log.Info("Starting datastore backup") defer log.Info("Datastore backup done") - qr, err := d.child.Query(query.Query{}) + qr, err := d.child.Query(ctx, query.Query{}) if err != nil { return xerrors.Errorf("query: %w", err) } @@ -132,23 +133,23 @@ func (d *Datastore) Backup(out io.Writer) error { // proxy -func (d *Datastore) Get(key datastore.Key) (value []byte, err error) { - return d.child.Get(key) +func (d *Datastore) Get(ctx context.Context, key datastore.Key) (value []byte, err error) { + return d.child.Get(ctx, key) } -func (d *Datastore) Has(key datastore.Key) (exists bool, err error) { - return d.child.Has(key) +func (d *Datastore) Has(ctx context.Context, key datastore.Key) (exists bool, err error) { + return d.child.Has(ctx, key) } -func (d *Datastore) GetSize(key datastore.Key) (size int, err error) { - return d.child.GetSize(key) +func (d *Datastore) GetSize(ctx context.Context, key datastore.Key) (size int, err error) { + return d.child.GetSize(ctx, key) } -func (d *Datastore) Query(q query.Query) (query.Results, error) { - return d.child.Query(q) +func (d *Datastore) Query(ctx context.Context, q query.Query) (query.Results, error) { + return d.child.Query(ctx, q) } -func (d *Datastore) Put(key datastore.Key, value []byte) error { +func (d *Datastore) Put(ctx context.Context, key datastore.Key, value []byte) error { d.backupLk.RLock() defer d.backupLk.RUnlock() @@ -160,21 +161,21 @@ func (d *Datastore) Put(key datastore.Key, value []byte) error { } } - return d.child.Put(key, value) + return d.child.Put(ctx, key, value) } -func (d *Datastore) Delete(key datastore.Key) error { +func (d *Datastore) Delete(ctx context.Context, key datastore.Key) error { d.backupLk.RLock() defer d.backupLk.RUnlock() - return d.child.Delete(key) + return d.child.Delete(ctx, key) } -func (d *Datastore) Sync(prefix datastore.Key) error { +func (d *Datastore) Sync(ctx context.Context, prefix datastore.Key) error { d.backupLk.RLock() defer d.backupLk.RUnlock() - return d.child.Sync(prefix) + return d.child.Sync(ctx, prefix) } func (d *Datastore) CloseLog() error { @@ -196,8 +197,8 @@ func (d *Datastore) Close() error { ) } -func (d *Datastore) Batch() (datastore.Batch, error) { - b, err := d.child.Batch() +func (d *Datastore) Batch(ctx context.Context) (datastore.Batch, error) { + b, err := d.child.Batch(ctx) if err != nil { return nil, err } @@ -215,7 +216,7 @@ type bbatch struct { rlk sync.Locker } -func (b *bbatch) Put(key datastore.Key, value []byte) error { +func (b *bbatch) Put(ctx context.Context, key datastore.Key, value []byte) error { if b.d.log != nil { b.d.log <- Entry{ Key: []byte(key.String()), @@ -224,18 +225,18 @@ func (b *bbatch) Put(key datastore.Key, value []byte) error { } } - return b.b.Put(key, value) + return b.b.Put(ctx, key, value) } -func (b *bbatch) Delete(key datastore.Key) error { - return b.b.Delete(key) +func (b *bbatch) Delete(ctx context.Context, key datastore.Key) error { + return b.b.Delete(ctx, key) } -func (b *bbatch) Commit() error { +func (b *bbatch) Commit(ctx context.Context) error { b.rlk.Lock() defer b.rlk.Unlock() - return b.b.Commit() + return b.b.Commit(ctx) } var _ datastore.Batch = &bbatch{} diff --git a/lib/backupds/log.go b/lib/backupds/log.go index b76dfbfe6..b89f410f0 100644 --- a/lib/backupds/log.go +++ b/lib/backupds/log.go @@ -1,6 +1,7 @@ package backupds import ( + "context" "fmt" "io" "io/ioutil" @@ -100,6 +101,7 @@ type logfile struct { var compactThresh = 2 func (d *Datastore) createLog(logdir string) (*logfile, string, error) { + ctx := context.TODO() p := filepath.Join(logdir, strconv.FormatInt(time.Now().Unix(), 10)+".log.cbor") log.Infow("creating log", "file", p) @@ -108,7 +110,7 @@ func (d *Datastore) createLog(logdir string) (*logfile, string, error) { return nil, "", err } - if err := d.Backup(f); err != nil { + if err := d.Backup(ctx, f); err != nil { return nil, "", xerrors.Errorf("writing log base: %w", err) } if err := f.Sync(); err != nil { @@ -122,8 +124,9 @@ func (d *Datastore) createLog(logdir string) (*logfile, string, error) { } func (d *Datastore) openLog(p string) (*logfile, string, error) { + ctx := context.TODO() log.Infow("opening log", "file", p) - lh, err := d.child.Get(loghead) + lh, err := d.child.Get(ctx, loghead) if err != nil { return nil, "", xerrors.Errorf("checking log head (logfile '%s'): %w", p, err) } @@ -212,6 +215,7 @@ func (d *Datastore) openLog(p string) (*logfile, string, error) { } func (l *logfile) writeLogHead(logname string, ds datastore.Batching) error { + ctx := context.TODO() lval := []byte(fmt.Sprintf("%s;%s;%d", logname, uuid.New(), time.Now().Unix())) err := l.writeEntry(&Entry{ @@ -223,7 +227,7 @@ func (l *logfile) writeLogHead(logname string, ds datastore.Batching) error { return xerrors.Errorf("writing loghead to the log: %w", err) } - if err := ds.Put(loghead, lval); err != nil { + if err := ds.Put(ctx, loghead, lval); err != nil { return xerrors.Errorf("writing loghead to the datastore: %w", err) } diff --git a/lib/backupds/read.go b/lib/backupds/read.go index a44442af1..af4f30888 100644 --- a/lib/backupds/read.go +++ b/lib/backupds/read.go @@ -2,6 +2,7 @@ package backupds import ( "bytes" + "context" "crypto/sha256" "io" "os" @@ -117,13 +118,13 @@ func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte, log bool) } func RestoreInto(r io.Reader, dest datastore.Batching) error { - batch, err := dest.Batch() + batch, err := dest.Batch(context.TODO()) if err != nil { return xerrors.Errorf("creating batch: %w", err) } _, err = ReadBackup(r, func(key datastore.Key, value []byte, _ bool) error { - if err := batch.Put(key, value); err != nil { + if err := batch.Put(context.TODO(), key, value); err != nil { return xerrors.Errorf("put key: %w", err) } @@ -133,7 +134,7 @@ func RestoreInto(r io.Reader, dest datastore.Batching) error { return xerrors.Errorf("reading backup: %w", err) } - if err := batch.Commit(); err != nil { + if err := batch.Commit(context.TODO()); err != nil { return xerrors.Errorf("committing batch: %w", err) } diff --git a/lib/tracing/setup.go b/lib/tracing/setup.go index b8c0399ad..d90099f79 100644 --- a/lib/tracing/setup.go +++ b/lib/tracing/setup.go @@ -4,9 +4,16 @@ import ( "os" "strings" - "contrib.go.opencensus.io/exporter/jaeger" + octrace "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/bridge/opencensus" + "go.opentelemetry.io/otel/exporters/jaeger" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + "go.uber.org/zap" + logging "github.com/ipfs/go-log/v2" - "go.opencensus.io/trace" ) var log = logging.Logger("tracing") @@ -14,7 +21,6 @@ var log = logging.Logger("tracing") const ( // environment variable names envCollectorEndpoint = "LOTUS_JAEGER_COLLECTOR_ENDPOINT" - envAgentEndpoint = "LOTUS_JAEGER_AGENT_ENDPOINT" envAgentHost = "LOTUS_JAEGER_AGENT_HOST" envAgentPort = "LOTUS_JAEGER_AGENT_PORT" envJaegerUser = "LOTUS_JAEGER_USERNAME" @@ -26,54 +32,61 @@ const ( // The agent endpoint is a thrift/udp protocol and should be given // as a string like "hostname:port". The agent can also be configured // with separate host and port variables. -func jaegerOptsFromEnv(opts *jaeger.Options) bool { +func jaegerOptsFromEnv() jaeger.EndpointOption { var e string var ok bool - if e, ok = os.LookupEnv(envJaegerUser); ok { - if p, ok := os.LookupEnv(envJaegerCred); ok { - opts.Username = e - opts.Password = p - } else { - log.Warn("jaeger username supplied with no password. authentication will not be used.") - } - } + if e, ok = os.LookupEnv(envCollectorEndpoint); ok { - opts.CollectorEndpoint = e - log.Infof("jaeger tracess will send to collector %s", e) - return true - } - if e, ok = os.LookupEnv(envAgentEndpoint); ok { - log.Infof("jaeger traces will be sent to agent %s", e) - opts.AgentEndpoint = e - return true - } - if e, ok = os.LookupEnv(envAgentHost); ok { - if p, ok := os.LookupEnv(envAgentPort); ok { - opts.AgentEndpoint = strings.Join([]string{e, p}, ":") - } else { - opts.AgentEndpoint = strings.Join([]string{e, "6831"}, ":") + options := []jaeger.CollectorEndpointOption{jaeger.WithEndpoint(e)} + if u, ok := os.LookupEnv(envJaegerUser); ok { + if p, ok := os.LookupEnv(envJaegerCred); ok { + options = append(options, jaeger.WithUsername(u)) + options = append(options, jaeger.WithPassword(p)) + } else { + log.Warn("jaeger username supplied with no password. authentication will not be used.") + } } - log.Infof("jaeger traces will be sent to agent %s", opts.AgentEndpoint) - return true + log.Infof("jaeger tracess will send to collector %s", e) + return jaeger.WithCollectorEndpoint(options...) } - return false + + if e, ok = os.LookupEnv(envAgentHost); ok { + options := []jaeger.AgentEndpointOption{jaeger.WithAgentHost(e), jaeger.WithLogger(zap.NewStdLog(log.Desugar()))} + var ep string + if p, ok := os.LookupEnv(envAgentPort); ok { + options = append(options, jaeger.WithAgentPort(p)) + ep = strings.Join([]string{e, p}, ":") + } else { + ep = strings.Join([]string{e, "6831"}, ":") + } + log.Infof("jaeger traces will be sent to agent %s", ep) + return jaeger.WithAgentEndpoint(options...) + } + return nil } -func SetupJaegerTracing(serviceName string) *jaeger.Exporter { - opts := jaeger.Options{} - if !jaegerOptsFromEnv(&opts) { +func SetupJaegerTracing(serviceName string) *tracesdk.TracerProvider { + jaegerEndpoint := jaegerOptsFromEnv() + if jaegerEndpoint == nil { return nil } - opts.ServiceName = serviceName - je, err := jaeger.NewExporter(opts) + je, err := jaeger.New(jaegerEndpoint) if err != nil { log.Errorw("failed to create the jaeger exporter", "error", err) return nil } - - trace.RegisterExporter(je) - trace.ApplyConfig(trace.Config{ - DefaultSampler: trace.AlwaysSample(), - }) - return je + tp := tracesdk.NewTracerProvider( + // Always be sure to batch in production. + tracesdk.WithBatcher(je), + // Record information about this application in an Resource. + tracesdk.WithResource(resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNameKey.String(serviceName), + )), + tracesdk.WithSampler(tracesdk.AlwaysSample()), + ) + otel.SetTracerProvider(tp) + tracer := tp.Tracer(serviceName) + octrace.DefaultTracer = opencensus.NewTracer(tracer) + return tp } diff --git a/lotuspond/front/src/chain/methods.json b/lotuspond/front/src/chain/methods.json index c0f69d58c..1f6191a94 100644 --- a/lotuspond/front/src/chain/methods.json +++ b/lotuspond/front/src/chain/methods.json @@ -622,5 +622,112 @@ "AddVerifiedClient", "UseBytes", "RestoreBytes" + ], + "fil/7/account": [ + "Send", + "Constructor", + "PubkeyAddress" + ], + "fil/7/cron": [ + "Send", + "Constructor", + "EpochTick" + ], + "fil/7/init": [ + "Send", + "Constructor", + "Exec" + ], + "fil/7/multisig": [ + "Send", + "Constructor", + "Propose", + "Approve", + "Cancel", + "AddSigner", + "RemoveSigner", + "SwapSigner", + "ChangeNumApprovalsThreshold", + "LockBalance" + ], + "fil/7/paymentchannel": [ + "Send", + "Constructor", + "UpdateChannelState", + "Settle", + "Collect" + ], + "fil/7/reward": [ + "Send", + "Constructor", + "AwardBlockReward", + "ThisEpochReward", + "UpdateNetworkKPI" + ], + "fil/7/storagemarket": [ + "Send", + "Constructor", + "AddBalance", + "WithdrawBalance", + "PublishStorageDeals", + "VerifyDealsForActivation", + "ActivateDeals", + "OnMinerSectorsTerminate", + "ComputeDataCommitment", + "CronTick" + ], + "fil/7/storageminer": [ + "Send", + "Constructor", + "ControlAddresses", + "ChangeWorkerAddress", + "ChangePeerID", + "SubmitWindowedPoSt", + "PreCommitSector", + "ProveCommitSector", + "ExtendSectorExpiration", + "TerminateSectors", + "DeclareFaults", + "DeclareFaultsRecovered", + "OnDeferredCronEvent", + "CheckSectorProven", + "ApplyRewards", + "ReportConsensusFault", + "WithdrawBalance", + "ConfirmSectorProofsValid", + "ChangeMultiaddrs", + "CompactPartitions", + "CompactSectorNumbers", + "ConfirmUpdateWorkerKey", + "RepayDebt", + "ChangeOwnerAddress", + "DisputeWindowedPoSt", + "PreCommitSectorBatch", + "ProveCommitAggregate", + "ProveReplicaUpdates" + ], + "fil/7/storagepower": [ + "Send", + "Constructor", + "CreateMiner", + "UpdateClaimedPower", + "EnrollCronEvent", + "OnEpochTickEnd", + "UpdatePledgeTotal", + "SubmitPoRepForBulkVerify", + "CurrentTotalPower" + ], + "fil/7/system": [ + "Send", + "Constructor" + ], + "fil/7/verifiedregistry": [ + "Send", + "Constructor", + "AddVerifier", + "RemoveVerifier", + "AddVerifiedClient", + "UseBytes", + "RestoreBytes" ] } \ No newline at end of file diff --git a/markets/dagstore/blockstore.go b/markets/dagstore/blockstore.go index 8980d40cf..317cb08b9 100644 --- a/markets/dagstore/blockstore.go +++ b/markets/dagstore/blockstore.go @@ -1,6 +1,7 @@ package dagstore import ( + "context" "io" blocks "github.com/ipfs/go-block-format" @@ -20,14 +21,14 @@ type Blockstore struct { var _ bstore.Blockstore = (*Blockstore)(nil) -func (b *Blockstore) DeleteBlock(c cid.Cid) error { +func (b *Blockstore) DeleteBlock(context.Context, cid.Cid) error { return xerrors.Errorf("DeleteBlock called but not implemented") } -func (b *Blockstore) Put(block blocks.Block) error { +func (b *Blockstore) Put(context.Context, blocks.Block) error { return xerrors.Errorf("Put called but not implemented") } -func (b *Blockstore) PutMany(blocks []blocks.Block) error { +func (b *Blockstore) PutMany(context.Context, []blocks.Block) error { return xerrors.Errorf("PutMany called but not implemented") } diff --git a/markets/storageadapter/ondealsectorcommitted.go b/markets/storageadapter/ondealsectorcommitted.go index 4cd0a2d68..94eaadef4 100644 --- a/markets/storageadapter/ondealsectorcommitted.go +++ b/markets/storageadapter/ondealsectorcommitted.go @@ -5,6 +5,7 @@ import ( "context" "sync" + "github.com/filecoin-project/go-bitfield" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -110,7 +111,7 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, // Watch for a pre-commit message to the provider. matchEvent := func(msg *types.Message) (bool, error) { - matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch) + matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch || msg.Method == miner.Methods.ProveReplicaUpdates) return matched, nil } @@ -145,6 +146,20 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, return false, err } + // If this is a replica update method that succeeded the deal is active + if msg.Method == miner.Methods.ProveReplicaUpdates { + sn, err := dealSectorInReplicaUpdateSuccess(msg, rec, res) + if err != nil { + return false, err + } + if sn != nil { + cb(*sn, true, nil) + return false, nil + } + // Didn't find the deal ID in this message, so keep looking + return true, nil + } + // Extract the message parameters sn, err := dealSectorInPreCommitMsg(msg, res) if err != nil { @@ -264,6 +279,42 @@ func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, pr return nil } +func dealSectorInReplicaUpdateSuccess(msg *types.Message, rec *types.MessageReceipt, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) { + var params miner.ProveReplicaUpdatesParams + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("unmarshal prove replica update: %w", err) + } + + var seekUpdate miner.ReplicaUpdate + var found bool + for _, update := range params.Updates { + for _, did := range update.Deals { + if did == res.DealID { + seekUpdate = update + found = true + break + } + } + } + if !found { + return nil, nil + } + + // check that this update passed validation steps + var successBf bitfield.BitField + if err := successBf.UnmarshalCBOR(bytes.NewReader(rec.Return)); err != nil { + return nil, xerrors.Errorf("unmarshal return value: %w", err) + } + success, err := successBf.IsSet(uint64(seekUpdate.SectorID)) + if err != nil { + return nil, xerrors.Errorf("failed to check success of replica update: %w", err) + } + if !success { + return nil, xerrors.Errorf("replica update %d failed", seekUpdate.SectorID) + } + return &seekUpdate.SectorID, nil +} + // dealSectorInPreCommitMsg tries to find a sector containing the specified deal func dealSectorInPreCommitMsg(msg *types.Message, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) { switch msg.Method { diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index 5c82d0dc8..0828db271 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -4,7 +4,6 @@ package storageadapter import ( "context" - "io" "time" "github.com/ipfs/go-cid" @@ -88,7 +87,7 @@ func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemark return n.dealPublisher.Publish(ctx, deal.ClientDealProposal) } -func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData io.Reader) (*storagemarket.PackingResult, error) { +func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData shared.ReadSeekStarter) (*storagemarket.PackingResult, error) { if deal.PublishCid == nil { return nil, xerrors.Errorf("deal.PublishCid can't be nil") } @@ -104,17 +103,32 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema KeepUnsealed: deal.FastRetrieval, } + // Attempt to add the piece to the sector p, offset, err := n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) curTime := build.Clock.Now() for build.Clock.Since(curTime) < addPieceRetryTimeout { + // Check if there was an error because of too many sectors being sealed if !xerrors.Is(err, sealing.ErrTooManySectorsSealing) { if err != nil { log.Errorf("failed to addPiece for deal %d, err: %v", deal.DealID, err) } + + // There was either a fatal error or no error. In either case + // don't retry AddPiece break } + + // The piece could not be added to the sector because there are too + // many sectors being sealed, back-off for a while before trying again select { case <-build.Clock.After(addPieceRetryWait): + // Reset the reader to the start + err = pieceData.SeekStart() + if err != nil { + return nil, xerrors.Errorf("failed to reset piece reader to start before retrying AddPiece for deal %d: %w", deal.DealID, err) + } + + // Attempt to add the piece again p, offset, err = n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) case <-ctx.Done(): return nil, xerrors.New("context expired while waiting to retry AddPiece") @@ -228,7 +242,7 @@ func (n *ProviderNodeAdapter) GetBalance(ctx context.Context, addr address.Addre // TODO: why doesnt this method take in a sector ID? func (n *ProviderNodeAdapter) LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, encodedTs shared.TipSetToken) (sectorID abi.SectorNumber, offset abi.PaddedPieceSize, length abi.PaddedPieceSize, err error) { - refs, err := n.secb.GetRefs(dealID) + refs, err := n.secb.GetRefs(ctx, dealID) if err != nil { return 0, 0, 0, err } diff --git a/miner/miner.go b/miner/miner.go index 582ade723..976e9ca6f 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -325,7 +325,7 @@ minerLoop: "block-time", btime, "time", build.Clock.Now(), "difference", build.Clock.Since(btime)) } - if err := m.sf.MinedBlock(b.Header, base.TipSet.Height()+base.NullRounds); err != nil { + if err := m.sf.MinedBlock(ctx, b.Header, base.TipSet.Height()+base.NullRounds); err != nil { log.Errorf(" SLASH FILTER ERROR: %s", err) if os.Getenv("LOTUS_MINER_NO_SLASHFILTER") != "_yes_i_know_i_can_and_probably_will_lose_all_my_fil_and_power_" { continue @@ -535,8 +535,12 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type prand := abi.PoStRandomness(rand) tSeed := build.Clock.Now() + nv, err := m.api.StateNetworkVersion(ctx, base.TipSet.Key()) + if err != nil { + return nil, err + } - postProof, err := m.epp.ComputeProof(ctx, mbi.Sectors, prand) + postProof, err := m.epp.ComputeProof(ctx, mbi.Sectors, prand, round, nv) if err != nil { err = xerrors.Errorf("failed to compute winning post proof: %w", err) return nil, err diff --git a/miner/warmup.go b/miner/warmup.go index 991679c09..be5ac3ea7 100644 --- a/miner/warmup.go +++ b/miner/warmup.go @@ -10,8 +10,7 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" - - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/lotus/chain/types" ) @@ -61,13 +60,22 @@ out: return xerrors.Errorf("getting sector info: %w", err) } - _, err = m.epp.ComputeProof(ctx, []proof2.SectorInfo{ + ts, err := m.api.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("getting chain head") + } + nv, err := m.api.StateNetworkVersion(ctx, ts.Key()) + if err != nil { + return xerrors.Errorf("getting network version") + } + + _, err = m.epp.ComputeProof(ctx, []proof7.ExtendedSectorInfo{ { SealProof: si.SealProof, SectorNumber: sector, SealedCID: si.SealedCID, }, - }, r) + }, r, ts.Height(), nv) if err != nil { return xerrors.Errorf("failed to compute proof: %w", err) } diff --git a/node/builder.go b/node/builder.go index 0520d62dd..96d217ec3 100644 --- a/node/builder.go +++ b/node/builder.go @@ -19,7 +19,6 @@ import ( "github.com/libp2p/go-libp2p-core/peerstore" "github.com/libp2p/go-libp2p-core/routing" dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p-peerstore/pstoremem" pubsub "github.com/libp2p/go-libp2p-pubsub" record "github.com/libp2p/go-libp2p-record" "github.com/libp2p/go-libp2p/p2p/net/conngater" @@ -162,6 +161,12 @@ func defaults() []Option { }), Override(new(dtypes.ShutdownChan), make(chan struct{})), + + // the great context in the sky, otherwise we can't DI build genesis; there has to be a better + // solution than this hack. + Override(new(context.Context), func(lc fx.Lifecycle, mctx helpers.MetricsCtx) context.Context { + return helpers.LifecycleCtx(mctx, lc) + }), } } @@ -170,7 +175,7 @@ var LibP2P = Options( Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(false)), // Host dependencies - Override(new(peerstore.Peerstore), pstoremem.NewPeerstore), + Override(new(peerstore.Peerstore), lp2p.Peerstore), Override(PstoreAddSelfKeysKey, lp2p.PstoreAddSelfKeys), Override(StartListeningKey, lp2p.StartListening(config.DefaultFullNode().Libp2p.ListenAddresses)), diff --git a/node/builder_miner.go b/node/builder_miner.go index 74b0c5558..e813a2d24 100644 --- a/node/builder_miner.go +++ b/node/builder_miner.go @@ -136,7 +136,7 @@ func ConfigStorageMiner(c interface{}) Option { If(cfg.Subsystems.EnableMarkets, // Markets Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore), - Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfersForStorage, cfg.Dealmaking.SimultaneousTransfersForRetrieval)), + Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfersForStorage, cfg.Dealmaking.SimultaneousTransfersForStoragePerClient, cfg.Dealmaking.SimultaneousTransfersForRetrieval)), Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), @@ -164,7 +164,9 @@ func ConfigStorageMiner(c interface{}) Option { Override(HandleRetrievalKey, modules.HandleRetrieval), // Markets (storage) - Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer), + Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), + Override(new(dtypes.ProviderTransport), modules.NewProviderTransport), + Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDataTransfer), Override(new(*storedask.StoredAsk), modules.NewStorageAsk), Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(cfg.Dealmaking, nil)), Override(new(storagemarket.StorageProvider), modules.StorageProvider), diff --git a/node/config/def.go b/node/config/def.go index 735107e29..9c39c197c 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -109,10 +109,11 @@ func DefaultStorageMiner() *StorageMiner { AvailableBalanceBuffer: types.FIL(big.Zero()), DisableCollateralFallback: false, - BatchPreCommits: true, - MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors - PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket - PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration + BatchPreCommits: true, + MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors + PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket + // XXX snap deals wait deals slack if first + PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(policy.GetMaxSectorExpirationExtension()) * uint64(time.Second)), @@ -131,11 +132,13 @@ func DefaultStorageMiner() *StorageMiner { }, Storage: sectorstorage.SealerConfig{ - AllowAddPiece: true, - AllowPreCommit1: true, - AllowPreCommit2: true, - AllowCommit: true, - AllowUnseal: true, + AllowAddPiece: true, + AllowPreCommit1: true, + AllowPreCommit2: true, + AllowCommit: true, + AllowUnseal: true, + AllowReplicaUpdate: true, + AllowProveReplicaUpdate2: true, // Default to 10 - tcp should still be able to figure this out, and // it's the ratio between 10gbit / 1gbit @@ -160,8 +163,9 @@ func DefaultStorageMiner() *StorageMiner { MaxDealsPerPublishMsg: 8, MaxProviderCollateralMultiplier: 2, - SimultaneousTransfersForStorage: DefaultSimultaneousTransfers, - SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, + SimultaneousTransfersForStorage: DefaultSimultaneousTransfers, + SimultaneousTransfersForStoragePerClient: 0, + SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being sealed diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 296501edc..eded0b1fe 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -272,6 +272,17 @@ passed to the sealing node by the markets service. 0 is unlimited.`, Comment: `The maximum number of parallel online data transfers for storage deals`, }, + { + Name: "SimultaneousTransfersForStoragePerClient", + Type: "uint64", + + Comment: `The maximum number of simultaneous data transfers from any single client +for storage deals. +Unset by default (0), and values higher than SimultaneousTransfersForStorage +will have no effect; i.e. the total number of simultaneous data transfers +across all storage clients is bound by SimultaneousTransfersForStorage +regardless of this number.`, + }, { Name: "SimultaneousTransfersForRetrieval", Type: "uint64", diff --git a/node/config/types.go b/node/config/types.go index 1be40029e..2ae2d8eee 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -131,6 +131,13 @@ type DealmakingConfig struct { MaxStagingDealsBytes int64 // The maximum number of parallel online data transfers for storage deals SimultaneousTransfersForStorage uint64 + // The maximum number of simultaneous data transfers from any single client + // for storage deals. + // Unset by default (0), and values higher than SimultaneousTransfersForStorage + // will have no effect; i.e. the total number of simultaneous data transfers + // across all storage clients is bound by SimultaneousTransfersForStorage + // regardless of this number. + SimultaneousTransfersForStoragePerClient uint64 // The maximum number of parallel online data transfers for retrieval deals SimultaneousTransfersForRetrieval uint64 // Minimum start epoch buffer to give time for sealing of sector with deal. diff --git a/node/hello/hello.go b/node/hello/hello.go index 5461dcc87..ac92d5170 100644 --- a/node/hello/hello.go +++ b/node/hello/hello.go @@ -141,7 +141,7 @@ func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error { return err } - gen, err := hs.cs.GetGenesis() + gen, err := hs.cs.GetGenesis(ctx) if err != nil { return err } diff --git a/node/impl/backup.go b/node/impl/backup.go index 10f673a4b..7acc7e018 100644 --- a/node/impl/backup.go +++ b/node/impl/backup.go @@ -1,6 +1,7 @@ package impl import ( + "context" "os" "path/filepath" "strings" @@ -12,7 +13,7 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) -func backup(mds dtypes.MetadataDS, fpath string) error { +func backup(ctx context.Context, mds dtypes.MetadataDS, fpath string) error { bb, ok := os.LookupEnv("LOTUS_BACKUP_BASE_PATH") if !ok { return xerrors.Errorf("LOTUS_BACKUP_BASE_PATH env var not set") @@ -52,7 +53,7 @@ func backup(mds dtypes.MetadataDS, fpath string) error { return xerrors.Errorf("open %s: %w", fpath, err) } - if err := bds.Backup(out); err != nil { + if err := bds.Backup(ctx, out); err != nil { if cerr := out.Close(); cerr != nil { log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err) } diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 4b6903d94..7848c84f9 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -150,7 +150,7 @@ func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isSt if err != nil { return nil, xerrors.Errorf("failed to find blockstore for root CID: %w", err) } - if has, err := bs.Has(params.Data.Root); err != nil { + if has, err := bs.Has(ctx, params.Data.Root); err != nil { return nil, xerrors.Errorf("failed to query blockstore for root CID: %w", err) } else if !has { return nil, xerrors.Errorf("failed to find root CID in blockstore: %w", err) @@ -490,6 +490,7 @@ func (a *API) makeRetrievalQuery(ctx context.Context, rp rm.RetrievalPeer, paylo Size: queryResponse.Size, MinPrice: queryResponse.PieceRetrievalPrice(), UnsealPrice: queryResponse.UnsealPrice, + PricePerByte: queryResponse.MinPricePerByte, PaymentInterval: queryResponse.MaxPaymentInterval, PaymentIntervalIncrease: queryResponse.MaxPaymentIntervalIncrease, Miner: queryResponse.PaymentAddress, // TODO: check @@ -520,7 +521,7 @@ func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (res *api.Impor } defer f.Close() //nolint:errcheck - hd, _, err := car.ReadHeader(bufio.NewReader(f)) + hd, err := car.ReadHeader(bufio.NewReader(f)) if err != nil { return nil, xerrors.Errorf("failed to read CAR header: %w", err) } @@ -1028,7 +1029,7 @@ func (a *API) outputCAR(ctx context.Context, ds format.DAGService, bs bstore.Blo } if cs.Visit(c) { - nb, err := bs.Get(c) + nb, err := bs.Get(ctx, c) if err != nil { return xerrors.Errorf("getting block data: %w", err) } @@ -1285,7 +1286,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet } // check that the data is a car file; if it's not, retrieval won't work - _, _, err = car.ReadHeader(bufio.NewReader(rdr)) + _, err = car.ReadHeader(bufio.NewReader(rdr)) if err != nil { return nil, xerrors.Errorf("not a car file: %w", err) } diff --git a/node/impl/client/client_test.go b/node/impl/client/client_test.go index bf7ff7735..1be225278 100644 --- a/node/impl/client/client_test.go +++ b/node/impl/client/client_test.go @@ -107,7 +107,7 @@ func TestImportLocal(t *testing.T) { // recreate the unixfs dag, and see if it matches the original file byte by byte // import the car into a memory blockstore, then export the unixfs file. bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - _, err = car.LoadCar(bs, exported.DataReader()) + _, err = car.LoadCar(ctx, bs, exported.DataReader()) require.NoError(t, err) dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) diff --git a/node/impl/full.go b/node/impl/full.go index f9c83ded0..a6f61c305 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -42,7 +42,7 @@ type FullNodeAPI struct { } func (n *FullNodeAPI) CreateBackup(ctx context.Context, fpath string) error { - return backup(n.DS, fpath) + return backup(ctx, n.DS, fpath) } func (n *FullNodeAPI) NodeStatus(ctx context.Context, inclChainStatus bool) (status api.NodeStatus, err error) { diff --git a/node/impl/full/chain.go b/node/impl/full/chain.go index 4ffbe0e63..ce492f94a 100644 --- a/node/impl/full/chain.go +++ b/node/impl/full/chain.go @@ -99,11 +99,11 @@ func (m *ChainModule) ChainHead(context.Context) (*types.TipSet, error) { } func (a *ChainAPI) ChainGetBlock(ctx context.Context, msg cid.Cid) (*types.BlockHeader, error) { - return a.Chain.GetBlock(msg) + return a.Chain.GetBlock(ctx, msg) } func (m *ChainModule) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { - return m.Chain.LoadTipSet(key) + return m.Chain.LoadTipSet(ctx, key) } func (m *ChainModule) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error) { @@ -111,12 +111,12 @@ func (m *ChainModule) ChainGetPath(ctx context.Context, from, to types.TipSetKey } func (m *ChainModule) ChainGetBlockMessages(ctx context.Context, msg cid.Cid) (*api.BlockMessages, error) { - b, err := m.Chain.GetBlock(msg) + b, err := m.Chain.GetBlock(ctx, msg) if err != nil { return nil, err } - bmsgs, smsgs, err := m.Chain.MessagesForBlock(b) + bmsgs, smsgs, err := m.Chain.MessagesForBlock(ctx, b) if err != nil { return nil, err } @@ -143,7 +143,7 @@ func (a *ChainAPI) ChainGetPath(ctx context.Context, from types.TipSetKey, to ty } func (a *ChainAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([]api.Message, error) { - b, err := a.Chain.GetBlock(bcid) + b, err := a.Chain.GetBlock(ctx, bcid) if err != nil { return nil, err } @@ -154,12 +154,12 @@ func (a *ChainAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([] } // TODO: need to get the number of messages better than this - pts, err := a.Chain.LoadTipSet(types.NewTipSetKey(b.Parents...)) + pts, err := a.Chain.LoadTipSet(ctx, types.NewTipSetKey(b.Parents...)) if err != nil { return nil, err } - cm, err := a.Chain.MessagesForTipset(pts) + cm, err := a.Chain.MessagesForTipset(ctx, pts) if err != nil { return nil, err } @@ -176,7 +176,7 @@ func (a *ChainAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([] } func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([]*types.MessageReceipt, error) { - b, err := a.Chain.GetBlock(bcid) + b, err := a.Chain.GetBlock(ctx, bcid) if err != nil { return nil, err } @@ -186,19 +186,19 @@ func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([] } // TODO: need to get the number of messages better than this - pts, err := a.Chain.LoadTipSet(types.NewTipSetKey(b.Parents...)) + pts, err := a.Chain.LoadTipSet(ctx, types.NewTipSetKey(b.Parents...)) if err != nil { return nil, err } - cm, err := a.Chain.MessagesForTipset(pts) + cm, err := a.Chain.MessagesForTipset(ctx, pts) if err != nil { return nil, err } var out []*types.MessageReceipt for i := 0; i < len(cm); i++ { - r, err := a.Chain.GetParentReceipt(b, i) + r, err := a.Chain.GetParentReceipt(ctx, b, i) if err != nil { return nil, err } @@ -210,7 +210,7 @@ func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([] } func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, err } @@ -220,7 +220,7 @@ func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSe return nil, nil } - cm, err := a.Chain.MessagesForTipset(ts) + cm, err := a.Chain.MessagesForTipset(ctx, ts) if err != nil { return nil, err } @@ -237,7 +237,7 @@ func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSe } func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -245,7 +245,7 @@ func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpo } func (m *ChainModule) ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -253,7 +253,7 @@ func (m *ChainModule) ChainGetTipSetAfterHeight(ctx context.Context, h abi.Chain } func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { - blk, err := m.ExposedBlockstore.Get(obj) + blk, err := m.ExposedBlockstore.Get(ctx, obj) if err != nil { return nil, xerrors.Errorf("blockstore get: %w", err) } @@ -262,11 +262,11 @@ func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, er } func (a *ChainAPI) ChainDeleteObj(ctx context.Context, obj cid.Cid) error { - return a.ExposedBlockstore.DeleteBlock(obj) + return a.ExposedBlockstore.DeleteBlock(ctx, obj) } func (m *ChainModule) ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) { - return m.ExposedBlockstore.Has(obj) + return m.ExposedBlockstore.Has(ctx, obj) } func (a *ChainAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) { @@ -318,7 +318,7 @@ func (a *ChainAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) } func (a *ChainAPI) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error { - newHeadTs, err := a.Chain.GetTipSetFromKey(tsk) + newHeadTs, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -342,11 +342,11 @@ func (a *ChainAPI) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error } } - return a.Chain.SetHead(newHeadTs) + return a.Chain.SetHead(ctx, newHeadTs) } func (a *ChainAPI) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { - genb, err := a.Chain.GetGenesis() + genb, err := a.Chain.GetGenesis(ctx) if err != nil { return nil, err } @@ -355,7 +355,7 @@ func (a *ChainAPI) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { } func (a *ChainAPI) ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -434,7 +434,7 @@ func resolveOnce(bs blockstore.Blockstore, tse stmgr.Executor) func(ctx context. return nil, nil, err } - if err := bs.Put(n); err != nil { + if err := bs.Put(ctx, n); err != nil { return nil, nil, xerrors.Errorf("put hamt val: %w", err) } @@ -482,7 +482,7 @@ func resolveOnce(bs blockstore.Blockstore, tse stmgr.Executor) func(ctx context. return nil, nil, err } - if err := bs.Put(n); err != nil { + if err := bs.Put(ctx, n); err != nil { return nil, nil, xerrors.Errorf("put amt val: %w", err) } @@ -530,7 +530,7 @@ func resolveOnce(bs blockstore.Blockstore, tse stmgr.Executor) func(ctx context. return nil, nil, err } - if err := bs.Put(n); err != nil { + if err := bs.Put(ctx, n); err != nil { return nil, nil, xerrors.Errorf("put amt val: %w", err) } @@ -577,7 +577,7 @@ func (a *ChainAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, } func (m *ChainModule) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { - cm, err := m.Chain.GetCMessage(mc) + cm, err := m.Chain.GetCMessage(ctx, mc) if err != nil { return nil, err } @@ -586,7 +586,7 @@ func (m *ChainModule) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.M } func (a *ChainAPI) ChainExport(ctx context.Context, nroots abi.ChainEpoch, skipoldmsgs bool, tsk types.TipSetKey) (<-chan []byte, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go index edf53ff63..dd1e8d5ea 100644 --- a/node/impl/full/gas.go +++ b/node/impl/full/gas.go @@ -82,14 +82,14 @@ type GasMeta struct { Limit int64 } -func (g *GasPriceCache) GetTSGasStats(cstore *store.ChainStore, ts *types.TipSet) ([]GasMeta, error) { +func (g *GasPriceCache) GetTSGasStats(ctx context.Context, cstore *store.ChainStore, ts *types.TipSet) ([]GasMeta, error) { i, has := g.c.Get(ts.Key()) if has { return i.([]GasMeta), nil } var prices []GasMeta - msgs, err := cstore.MessagesForTipset(ts) + msgs, err := cstore.MessagesForTipset(ctx, ts) if err != nil { return nil, xerrors.Errorf("loading messages: %w", err) } @@ -173,7 +173,7 @@ func (a *GasAPI) GasEstimateGasPremium( gaslimit int64, _ types.TipSetKey, ) (types.BigInt, error) { - return gasEstimateGasPremium(a.Chain, a.PriceCache, nblocksincl) + return gasEstimateGasPremium(ctx, a.Chain, a.PriceCache, nblocksincl) } func (m *GasModule) GasEstimateGasPremium( ctx context.Context, @@ -182,9 +182,9 @@ func (m *GasModule) GasEstimateGasPremium( gaslimit int64, _ types.TipSetKey, ) (types.BigInt, error) { - return gasEstimateGasPremium(m.Chain, m.PriceCache, nblocksincl) + return gasEstimateGasPremium(ctx, m.Chain, m.PriceCache, nblocksincl) } -func gasEstimateGasPremium(cstore *store.ChainStore, cache *GasPriceCache, nblocksincl uint64) (types.BigInt, error) { +func gasEstimateGasPremium(ctx context.Context, cstore *store.ChainStore, cache *GasPriceCache, nblocksincl uint64) (types.BigInt, error) { if nblocksincl == 0 { nblocksincl = 1 } @@ -198,13 +198,13 @@ func gasEstimateGasPremium(cstore *store.ChainStore, cache *GasPriceCache, nbloc break // genesis } - pts, err := cstore.LoadTipSet(ts.Parents()) + pts, err := cstore.LoadTipSet(ctx, ts.Parents()) if err != nil { return types.BigInt{}, err } blocks += len(pts.Blocks()) - meta, err := cache.GetTSGasStats(cstore, pts) + meta, err := cache.GetTSGasStats(ctx, cstore, pts) if err != nil { return types.BigInt{}, err } @@ -236,14 +236,14 @@ func gasEstimateGasPremium(cstore *store.ChainStore, cache *GasPriceCache, nbloc } func (a *GasAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return -1, xerrors.Errorf("getting tipset: %w", err) } return gasEstimateGasLimit(ctx, a.Chain, a.Stmgr, a.Mpool, msgIn, ts) } func (m *GasModule) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return -1, xerrors.Errorf("getting tipset: %w", err) } @@ -283,7 +283,7 @@ func gasEstimateGasLimit( if err != stmgr.ErrExpensiveFork { break } - ts, err = cstore.GetTipSetFromKey(ts.Parents()) + ts, err = cstore.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return -1, xerrors.Errorf("getting parent tipset: %w", err) } diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index f792cdf99..afff871ca 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -51,11 +51,11 @@ func (a *MpoolAPI) MpoolGetConfig(context.Context) (*types.MpoolConfig, error) { } func (a *MpoolAPI) MpoolSetConfig(ctx context.Context, cfg *types.MpoolConfig) error { - return a.Mpool.SetConfig(cfg) + return a.Mpool.SetConfig(ctx, cfg) } func (a *MpoolAPI) MpoolSelect(ctx context.Context, tsk types.TipSetKey, ticketQuality float64) ([]*types.SignedMessage, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -64,7 +64,7 @@ func (a *MpoolAPI) MpoolSelect(ctx context.Context, tsk types.TipSetKey, ticketQ } func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -87,7 +87,7 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty // different blocks in tipsets of the same height // we exclude messages that have been included in blocks in the mpool tipset - have, err := a.Mpool.MessagesForBlocks(mpts.Blocks()) + have, err := a.Mpool.MessagesForBlocks(ctx, mpts.Blocks()) if err != nil { return nil, xerrors.Errorf("getting messages for base ts: %w", err) } @@ -97,7 +97,7 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty } } - msgs, err := a.Mpool.MessagesForBlocks(ts.Blocks()) + msgs, err := a.Mpool.MessagesForBlocks(ctx, ts.Blocks()) if err != nil { return nil, xerrors.Errorf(": %w", err) } @@ -115,7 +115,7 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty return pending, nil } - ts, err = a.Chain.LoadTipSet(ts.Parents()) + ts, err = a.Chain.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("loading parent tipset: %w", err) } diff --git a/node/impl/full/state.go b/node/impl/full/state.go index e251fa3d5..dfd1c69d9 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -132,7 +132,7 @@ func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Ad } func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return miner.MinerInfo{}, xerrors.Errorf("failed to load tipset: %w", err) } @@ -250,7 +250,7 @@ func (a *StateAPI) StateMinerPartitions(ctx context.Context, m address.Address, } func (m *StateModule) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -345,7 +345,7 @@ func (a *StateAPI) StateMinerRecoveries(ctx context.Context, addr address.Addres } func (m *StateModule) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -363,7 +363,7 @@ func (m *StateModule) StateMinerPower(ctx context.Context, addr address.Address, } func (a *StateAPI) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (res *api.InvocResult, err error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -372,7 +372,7 @@ func (a *StateAPI) StateCall(ctx context.Context, msg *types.Message, tsk types. if err != stmgr.ErrExpensiveFork { break } - ts, err = a.Chain.GetTipSetFromKey(ts.Parents()) + ts, err = a.Chain.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("getting parent tipset: %w", err) } @@ -395,17 +395,17 @@ func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid. msgToReplay = mlkp.Message - executionTs, err := a.Chain.GetTipSetFromKey(mlkp.TipSet) + executionTs, err := a.Chain.GetTipSetFromKey(ctx, mlkp.TipSet) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", mlkp.TipSet, err) } - ts, err = a.Chain.LoadTipSet(executionTs.Parents()) + ts, err = a.Chain.LoadTipSet(ctx, executionTs.Parents()) if err != nil { return nil, xerrors.Errorf("loading parent tipset %s: %w", mlkp.TipSet, err) } } else { - ts, err = a.Chain.LoadTipSet(tsk) + ts, err = a.Chain.LoadTipSet(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading specified tipset %s: %w", tsk, err) } @@ -433,7 +433,7 @@ func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid. } func (m *StateModule) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (a *types.Actor, err error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -441,7 +441,7 @@ func (m *StateModule) StateGetActor(ctx context.Context, actor address.Address, } func (m *StateModule) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return address.Undef, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -450,7 +450,7 @@ func (m *StateModule) StateLookupID(ctx context.Context, addr address.Address, t } func (m *StateModule) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return address.Undef, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -459,7 +459,7 @@ func (m *StateModule) StateAccountKey(ctx context.Context, addr address.Address, } func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -468,7 +468,7 @@ func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, ts return nil, xerrors.Errorf("getting actor: %w", err) } - blk, err := a.Chain.StateBlockstore().Get(act.Head) + blk, err := a.Chain.StateBlockstore().Get(ctx, act.Head) if err != nil { return nil, xerrors.Errorf("getting actor head: %w", err) } @@ -556,7 +556,7 @@ func (m *StateModule) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence var returndec interface{} if recpt.ExitCode == 0 && len(recpt.Return) > 0 { - cmsg, err := m.Chain.GetCMessage(msg) + cmsg, err := m.Chain.GetCMessage(ctx, msg) if err != nil { return nil, xerrors.Errorf("failed to load message after successful receipt search: %w", err) } @@ -585,7 +585,7 @@ func (m *StateModule) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence } func (m *StateModule) StateSearchMsg(ctx context.Context, tsk types.TipSetKey, msg cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { - fromTs, err := m.Chain.GetTipSetFromKey(tsk) + fromTs, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -607,7 +607,7 @@ func (m *StateModule) StateSearchMsg(ctx context.Context, tsk types.TipSetKey, m } func (m *StateModule) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -615,7 +615,7 @@ func (m *StateModule) StateListMiners(ctx context.Context, tsk types.TipSetKey) } func (a *StateAPI) StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -623,7 +623,7 @@ func (a *StateAPI) StateListActors(ctx context.Context, tsk types.TipSetKey) ([] } func (m *StateModule) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return api.MarketBalance{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -633,7 +633,7 @@ func (m *StateModule) StateMarketBalance(ctx context.Context, addr address.Addre func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketBalance, error) { out := map[string]api.MarketBalance{} - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -673,7 +673,7 @@ func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSet func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketDeal, error) { out := map[string]api.MarketDeal{} - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -712,7 +712,7 @@ func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (m } func (m *StateModule) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -777,7 +777,7 @@ func (a *StateAPI) StateMinerSectorCount(ctx context.Context, addr address.Addre } func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return miner.SectorPreCommitOnChainInfo{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -793,7 +793,7 @@ func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.A } func (m *StateModule) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -825,7 +825,7 @@ func (a *StateAPI) StateSectorPartition(ctx context.Context, maddr address.Addre } func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toheight abi.ChainEpoch) ([]cid.Cid, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -874,7 +874,7 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc var out []cid.Cid for ts.Height() >= toheight { - msgs, err := a.Chain.MessagesForTipset(ts) + msgs, err := a.Chain.MessagesForTipset(ctx, ts) if err != nil { return nil, xerrors.Errorf("failed to get messages for tipset (%s): %w", ts.Key(), err) } @@ -889,7 +889,7 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc break } - next, err := a.Chain.LoadTipSet(ts.Parents()) + next, err := a.Chain.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("loading next tipset: %w", err) } @@ -901,7 +901,7 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc } func (a *StateAPI) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs []*types.Message, tsk types.TipSetKey) (*api.ComputeStateOutput, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -917,7 +917,7 @@ func (a *StateAPI) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs } func (m *StateModule) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -938,7 +938,7 @@ func (m *StateModule) MsigGetAvailableBalance(ctx context.Context, addr address. } func (a *StateAPI) MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MsigVesting, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return api.EmptyVesting, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -976,12 +976,12 @@ func (a *StateAPI) MsigGetVestingSchedule(ctx context.Context, addr address.Addr } func (m *StateModule) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) { - startTs, err := m.Chain.GetTipSetFromKey(start) + startTs, err := m.Chain.GetTipSetFromKey(ctx, start) if err != nil { return types.EmptyInt, xerrors.Errorf("loading start tipset %s: %w", start, err) } - endTs, err := m.Chain.GetTipSetFromKey(end) + endTs, err := m.Chain.GetTipSetFromKey(ctx, end) if err != nil { return types.EmptyInt, xerrors.Errorf("loading end tipset %s: %w", end, err) } @@ -1016,7 +1016,7 @@ func (m *StateModule) MsigGetVested(ctx context.Context, addr address.Address, s } func (m *StateModule) MsigGetPending(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.MsigTransaction, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1053,7 +1053,7 @@ var initialPledgeNum = types.NewInt(110) var initialPledgeDen = types.NewInt(100) func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1114,7 +1114,7 @@ func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { // TODO: this repeats a lot of the previous function. Fix that. - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1190,7 +1190,7 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr } func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1219,7 +1219,7 @@ func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address } func (a *StateAPI) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return false, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1319,7 +1319,7 @@ var dealProviderCollateralDen = types.NewInt(100) // StateDealProviderCollateralBounds returns the min and max collateral a storage provider // can issue. It takes the deal size and verified status as parameters. func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1365,7 +1365,7 @@ func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, siz powClaim.QualityAdjPower, rewPow, circ.FilCirculating, - m.StateManager.GetNtwkVersion(ctx, ts.Height())) + m.StateManager.GetNetworkVersion(ctx, ts.Height())) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("getting deal provider coll bounds: %w", err) } @@ -1376,7 +1376,7 @@ func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, siz } func (a *StateAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1397,7 +1397,7 @@ func stateVMCirculatingSupplyInternal( cstore *store.ChainStore, smgr *stmgr.StateManager, ) (api.CirculatingSupply, error) { - ts, err := cstore.GetTipSetFromKey(tsk) + ts, err := cstore.GetTipSetFromKey(ctx, tsk) if err != nil { return api.CirculatingSupply{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1411,14 +1411,14 @@ func stateVMCirculatingSupplyInternal( } func (m *StateModule) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return network.VersionMax, xerrors.Errorf("loading tipset %s: %w", tsk, err) } // TODO: Height-1 to be consistent with the rest of the APIs? // But that's likely going to break a bunch of stuff. - return m.StateManager.GetNtwkVersion(ctx, ts.Height()), nil + return m.StateManager.GetNetworkVersion(ctx, ts.Height()), nil } func (a *StateAPI) StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) { diff --git a/node/impl/full/sync.go b/node/impl/full/sync.go index 652ae3ecb..bd7de65b1 100644 --- a/node/impl/full/sync.go +++ b/node/impl/full/sync.go @@ -51,25 +51,25 @@ func (a *SyncAPI) SyncState(ctx context.Context) (*api.SyncState, error) { } func (a *SyncAPI) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error { - parent, err := a.Syncer.ChainStore().GetBlock(blk.Header.Parents[0]) + parent, err := a.Syncer.ChainStore().GetBlock(ctx, blk.Header.Parents[0]) if err != nil { return xerrors.Errorf("loading parent block: %w", err) } if a.SlashFilter != nil { - if err := a.SlashFilter.MinedBlock(blk.Header, parent.Height); err != nil { + if err := a.SlashFilter.MinedBlock(ctx, blk.Header, parent.Height); err != nil { log.Errorf(" SLASH FILTER ERROR: %s", err) return xerrors.Errorf(" SLASH FILTER ERROR: %w", err) } } // TODO: should we have some sort of fast path to adding a local block? - bmsgs, err := a.Syncer.ChainStore().LoadMessagesFromCids(blk.BlsMessages) + bmsgs, err := a.Syncer.ChainStore().LoadMessagesFromCids(ctx, blk.BlsMessages) if err != nil { return xerrors.Errorf("failed to load bls messages: %w", err) } - smsgs, err := a.Syncer.ChainStore().LoadSignedMessagesFromCids(blk.SecpkMessages) + smsgs, err := a.Syncer.ChainStore().LoadSignedMessagesFromCids(ctx, blk.SecpkMessages) if err != nil { return xerrors.Errorf("failed to load secpk message: %w", err) } @@ -137,12 +137,12 @@ func (a *SyncAPI) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error } func (a *SyncAPI) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) { - ts, err := a.Syncer.ChainStore().LoadTipSet(tsk) + ts, err := a.Syncer.ChainStore().LoadTipSet(ctx, tsk) if err != nil { return false, err } - fts, err := a.Syncer.ChainStore().TryFillTipSet(ts) + fts, err := a.Syncer.ChainStore().TryFillTipSet(ctx, ts) if err != nil { return false, err } diff --git a/node/impl/paych/paych.go b/node/impl/paych/paych.go index 773a5efab..df3b1e3e4 100644 --- a/node/impl/paych/paych.go +++ b/node/impl/paych/paych.go @@ -35,11 +35,11 @@ func (a *PaychAPI) PaychGet(ctx context.Context, from, to address.Address, amt t } func (a *PaychAPI) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) { - return a.PaychMgr.AvailableFunds(ch) + return a.PaychMgr.AvailableFunds(ctx, ch) } func (a *PaychAPI) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) { - return a.PaychMgr.AvailableFundsByFromTo(from, to) + return a.PaychMgr.AvailableFundsByFromTo(ctx, from, to) } func (a *PaychAPI) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) { @@ -47,7 +47,7 @@ func (a *PaychAPI) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (add } func (a *PaychAPI) PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) { - return a.PaychMgr.AllocateLane(ch) + return a.PaychMgr.AllocateLane(ctx, ch) } func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) { @@ -60,7 +60,7 @@ func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address return nil, err } - lane, err := a.PaychMgr.AllocateLane(ch.Channel) + lane, err := a.PaychMgr.AllocateLane(ctx, ch.Channel) if err != nil { return nil, err } @@ -95,11 +95,11 @@ func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address } func (a *PaychAPI) PaychList(ctx context.Context) ([]address.Address, error) { - return a.PaychMgr.ListChannels() + return a.PaychMgr.ListChannels(ctx) } func (a *PaychAPI) PaychStatus(ctx context.Context, pch address.Address) (*api.PaychStatus, error) { - ci, err := a.PaychMgr.GetChannelInfo(pch) + ci, err := a.PaychMgr.GetChannelInfo(ctx, pch) if err != nil { return nil, err } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 39baa97bf..3ebac1409 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -3,6 +3,7 @@ package impl import ( "context" "encoding/json" + "errors" "fmt" "net/http" "os" @@ -19,6 +20,9 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-cid" + "github.com/ipfs/go-graphsync" + gsimpl "github.com/ipfs/go-graphsync/impl" + "github.com/ipfs/go-graphsync/peerstate" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" "go.uber.org/fx" @@ -28,10 +32,12 @@ import ( "github.com/filecoin-project/go-address" datatransfer "github.com/filecoin-project/go-data-transfer" + gst "github.com/filecoin-project/go-data-transfer/transport/graphsync" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" @@ -70,6 +76,8 @@ type StorageMinerAPI struct { RetrievalProvider retrievalmarket.RetrievalProvider `optional:"true"` SectorAccessor retrievalmarket.SectorAccessor `optional:"true"` DataTransfer dtypes.ProviderDataTransfer `optional:"true"` + StagingGraphsync dtypes.StagingGraphsync `optional:"true"` + Transport dtypes.ProviderTransport `optional:"true"` DealPublisher *storageadapter.DealPublisher `optional:"true"` SectorBlocks *sectorblocks.SectorBlocks `optional:"true"` Host host.Host `optional:"true"` @@ -300,11 +308,11 @@ func (sm *StorageMinerAPI) StorageLocal(ctx context.Context) (map[stores.ID]stri return out, nil } -func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.SealedRef, error) { +func (sm *StorageMinerAPI) SectorsRefs(ctx context.Context) (map[string][]api.SealedRef, error) { // json can't handle cids as map keys out := map[string][]api.SealedRef{} - refs, err := sm.SectorBlocks.List() + refs, err := sm.SectorBlocks.List(ctx) if err != nil { return nil, err } @@ -379,8 +387,8 @@ func (sm *StorageMinerAPI) SectorPreCommitPending(ctx context.Context) ([]abi.Se return sm.Miner.SectorPreCommitPending(ctx) } -func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error { - return sm.Miner.MarkForUpgrade(id) +func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error { + return sm.Miner.MarkForUpgrade(ctx, id, snap) } func (sm *StorageMinerAPI) SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { @@ -391,6 +399,10 @@ func (sm *StorageMinerAPI) SectorCommitPending(ctx context.Context) ([]abi.Secto return sm.Miner.CommitPending(ctx) } +func (sm *StorageMinerAPI) SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error { + return sm.Miner.SectorMatchPendingPiecesToOpenSectors(ctx) +} + func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error { w, err := connectRemoteWorker(ctx, sm, url) if err != nil { @@ -553,6 +565,166 @@ func (sm *StorageMinerAPI) MarketDataTransferUpdates(ctx context.Context) (<-cha return channels, nil } +func (sm *StorageMinerAPI) MarketDataTransferDiagnostics(ctx context.Context, mpid peer.ID) (*api.TransferDiagnostics, error) { + gsTransport, ok := sm.Transport.(*gst.Transport) + if !ok { + return nil, errors.New("api only works for graphsync as transport") + } + graphsyncConcrete, ok := sm.StagingGraphsync.(*gsimpl.GraphSync) + if !ok { + return nil, errors.New("api only works for non-mock graphsync implementation") + } + + inProgressChannels, err := sm.DataTransfer.InProgressChannels(ctx) + if err != nil { + return nil, err + } + + allReceivingChannels := make(map[datatransfer.ChannelID]datatransfer.ChannelState) + allSendingChannels := make(map[datatransfer.ChannelID]datatransfer.ChannelState) + for channelID, channel := range inProgressChannels { + if channel.OtherPeer() != mpid { + continue + } + if channel.Status() == datatransfer.Completed { + continue + } + if channel.Status() == datatransfer.Failed || channel.Status() == datatransfer.Cancelled { + continue + } + if channel.SelfPeer() == channel.Sender() { + allSendingChannels[channelID] = channel + } else { + allReceivingChannels[channelID] = channel + } + } + + // gather information about active transport channels + transportChannels := gsTransport.ChannelsForPeer(mpid) + // gather information about graphsync state for peer + gsPeerState := graphsyncConcrete.PeerState(mpid) + + sendingTransfers := sm.generateTransfers(ctx, transportChannels.SendingChannels, gsPeerState.IncomingState, allSendingChannels) + receivingTransfers := sm.generateTransfers(ctx, transportChannels.ReceivingChannels, gsPeerState.OutgoingState, allReceivingChannels) + + return &api.TransferDiagnostics{ + SendingTransfers: sendingTransfers, + ReceivingTransfers: receivingTransfers, + }, nil +} + +// generate transfers matches graphsync state and data transfer state for a given peer +// to produce detailed output on what's happening with a transfer +func (sm *StorageMinerAPI) generateTransfers(ctx context.Context, + transportChannels map[datatransfer.ChannelID]gst.ChannelGraphsyncRequests, + gsPeerState peerstate.PeerState, + allChannels map[datatransfer.ChannelID]datatransfer.ChannelState) []*api.GraphSyncDataTransfer { + tc := &transferConverter{ + matchedChannelIds: make(map[datatransfer.ChannelID]struct{}), + matchedRequests: make(map[graphsync.RequestID]*api.GraphSyncDataTransfer), + gsDiagnostics: gsPeerState.Diagnostics(), + requestStates: gsPeerState.RequestStates, + allChannels: allChannels, + } + + // iterate through all operating data transfer transport channels + for channelID, channelRequests := range transportChannels { + originalState, err := sm.DataTransfer.ChannelState(ctx, channelID) + var baseDiagnostics []string + var channelState *api.DataTransferChannel + if err != nil { + baseDiagnostics = append(baseDiagnostics, fmt.Sprintf("Unable to lookup channel state: %s", err)) + } else { + cs := api.NewDataTransferChannel(sm.Host.ID(), originalState) + channelState = &cs + } + // add the current request for this channel + tc.convertTransfer(channelID, true, channelState, baseDiagnostics, channelRequests.Current, true) + for _, requestID := range channelRequests.Previous { + // add any previous requests that were cancelled for a restart + tc.convertTransfer(channelID, true, channelState, baseDiagnostics, requestID, false) + } + } + + // collect any graphsync data for channels we don't have any data transfer data for + tc.collectRemainingTransfers() + + return tc.transfers +} + +type transferConverter struct { + matchedChannelIds map[datatransfer.ChannelID]struct{} + matchedRequests map[graphsync.RequestID]*api.GraphSyncDataTransfer + transfers []*api.GraphSyncDataTransfer + gsDiagnostics map[graphsync.RequestID][]string + requestStates graphsync.RequestStates + allChannels map[datatransfer.ChannelID]datatransfer.ChannelState +} + +// convert transfer assembles transfer and diagnostic data for a given graphsync/data-transfer request +func (tc *transferConverter) convertTransfer(channelID datatransfer.ChannelID, hasChannelID bool, channelState *api.DataTransferChannel, baseDiagnostics []string, + requestID graphsync.RequestID, isCurrentChannelRequest bool) { + diagnostics := baseDiagnostics + state, hasState := tc.requestStates[requestID] + stateString := state.String() + if !hasState { + stateString = "no graphsync state found" + } + var channelIDPtr *datatransfer.ChannelID + if !hasChannelID { + diagnostics = append(diagnostics, fmt.Sprintf("No data transfer channel id for GraphSync request ID %d", requestID)) + } else { + channelIDPtr = &channelID + if isCurrentChannelRequest && !hasState { + diagnostics = append(diagnostics, fmt.Sprintf("No current request state for data transfer channel id %s", channelID)) + } else if !isCurrentChannelRequest && hasState { + diagnostics = append(diagnostics, fmt.Sprintf("Graphsync request %d is a previous request on data transfer channel id %s that was restarted, but it is still running", requestID, channelID)) + } + } + diagnostics = append(diagnostics, tc.gsDiagnostics[requestID]...) + transfer := &api.GraphSyncDataTransfer{ + RequestID: requestID, + RequestState: stateString, + IsCurrentChannelRequest: isCurrentChannelRequest, + ChannelID: channelIDPtr, + ChannelState: channelState, + Diagnostics: diagnostics, + } + tc.transfers = append(tc.transfers, transfer) + tc.matchedRequests[requestID] = transfer + if hasChannelID { + tc.matchedChannelIds[channelID] = struct{}{} + } +} + +func (tc *transferConverter) collectRemainingTransfers() { + for requestID := range tc.requestStates { + if _, ok := tc.matchedRequests[requestID]; !ok { + tc.convertTransfer(datatransfer.ChannelID{}, false, nil, nil, requestID, false) + } + } + for requestID := range tc.gsDiagnostics { + if _, ok := tc.matchedRequests[requestID]; !ok { + tc.convertTransfer(datatransfer.ChannelID{}, false, nil, nil, requestID, false) + } + } + for channelID, channelState := range tc.allChannels { + if _, ok := tc.matchedChannelIds[channelID]; !ok { + channelID := channelID + cs := api.NewDataTransferChannel(channelState.SelfPeer(), channelState) + transfer := &api.GraphSyncDataTransfer{ + RequestID: graphsync.RequestID(-1), + RequestState: "graphsync state unknown", + IsCurrentChannelRequest: false, + ChannelID: &channelID, + ChannelState: &cs, + Diagnostics: []string{"data transfer with no open transport channel, cannot determine linked graphsync request"}, + } + tc.transfers = append(tc.transfers, transfer) + } + } +} + func (sm *StorageMinerAPI) MarketPendingDeals(ctx context.Context) (api.PendingDealInfo, error) { return sm.DealPublisher.PendingDeals(), nil } @@ -948,7 +1120,7 @@ func (sm *StorageMinerAPI) PiecesGetCIDInfo(ctx context.Context, payloadCid cid. } func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error { - return backup(sm.DS, fpath) + return backup(ctx, sm.DS, fpath) } func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) { @@ -988,8 +1160,8 @@ func (sm *StorageMinerAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocume return build.OpenRPCDiscoverJSON_Miner(), nil } -func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) { - return sm.Epp.ComputeProof(ctx, ssi, rand) +func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv network.Version) ([]builtin.PoStProof, error) { + return sm.Epp.ComputeProof(ctx, ssi, rand, poStEpoch, nv) } func (sm *StorageMinerAPI) RuntimeSubsystems(context.Context) (res api.MinerSubsystems, err error) { diff --git a/node/modules/chain.go b/node/modules/chain.go index 3518c3b29..ff957c9ad 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -30,7 +30,7 @@ import ( ) // ChainBitswap uses a blockstore that bypasses all caches. -func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ExposedBlockstore) dtypes.ChainBitswap { +func ChainBitswap(lc fx.Lifecycle, mctx helpers.MetricsCtx, host host.Host, rt routing.Routing, bs dtypes.ExposedBlockstore) dtypes.ChainBitswap { // prefix protocol for chain bitswap // (so bitswap uses /chain/ipfs/bitswap/1.0.0 internally for chain sync stuff) bitswapNetwork := network.NewFromIpfsHost(host, rt, network.Prefix("/chain")) @@ -58,8 +58,8 @@ func ChainBlockService(bs dtypes.ExposedBlockstore, rem dtypes.ChainBitswap) dty return blockservice.New(bs, rem) } -func MessagePool(lc fx.Lifecycle, us stmgr.UpgradeSchedule, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) { - mp, err := messagepool.New(mpp, ds, us, nn, j) +func MessagePool(lc fx.Lifecycle, mctx helpers.MetricsCtx, us stmgr.UpgradeSchedule, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) { + mp, err := messagepool.New(helpers.LifecycleCtx(mctx, lc), mpp, ds, us, nn, j) if err != nil { return nil, xerrors.Errorf("constructing mpool: %w", err) } @@ -73,23 +73,25 @@ func MessagePool(lc fx.Lifecycle, us stmgr.UpgradeSchedule, mpp messagepool.Prov } func ChainStore(lc fx.Lifecycle, + mctx helpers.MetricsCtx, cbs dtypes.ChainBlockstore, sbs dtypes.StateBlockstore, ds dtypes.MetadataDS, basebs dtypes.BaseBlockstore, weight store.WeightFunc, + us stmgr.UpgradeSchedule, j journal.Journal) *store.ChainStore { chain := store.NewChainStore(cbs, sbs, ds, weight, j) - if err := chain.Load(); err != nil { + if err := chain.Load(helpers.LifecycleCtx(mctx, lc)); err != nil { log.Warnf("loading chain state from disk: %s", err) } var startHook func(context.Context) error if ss, ok := basebs.(*splitstore.SplitStore); ok { startHook = func(_ context.Context) error { - err := ss.Start(chain) + err := ss.Start(chain, us) if err != nil { err = xerrors.Errorf("error starting splitstore: %w", err) } diff --git a/node/modules/client.go b/node/modules/client.go index 4d988d98a..86063b8d9 100644 --- a/node/modules/client.go +++ b/node/modules/client.go @@ -40,11 +40,12 @@ import ( "github.com/filecoin-project/lotus/node/impl/full" payapi "github.com/filecoin-project/lotus/node/impl/paych" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/node/repo/imports" ) -func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) { +func HandleMigrateClientFunds(lc fx.Lifecycle, mctx helpers.MetricsCtx, ds dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) { lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { addr, err := wallet.WalletDefaultAddress(ctx) @@ -52,7 +53,7 @@ func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full if err != nil { return nil } - b, err := ds.Get(datastore.NewKey("/marketfunds/client")) + b, err := ds.Get(helpers.LifecycleCtx(mctx, lc), datastore.NewKey("/marketfunds/client")) if err != nil { if xerrors.Is(err, datastore.ErrNotFound) { return nil @@ -73,7 +74,7 @@ func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full return nil } - return ds.Delete(datastore.NewKey("/marketfunds/client")) + return ds.Delete(helpers.LifecycleCtx(mctx, lc), datastore.NewKey("/marketfunds/client")) }, }) } diff --git a/node/modules/dtypes/storage.go b/node/modules/dtypes/storage.go index 6893908f7..542445b1e 100644 --- a/node/modules/dtypes/storage.go +++ b/node/modules/dtypes/storage.go @@ -8,6 +8,7 @@ import ( "github.com/ipfs/go-graphsync" exchange "github.com/ipfs/go-ipfs-exchange-interface" + dtnet "github.com/filecoin-project/go-data-transfer/network" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" @@ -85,6 +86,7 @@ type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator // ProviderDataTransfer is a data transfer manager for the provider type ProviderDataTransfer datatransfer.Manager - +type ProviderTransferNetwork dtnet.DataTransferNetwork +type ProviderTransport datatransfer.Transport type StagingBlockstore blockstore.BasicBlockstore type StagingGraphsync graphsync.GraphExchange diff --git a/node/modules/genesis.go b/node/modules/genesis.go index 43443b125..03b4e2907 100644 --- a/node/modules/genesis.go +++ b/node/modules/genesis.go @@ -4,6 +4,8 @@ import ( "bytes" "os" + "go.uber.org/fx" + "github.com/ipfs/go-datastore" "github.com/ipld/go-car" "golang.org/x/xerrors" @@ -11,6 +13,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" ) func ErrorGenesis() Genesis { @@ -19,17 +22,18 @@ func ErrorGenesis() Genesis { } } -func LoadGenesis(genBytes []byte) func(dtypes.ChainBlockstore) Genesis { - return func(bs dtypes.ChainBlockstore) Genesis { +func LoadGenesis(genBytes []byte) func(fx.Lifecycle, helpers.MetricsCtx, dtypes.ChainBlockstore) Genesis { + return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, bs dtypes.ChainBlockstore) Genesis { return func() (header *types.BlockHeader, e error) { - c, err := car.LoadCar(bs, bytes.NewReader(genBytes)) + ctx := helpers.LifecycleCtx(mctx, lc) + c, err := car.LoadCar(ctx, bs, bytes.NewReader(genBytes)) if err != nil { return nil, xerrors.Errorf("loading genesis car file failed: %w", err) } if len(c.Roots) != 1 { return nil, xerrors.New("expected genesis file to have one root") } - root, err := bs.Get(c.Roots[0]) + root, err := bs.Get(ctx, c.Roots[0]) if err != nil { return nil, err } @@ -45,8 +49,9 @@ func LoadGenesis(genBytes []byte) func(dtypes.ChainBlockstore) Genesis { func DoSetGenesis(_ dtypes.AfterGenesisSet) {} -func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) { - genFromRepo, err := cs.GetGenesis() +func SetGenesis(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + genFromRepo, err := cs.GetGenesis(ctx) if err == nil { if os.Getenv("LOTUS_SKIP_GENESIS_CHECK") != "_yes_" { expectedGenesis, err := g() @@ -69,5 +74,5 @@ func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis func failed: %w", err) } - return dtypes.AfterGenesisSet{}, cs.SetGenesis(genesis) + return dtypes.AfterGenesisSet{}, cs.SetGenesis(ctx, genesis) } diff --git a/node/modules/lp2p/host.go b/node/modules/lp2p/host.go index b0436e9c9..982d9f4cd 100644 --- a/node/modules/lp2p/host.go +++ b/node/modules/lp2p/host.go @@ -10,6 +10,7 @@ import ( "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-peerstore/pstoremem" record "github.com/libp2p/go-libp2p-record" routedhost "github.com/libp2p/go-libp2p/p2p/host/routed" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" @@ -33,9 +34,11 @@ type P2PHostIn struct { type RawHost host.Host -func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, error) { - ctx := helpers.LifecycleCtx(mctx, lc) +func Peerstore() (peerstore.Peerstore, error) { + return pstoremem.NewPeerstore() +} +func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, error) { pkey := params.Peerstore.PrivKey(params.ID) if pkey == nil { return nil, fmt.Errorf("missing private key for node ID: %s", params.ID.Pretty()) @@ -52,7 +55,7 @@ func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, opts = append(opts, o...) } - h, err := libp2p.New(ctx, opts...) + h, err := libp2p.New(opts...) if err != nil { return nil, err } diff --git a/node/modules/lp2p/libp2p.go b/node/modules/lp2p/libp2p.go index 4dee15ae9..5d8ece732 100644 --- a/node/modules/lp2p/libp2p.go +++ b/node/modules/lp2p/libp2p.go @@ -69,7 +69,11 @@ func genLibp2pKey() (crypto.PrivKey, error) { func ConnectionManager(low, high uint, grace time.Duration, protected []string) func() (opts Libp2pOpts, err error) { return func() (Libp2pOpts, error) { - cm := connmgr.NewConnManager(int(low), int(high), grace) + cm, err := connmgr.NewConnManager(int(low), int(high), connmgr.WithGracePeriod(grace)) + if err != nil { + return Libp2pOpts{}, err + } + for _, p := range protected { pid, err := peer.IDFromString(p) if err != nil { diff --git a/node/modules/services.go b/node/modules/services.go index 17d4a7476..299c01fb0 100644 --- a/node/modules/services.go +++ b/node/modules/services.go @@ -228,8 +228,8 @@ func BuiltinDrandConfig() dtypes.DrandSchedule { return build.DrandConfigSchedule() } -func RandomSchedule(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Schedule, error) { - gen, err := p.Cs.GetGenesis() +func RandomSchedule(lc fx.Lifecycle, mctx helpers.MetricsCtx, p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Schedule, error) { + gen, err := p.Cs.GetGenesis(helpers.LifecycleCtx(mctx, lc)) if err != nil { return nil, err } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index f4d00606f..3b8687525 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -39,7 +39,6 @@ import ( "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" graphsync "github.com/ipfs/go-graphsync/impl" - graphsyncimpl "github.com/ipfs/go-graphsync/impl" gsnet "github.com/ipfs/go-graphsync/network" "github.com/ipfs/go-graphsync/storeutil" "github.com/libp2p/go-libp2p-core/host" @@ -78,7 +77,7 @@ var ( ) func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) { - maddrb, err := ds.Get(datastore.NewKey("miner-address")) + maddrb, err := ds.Get(context.TODO(), datastore.NewKey("miner-address")) if err != nil { return address.Undef, err } @@ -300,7 +299,7 @@ func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h sto func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api.FullNode, minerAddress dtypes.MinerAddress) { lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { - b, err := ds.Get(datastore.NewKey("/marketfunds/provider")) + b, err := ds.Get(ctx, datastore.NewKey("/marketfunds/provider")) if err != nil { if xerrors.Is(err, datastore.ErrNotFound) { return nil @@ -331,18 +330,24 @@ func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api. return nil } - return ds.Delete(datastore.NewKey("/marketfunds/provider")) + return ds.Delete(ctx, datastore.NewKey("/marketfunds/provider")) }, }) } -// NewProviderDAGServiceDataTransfer returns a data transfer manager that just -// uses the provider's Staging DAG service for transfers -func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.StagingGraphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) { - net := dtnet.NewFromLibp2pHost(h) +// NewProviderTransferNetwork sets up the libp2p2 protocol networking for data transfer +func NewProviderTransferNetwork(h host.Host) dtypes.ProviderTransferNetwork { + return dtnet.NewFromLibp2pHost(h) +} +// NewProviderTransport sets up a data transfer transport over graphsync +func NewProviderTransport(h host.Host, gs dtypes.StagingGraphsync, net dtypes.ProviderTransferNetwork) dtypes.ProviderTransport { + return dtgstransport.NewTransport(h.ID(), gs, net) +} + +// NewProviderDataTransfer returns a data transfer manager +func NewProviderDataTransfer(lc fx.Lifecycle, net dtypes.ProviderTransferNetwork, transport dtypes.ProviderTransport, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) { dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/provider/transfers")) - transport := dtgstransport.NewTransport(h.ID(), gs, net) err := os.MkdirAll(filepath.Join(r.Path(), "data-transfer"), 0755) //nolint: gosec if err != nil && !os.IsExist(err) { return nil, err @@ -396,7 +401,7 @@ func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRe // StagingGraphsync creates a graphsync instance which reads and writes blocks // to the StagingBlockstore -func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { +func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { graphsyncNetwork := gsnet.NewFromLibp2pHost(h) lsys := storeutil.LinkSystemForBlockstore(ibs) @@ -405,9 +410,10 @@ func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForRe lsys, graphsync.RejectAllRequestsByDefault(), graphsync.MaxInProgressIncomingRequests(parallelTransfersForRetrieval), + graphsync.MaxInProgressIncomingRequestsPerPeer(parallelTransfersForStoragePerPeer), graphsync.MaxInProgressOutgoingRequests(parallelTransfersForStorage), - graphsyncimpl.MaxLinksPerIncomingRequests(config.MaxTraversalLinks), - graphsyncimpl.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks)) + graphsync.MaxLinksPerIncomingRequests(config.MaxTraversalLinks), + graphsync.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks)) graphsyncStats(mctx, lc, gs) diff --git a/node/repo/imports/manager.go b/node/repo/imports/manager.go index d972ffb77..2deaa30af 100644 --- a/node/repo/imports/manager.go +++ b/node/repo/imports/manager.go @@ -1,6 +1,7 @@ package imports import ( + "context" "encoding/json" "fmt" "os" @@ -107,6 +108,7 @@ type Meta struct { // CreateImport initializes a new import, returning its ID and optionally a // CAR path where to place the data, if requested. func (m *Manager) CreateImport() (id ID, err error) { + ctx := context.TODO() id = ID(m.counter.Next()) meta := &Meta{Labels: map[LabelKey]LabelValue{ @@ -118,7 +120,7 @@ func (m *Manager) CreateImport() (id ID, err error) { return 0, xerrors.Errorf("marshaling store metadata: %w", err) } - err = m.ds.Put(id.dsKey(), metajson) + err = m.ds.Put(ctx, id.dsKey(), metajson) if err != nil { return 0, xerrors.Errorf("failed to insert import metadata: %w", err) } @@ -129,7 +131,8 @@ func (m *Manager) CreateImport() (id ID, err error) { // AllocateCAR creates a new CAR allocated to the supplied import under the // root directory. func (m *Manager) AllocateCAR(id ID) (path string, err error) { - meta, err := m.ds.Get(id.dsKey()) + ctx := context.TODO() + meta, err := m.ds.Get(ctx, id.dsKey()) if err != nil { return "", xerrors.Errorf("getting metadata form datastore: %w", err) } @@ -163,14 +166,15 @@ func (m *Manager) AllocateCAR(id ID) (path string, err error) { return "", xerrors.Errorf("marshaling store metadata: %w", err) } - err = m.ds.Put(id.dsKey(), meta) + err = m.ds.Put(ctx, id.dsKey(), meta) return path, err } // AddLabel adds a label associated with an import, such as the source, // car path, CID, etc. func (m *Manager) AddLabel(id ID, key LabelKey, value LabelValue) error { - meta, err := m.ds.Get(id.dsKey()) + ctx := context.TODO() + meta, err := m.ds.Get(ctx, id.dsKey()) if err != nil { return xerrors.Errorf("getting metadata form datastore: %w", err) } @@ -187,14 +191,15 @@ func (m *Manager) AddLabel(id ID, key LabelKey, value LabelValue) error { return xerrors.Errorf("marshaling store meta: %w", err) } - return m.ds.Put(id.dsKey(), meta) + return m.ds.Put(ctx, id.dsKey(), meta) } // List returns all import IDs known by this Manager. func (m *Manager) List() ([]ID, error) { + ctx := context.TODO() var keys []ID - qres, err := m.ds.Query(query.Query{KeysOnly: true}) + qres, err := m.ds.Query(ctx, query.Query{KeysOnly: true}) if err != nil { return nil, xerrors.Errorf("query error: %w", err) } @@ -218,7 +223,9 @@ func (m *Manager) List() ([]ID, error) { // Info returns the metadata known to this store for the specified import ID. func (m *Manager) Info(id ID) (*Meta, error) { - meta, err := m.ds.Get(id.dsKey()) + ctx := context.TODO() + + meta, err := m.ds.Get(ctx, id.dsKey()) if err != nil { return nil, xerrors.Errorf("getting metadata form datastore: %w", err) } @@ -233,7 +240,8 @@ func (m *Manager) Info(id ID) (*Meta, error) { // Remove drops all data associated with the supplied import ID. func (m *Manager) Remove(id ID) error { - if err := m.ds.Delete(id.dsKey()); err != nil { + ctx := context.TODO() + if err := m.ds.Delete(ctx, id.dsKey()); err != nil { return xerrors.Errorf("removing import metadata: %w", err) } return nil diff --git a/paychmgr/accessorcache.go b/paychmgr/accessorcache.go index 176fbdd11..358cf7900 100644 --- a/paychmgr/accessorcache.go +++ b/paychmgr/accessorcache.go @@ -1,6 +1,10 @@ package paychmgr -import "github.com/filecoin-project/go-address" +import ( + "context" + + "github.com/filecoin-project/go-address" +) // accessorByFromTo gets a channel accessor for a given from / to pair. // The channel accessor facilitates locking a channel so that operations @@ -36,10 +40,10 @@ func (pm *Manager) accessorByFromTo(from address.Address, to address.Address) (* // The channel accessor facilitates locking a channel so that operations // must be performed sequentially on a channel (but can be performed at // the same time on different channels). -func (pm *Manager) accessorByAddress(ch address.Address) (*channelAccessor, error) { +func (pm *Manager) accessorByAddress(ctx context.Context, ch address.Address) (*channelAccessor, error) { // Get the channel from / to pm.lk.RLock() - channelInfo, err := pm.store.ByAddress(ch) + channelInfo, err := pm.store.ByAddress(ctx, ch) pm.lk.RUnlock() if err != nil { return nil, err diff --git a/paychmgr/manager.go b/paychmgr/manager.go index 460722945..e0fcd7a75 100644 --- a/paychmgr/manager.go +++ b/paychmgr/manager.go @@ -92,7 +92,7 @@ func newManager(pchstore *Store, pchapi managerAPI) (*Manager, error) { // Start restarts tracking of any messages that were sent to chain. func (pm *Manager) Start() error { - return pm.restartPending() + return pm.restartPending(pm.ctx) } // Stop shuts down any processes used by the manager @@ -110,27 +110,27 @@ func (pm *Manager) GetPaych(ctx context.Context, from, to address.Address, amt t return chanAccessor.getPaych(ctx, amt) } -func (pm *Manager) AvailableFunds(ch address.Address) (*api.ChannelAvailableFunds, error) { - ca, err := pm.accessorByAddress(ch) +func (pm *Manager) AvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) { + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return nil, err } - ci, err := ca.getChannelInfo(ch) + ci, err := ca.getChannelInfo(ctx, ch) if err != nil { return nil, err } - return ca.availableFunds(ci.ChannelID) + return ca.availableFunds(ctx, ci.ChannelID) } -func (pm *Manager) AvailableFundsByFromTo(from address.Address, to address.Address) (*api.ChannelAvailableFunds, error) { +func (pm *Manager) AvailableFundsByFromTo(ctx context.Context, from address.Address, to address.Address) (*api.ChannelAvailableFunds, error) { ca, err := pm.accessorByFromTo(from, to) if err != nil { return nil, err } - ci, err := ca.outboundActiveByFromTo(from, to) + ci, err := ca.outboundActiveByFromTo(ctx, from, to) if err == ErrChannelNotTracked { // If there is no active channel between from / to we still want to // return an empty ChannelAvailableFunds, so that clients can check @@ -151,7 +151,7 @@ func (pm *Manager) AvailableFundsByFromTo(from address.Address, to address.Addre return nil, err } - return ca.availableFunds(ci.ChannelID) + return ca.availableFunds(ctx, ci.ChannelID) } // GetPaychWaitReady waits until the create channel / add funds message with the @@ -160,7 +160,7 @@ func (pm *Manager) AvailableFundsByFromTo(from address.Address, to address.Addre func (pm *Manager) GetPaychWaitReady(ctx context.Context, mcid cid.Cid) (address.Address, error) { // Find the channel associated with the message CID pm.lk.Lock() - ci, err := pm.store.ByMessageCid(mcid) + ci, err := pm.store.ByMessageCid(ctx, mcid) pm.lk.Unlock() if err != nil { @@ -178,25 +178,25 @@ func (pm *Manager) GetPaychWaitReady(ctx context.Context, mcid cid.Cid) (address return chanAccessor.getPaychWaitReady(ctx, mcid) } -func (pm *Manager) ListChannels() ([]address.Address, error) { +func (pm *Manager) ListChannels(ctx context.Context) ([]address.Address, error) { // Need to take an exclusive lock here so that channel operations can't run // in parallel (see channelLock) pm.lk.Lock() defer pm.lk.Unlock() - return pm.store.ListChannels() + return pm.store.ListChannels(ctx) } -func (pm *Manager) GetChannelInfo(addr address.Address) (*ChannelInfo, error) { - ca, err := pm.accessorByAddress(addr) +func (pm *Manager) GetChannelInfo(ctx context.Context, addr address.Address) (*ChannelInfo, error) { + ca, err := pm.accessorByAddress(ctx, addr) if err != nil { return nil, err } - return ca.getChannelInfo(addr) + return ca.getChannelInfo(ctx, addr) } func (pm *Manager) CreateVoucher(ctx context.Context, ch address.Address, voucher paych.SignedVoucher) (*api.VoucherCreateResult, error) { - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return nil, err } @@ -223,7 +223,7 @@ func (pm *Manager) CheckVoucherSpendable(ctx context.Context, ch address.Address if len(proof) > 0 { return false, errProofNotSupported } - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return false, err } @@ -237,7 +237,7 @@ func (pm *Manager) AddVoucherOutbound(ctx context.Context, ch address.Address, s if len(proof) > 0 { return types.NewInt(0), errProofNotSupported } - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return types.NewInt(0), err } @@ -283,7 +283,7 @@ func (pm *Manager) trackInboundChannel(ctx context.Context, ch address.Address) defer pm.lk.Unlock() // Check if channel is in store - ci, err := pm.store.ByAddress(ch) + ci, err := pm.store.ByAddress(ctx, ch) if err == nil { // Channel is in store, so it's already being tracked return ci, nil @@ -316,7 +316,7 @@ func (pm *Manager) trackInboundChannel(ctx context.Context, ch address.Address) } // Save channel to store - return pm.store.TrackChannel(stateCi) + return pm.store.TrackChannel(ctx, stateCi) } // TODO: secret vs proof doesn't make sense, there is only one, not two @@ -324,23 +324,23 @@ func (pm *Manager) SubmitVoucher(ctx context.Context, ch address.Address, sv *pa if len(proof) > 0 { return cid.Undef, errProofNotSupported } - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return cid.Undef, err } return ca.submitVoucher(ctx, ch, sv, secret) } -func (pm *Manager) AllocateLane(ch address.Address) (uint64, error) { - ca, err := pm.accessorByAddress(ch) +func (pm *Manager) AllocateLane(ctx context.Context, ch address.Address) (uint64, error) { + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return 0, err } - return ca.allocateLane(ch) + return ca.allocateLane(ctx, ch) } func (pm *Manager) ListVouchers(ctx context.Context, ch address.Address) ([]*VoucherInfo, error) { - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return nil, err } @@ -348,7 +348,7 @@ func (pm *Manager) ListVouchers(ctx context.Context, ch address.Address) ([]*Vou } func (pm *Manager) Settle(ctx context.Context, addr address.Address) (cid.Cid, error) { - ca, err := pm.accessorByAddress(addr) + ca, err := pm.accessorByAddress(ctx, addr) if err != nil { return cid.Undef, err } @@ -356,7 +356,7 @@ func (pm *Manager) Settle(ctx context.Context, addr address.Address) (cid.Cid, e } func (pm *Manager) Collect(ctx context.Context, addr address.Address) (cid.Cid, error) { - ca, err := pm.accessorByAddress(addr) + ca, err := pm.accessorByAddress(ctx, addr) if err != nil { return cid.Undef, err } diff --git a/paychmgr/paych.go b/paychmgr/paych.go index e5e47dfca..16c6604c6 100644 --- a/paychmgr/paych.go +++ b/paychmgr/paych.go @@ -95,18 +95,18 @@ func (ca *channelAccessor) messageBuilder(ctx context.Context, from address.Addr return paych.Message(av, from), nil } -func (ca *channelAccessor) getChannelInfo(addr address.Address) (*ChannelInfo, error) { +func (ca *channelAccessor) getChannelInfo(ctx context.Context, addr address.Address) (*ChannelInfo, error) { ca.lk.Lock() defer ca.lk.Unlock() - return ca.store.ByAddress(addr) + return ca.store.ByAddress(ctx, addr) } -func (ca *channelAccessor) outboundActiveByFromTo(from, to address.Address) (*ChannelInfo, error) { +func (ca *channelAccessor) outboundActiveByFromTo(ctx context.Context, from, to address.Address) (*ChannelInfo, error) { ca.lk.Lock() defer ca.lk.Unlock() - return ca.store.OutboundActiveByFromTo(from, to) + return ca.store.OutboundActiveByFromTo(ctx, from, to) } // createVoucher creates a voucher with the given specification, setting its @@ -118,7 +118,7 @@ func (ca *channelAccessor) createVoucher(ctx context.Context, ch address.Address defer ca.lk.Unlock() // Find the channel for the voucher - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return nil, xerrors.Errorf("failed to get channel info by address: %w", err) } @@ -229,7 +229,7 @@ func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch add } // Check the voucher against the highest known voucher nonce / value - laneStates, err := ca.laneState(pchState, ch) + laneStates, err := ca.laneState(ctx, pchState, ch) if err != nil { return nil, err } @@ -298,7 +298,7 @@ func (ca *channelAccessor) checkVoucherSpendable(ctx context.Context, ch address return false, err } - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return false, err } @@ -351,7 +351,7 @@ func (ca *channelAccessor) addVoucher(ctx context.Context, ch address.Address, s } func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, minDelta types.BigInt) (types.BigInt, error) { - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return types.BigInt{}, err } @@ -400,14 +400,14 @@ func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Ad ci.NextLane = sv.Lane + 1 } - return delta, ca.store.putChannelInfo(ci) + return delta, ca.store.putChannelInfo(ctx, ci) } func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte) (cid.Cid, error) { ca.lk.Lock() defer ca.lk.Unlock() - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return cid.Undef, err } @@ -453,7 +453,7 @@ func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address } // Mark the voucher and any lower-nonce vouchers as having been submitted - err = ca.store.MarkVoucherSubmitted(ci, sv) + err = ca.store.MarkVoucherSubmitted(ctx, ci, sv) if err != nil { return cid.Undef, err } @@ -461,11 +461,11 @@ func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address return smsg.Cid(), nil } -func (ca *channelAccessor) allocateLane(ch address.Address) (uint64, error) { +func (ca *channelAccessor) allocateLane(ctx context.Context, ch address.Address) (uint64, error) { ca.lk.Lock() defer ca.lk.Unlock() - return ca.store.AllocateLane(ch) + return ca.store.AllocateLane(ctx, ch) } func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address) ([]*VoucherInfo, error) { @@ -474,12 +474,12 @@ func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address) // TODO: just having a passthrough method like this feels odd. Seems like // there should be some filtering we're doing here - return ca.store.VouchersForPaych(ch) + return ca.store.VouchersForPaych(ctx, ch) } // laneState gets the LaneStates from chain, then applies all vouchers in // the data store over the chain state -func (ca *channelAccessor) laneState(state paych.State, ch address.Address) (map[uint64]paych.LaneState, error) { +func (ca *channelAccessor) laneState(ctx context.Context, state paych.State, ch address.Address) (map[uint64]paych.LaneState, error) { // TODO: we probably want to call UpdateChannelState with all vouchers to be fully correct // (but technically dont't need to) @@ -501,7 +501,7 @@ func (ca *channelAccessor) laneState(state paych.State, ch address.Address) (map } // Apply locally stored vouchers - vouchers, err := ca.store.VouchersForPaych(ch) + vouchers, err := ca.store.VouchersForPaych(ctx, ch) if err != nil && err != ErrChannelNotTracked { return nil, err } @@ -583,7 +583,7 @@ func (ca *channelAccessor) settle(ctx context.Context, ch address.Address) (cid. ca.lk.Lock() defer ca.lk.Unlock() - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return cid.Undef, err } @@ -602,7 +602,7 @@ func (ca *channelAccessor) settle(ctx context.Context, ch address.Address) (cid. } ci.Settling = true - err = ca.store.putChannelInfo(ci) + err = ca.store.putChannelInfo(ctx, ci) if err != nil { log.Errorf("Error marking channel as settled: %s", err) } @@ -614,7 +614,7 @@ func (ca *channelAccessor) collect(ctx context.Context, ch address.Address) (cid ca.lk.Lock() defer ca.lk.Unlock() - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return cid.Undef, err } diff --git a/paychmgr/paych_test.go b/paychmgr/paych_test.go index ab04ad7e0..165c945b8 100644 --- a/paychmgr/paych_test.go +++ b/paychmgr/paych_test.go @@ -353,16 +353,16 @@ func TestAddVoucherNextLane(t *testing.T) { _, err := s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) require.NoError(t, err) - ci, err := s.mgr.GetChannelInfo(s.ch) + ci, err := s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 3) // Allocate a lane (should be lane 3) - lane, err := s.mgr.AllocateLane(s.ch) + lane, err := s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, lane, 3) - ci, err = s.mgr.GetChannelInfo(s.ch) + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 4) @@ -372,7 +372,7 @@ func TestAddVoucherNextLane(t *testing.T) { _, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) require.NoError(t, err) - ci, err = s.mgr.GetChannelInfo(s.ch) + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 4) @@ -382,22 +382,24 @@ func TestAddVoucherNextLane(t *testing.T) { _, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) require.NoError(t, err) - ci, err = s.mgr.GetChannelInfo(s.ch) + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 8) } func TestAllocateLane(t *testing.T) { + ctx := context.Background() + // Set up a manager with a single payment channel s := testSetupMgrWithChannel(t) // First lane should be 0 - lane, err := s.mgr.AllocateLane(s.ch) + lane, err := s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, lane, 0) // Next lane should be 1 - lane, err = s.mgr.AllocateLane(s.ch) + lane, err = s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, lane, 1) } @@ -446,7 +448,7 @@ func TestAllocateLaneWithExistingLaneState(t *testing.T) { require.NoError(t, err) // Allocate lane should return the next lane (lane 3) - lane, err := mgr.AllocateLane(ch) + lane, err := mgr.AllocateLane(ctx, ch) require.NoError(t, err) require.EqualValues(t, 3, lane) } @@ -746,7 +748,7 @@ func testSetupMgrWithChannel(t *testing.T) *testScaffold { Target: toAcct, Direction: DirOutbound, } - err = mgr.store.putChannelInfo(ci) + err = mgr.store.putChannelInfo(context.Background(), ci) require.NoError(t, err) // Add the from signing key to the wallet diff --git a/paychmgr/paychget_test.go b/paychmgr/paychget_test.go index e6b94db57..2aacaf6c2 100644 --- a/paychmgr/paychget_test.go +++ b/paychmgr/paychget_test.go @@ -87,7 +87,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { require.NoError(t, err) // Should have no channels yet (message sent but channel not created) - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 0) @@ -112,7 +112,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { require.NotEqual(t, createMsgCid, addFundsMsgCid) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) @@ -121,7 +121,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { // channel). // PendingAmount should be amount sent in second GetPaych // (second GetPaych triggered add funds, which has not yet been confirmed) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.EqualValues(t, 10, ci.Amount.Int64()) require.EqualValues(t, 5, ci.PendingAmount.Int64()) @@ -135,13 +135,13 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { require.NoError(t, err) // Should still have one channel - cis, err = mgr.ListChannels() + cis, err = mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) // Channel amount should include last amount sent to GetPaych - ci, err = mgr.GetChannelInfo(ch) + ci, err = mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.EqualValues(t, 15, ci.Amount.Int64()) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -203,12 +203,12 @@ func TestPaychGetCreateChannelWithErrorThenCreateAgain(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt2, ci.Amount) }() @@ -259,12 +259,12 @@ func TestPaychGetRecoverAfterError(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt2, ci.Amount) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -311,12 +311,12 @@ func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { require.Error(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt, ci.Amount) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -338,13 +338,13 @@ func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err = mgr.ListChannels() + cis, err = mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) // Amount should include amount for successful add funds msg - ci, err = mgr.GetChannelInfo(ch) + ci, err = mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt.Int64()+amt3.Int64(), ci.Amount.Int64()) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -384,7 +384,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { require.NoError(t, err) // Should have no channels yet (message sent but channel not created) - cis, err := mgr2.ListChannels() + cis, err := mgr2.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 0) @@ -409,7 +409,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { require.NotEqual(t, createMsgCid, addFundsMsgCid) // Should have one channel, whose address is the channel that was created - cis, err := mgr2.ListChannels() + cis, err := mgr2.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) @@ -418,7 +418,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { // channel). // PendingAmount should be amount sent in second GetPaych // (second GetPaych triggered add funds, which has not yet been confirmed) - ci, err := mgr2.GetChannelInfo(ch) + ci, err := mgr2.GetChannelInfo(ctx, ch) require.NoError(t, err) require.EqualValues(t, 10, ci.Amount.Int64()) require.EqualValues(t, 5, ci.PendingAmount.Int64()) @@ -481,13 +481,13 @@ func TestPaychGetRestartAfterAddFundsMsg(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr2.ListChannels() + cis, err := mgr2.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) // Amount should include amount for successful add funds msg - ci, err := mgr2.GetChannelInfo(ch) + ci, err := mgr2.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt.Int64()+amt2.Int64(), ci.Amount.Int64()) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -917,7 +917,7 @@ func TestPaychAvailableFunds(t *testing.T) { require.NoError(t, err) // No channel created yet so available funds should be all zeroes - av, err := mgr.AvailableFundsByFromTo(from, to) + av, err := mgr.AvailableFundsByFromTo(ctx, from, to) require.NoError(t, err) require.Nil(t, av.Channel) require.Nil(t, av.PendingWaitSentinel) @@ -932,7 +932,7 @@ func TestPaychAvailableFunds(t *testing.T) { require.NoError(t, err) // Available funds should reflect create channel message sent - av, err = mgr.AvailableFundsByFromTo(from, to) + av, err = mgr.AvailableFundsByFromTo(ctx, from, to) require.NoError(t, err) require.Nil(t, av.Channel) require.EqualValues(t, 0, av.ConfirmedAmt.Int64()) @@ -961,7 +961,7 @@ func TestPaychAvailableFunds(t *testing.T) { waitForQueueSize(t, mgr, from, to, 1) // Available funds should now include queued funds - av, err = mgr.AvailableFundsByFromTo(from, to) + av, err = mgr.AvailableFundsByFromTo(ctx, from, to) require.NoError(t, err) require.Nil(t, av.Channel) require.NotNil(t, av.PendingWaitSentinel) @@ -996,7 +996,7 @@ func TestPaychAvailableFunds(t *testing.T) { // Available funds should now include the channel and also a wait sentinel // for the add funds message - av, err = mgr.AvailableFunds(ch) + av, err = mgr.AvailableFunds(ctx, ch) require.NoError(t, err) require.NotNil(t, av.Channel) require.NotNil(t, av.PendingWaitSentinel) @@ -1018,7 +1018,7 @@ func TestPaychAvailableFunds(t *testing.T) { require.NoError(t, err) // Available funds should no longer have a wait sentinel - av, err = mgr.AvailableFunds(ch) + av, err = mgr.AvailableFunds(ctx, ch) require.NoError(t, err) require.NotNil(t, av.Channel) require.Nil(t, av.PendingWaitSentinel) @@ -1039,7 +1039,7 @@ func TestPaychAvailableFunds(t *testing.T) { _, err = mgr.AddVoucherOutbound(ctx, ch, voucher, nil, types.NewInt(0)) require.NoError(t, err) - av, err = mgr.AvailableFunds(ch) + av, err = mgr.AvailableFunds(ctx, ch) require.NoError(t, err) require.NotNil(t, av.Channel) require.Nil(t, av.PendingWaitSentinel) diff --git a/paychmgr/settle_test.go b/paychmgr/settle_test.go index f17f961e2..43a006200 100644 --- a/paychmgr/settle_test.go +++ b/paychmgr/settle_test.go @@ -63,7 +63,7 @@ func TestPaychSettle(t *testing.T) { require.NotEqual(t, ch, ch2) // There should now be two channels - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 2) } diff --git a/paychmgr/simple.go b/paychmgr/simple.go index f93c6d5bd..502338e29 100644 --- a/paychmgr/simple.go +++ b/paychmgr/simple.go @@ -159,7 +159,7 @@ func (m *mergedFundsReq) sum() types.BigInt { func (ca *channelAccessor) getPaych(ctx context.Context, amt types.BigInt) (address.Address, cid.Cid, error) { // Add the request to add funds to a queue and wait for the result freq := newFundsReq(ctx, amt) - ca.enqueue(freq) + ca.enqueue(ctx, freq) select { case res := <-freq.promise: return res.channel, res.mcid, res.err @@ -170,16 +170,16 @@ func (ca *channelAccessor) getPaych(ctx context.Context, amt types.BigInt) (addr } // Queue up an add funds operation -func (ca *channelAccessor) enqueue(task *fundsReq) { +func (ca *channelAccessor) enqueue(ctx context.Context, task *fundsReq) { ca.lk.Lock() defer ca.lk.Unlock() ca.fundsReqQueue = append(ca.fundsReqQueue, task) - go ca.processQueue("") // nolint: errcheck + go ca.processQueue(ctx, "") // nolint: errcheck } // Run the operations in the queue -func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailableFunds, error) { +func (ca *channelAccessor) processQueue(ctx context.Context, channelID string) (*api.ChannelAvailableFunds, error) { ca.lk.Lock() defer ca.lk.Unlock() @@ -188,7 +188,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable // If there's nothing in the queue, bail out if len(ca.fundsReqQueue) == 0 { - return ca.currentAvailableFunds(channelID, types.NewInt(0)) + return ca.currentAvailableFunds(ctx, channelID, types.NewInt(0)) } // Merge all pending requests into one. @@ -199,7 +199,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable if amt.IsZero() { // Note: The amount can be zero if requests are cancelled as we're // building the mergedFundsReq - return ca.currentAvailableFunds(channelID, amt) + return ca.currentAvailableFunds(ctx, channelID, amt) } res := ca.processTask(merged.ctx, amt) @@ -209,7 +209,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable if res == nil { // Stop processing the fundsReqQueue and wait. When the event occurs it will // call processQueue() again - return ca.currentAvailableFunds(channelID, amt) + return ca.currentAvailableFunds(ctx, channelID, amt) } // Finished processing so clear the queue @@ -218,7 +218,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable // Call the task callback with its results merged.onComplete(res) - return ca.currentAvailableFunds(channelID, types.NewInt(0)) + return ca.currentAvailableFunds(ctx, channelID, types.NewInt(0)) } // filterQueue filters cancelled requests out of the queue @@ -255,12 +255,12 @@ func (ca *channelAccessor) queueSize() int { // msgWaitComplete is called when the message for a previous task is confirmed // or there is an error. -func (ca *channelAccessor) msgWaitComplete(mcid cid.Cid, err error) { +func (ca *channelAccessor) msgWaitComplete(ctx context.Context, mcid cid.Cid, err error) { ca.lk.Lock() defer ca.lk.Unlock() // Save the message result to the store - dserr := ca.store.SaveMessageResult(mcid, err) + dserr := ca.store.SaveMessageResult(ctx, mcid, err) if dserr != nil { log.Errorf("saving message result: %s", dserr) } @@ -271,16 +271,16 @@ func (ca *channelAccessor) msgWaitComplete(mcid cid.Cid, err error) { // The queue may have been waiting for msg completion to proceed, so // process the next queue item if len(ca.fundsReqQueue) > 0 { - go ca.processQueue("") // nolint: errcheck + go ca.processQueue(ctx, "") // nolint: errcheck } } -func (ca *channelAccessor) currentAvailableFunds(channelID string, queuedAmt types.BigInt) (*api.ChannelAvailableFunds, error) { +func (ca *channelAccessor) currentAvailableFunds(ctx context.Context, channelID string, queuedAmt types.BigInt) (*api.ChannelAvailableFunds, error) { if len(channelID) == 0 { return nil, nil } - channelInfo, err := ca.store.ByChannelID(channelID) + channelInfo, err := ca.store.ByChannelID(ctx, channelID) if err != nil { return nil, err } @@ -302,7 +302,7 @@ func (ca *channelAccessor) currentAvailableFunds(channelID string, queuedAmt typ return nil, err } - laneStates, err := ca.laneState(pchState, ch) + laneStates, err := ca.laneState(ctx, pchState, ch) if err != nil { return nil, err } @@ -337,7 +337,7 @@ func (ca *channelAccessor) processTask(ctx context.Context, amt types.BigInt) *p // Get the payment channel for the from/to addresses. // Note: It's ok if we get ErrChannelNotTracked. It just means we need to // create a channel. - channelInfo, err := ca.store.OutboundActiveByFromTo(ca.from, ca.to) + channelInfo, err := ca.store.OutboundActiveByFromTo(ctx, ca.from, ca.to) if err != nil && err != ErrChannelNotTracked { return &paychFundsRes{err: err} } @@ -393,26 +393,26 @@ func (ca *channelAccessor) createPaych(ctx context.Context, amt types.BigInt) (c mcid := smsg.Cid() // Create a new channel in the store - ci, err := ca.store.CreateChannel(ca.from, ca.to, mcid, amt) + ci, err := ca.store.CreateChannel(ctx, ca.from, ca.to, mcid, amt) if err != nil { log.Errorf("creating channel: %s", err) return cid.Undef, err } // Wait for the channel to be created on chain - go ca.waitForPaychCreateMsg(ci.ChannelID, mcid) + go ca.waitForPaychCreateMsg(ctx, ci.ChannelID, mcid) return mcid, nil } // waitForPaychCreateMsg waits for mcid to appear on chain and stores the robust address of the // created payment channel -func (ca *channelAccessor) waitForPaychCreateMsg(channelID string, mcid cid.Cid) { - err := ca.waitPaychCreateMsg(channelID, mcid) - ca.msgWaitComplete(mcid, err) +func (ca *channelAccessor) waitForPaychCreateMsg(ctx context.Context, channelID string, mcid cid.Cid) { + err := ca.waitPaychCreateMsg(ctx, channelID, mcid) + ca.msgWaitComplete(ctx, mcid, err) } -func (ca *channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) error { +func (ca *channelAccessor) waitPaychCreateMsg(ctx context.Context, channelID string, mcid cid.Cid) error { mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { log.Errorf("wait msg: %v", err) @@ -425,7 +425,7 @@ func (ca *channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) er defer ca.lk.Unlock() // Channel creation failed, so remove the channel from the datastore - dserr := ca.store.RemoveChannel(channelID) + dserr := ca.store.RemoveChannel(ctx, channelID) if dserr != nil { log.Errorf("failed to remove channel %s: %s", channelID, dserr) } @@ -449,7 +449,7 @@ func (ca *channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) er defer ca.lk.Unlock() // Store robust address of channel - ca.mutateChannelInfo(channelID, func(channelInfo *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *ChannelInfo) { channelInfo.Channel = &decodedReturn.RobustAddress channelInfo.Amount = channelInfo.PendingAmount channelInfo.PendingAmount = big.NewInt(0) @@ -475,30 +475,30 @@ func (ca *channelAccessor) addFunds(ctx context.Context, channelInfo *ChannelInf mcid := smsg.Cid() // Store the add funds message CID on the channel - ca.mutateChannelInfo(channelInfo.ChannelID, func(ci *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelInfo.ChannelID, func(ci *ChannelInfo) { ci.PendingAmount = amt ci.AddFundsMsg = &mcid }) // Store a reference from the message CID to the channel, so that we can // look up the channel from the message CID - err = ca.store.SaveNewMessage(channelInfo.ChannelID, mcid) + err = ca.store.SaveNewMessage(ctx, channelInfo.ChannelID, mcid) if err != nil { log.Errorf("saving add funds message CID %s: %s", mcid, err) } - go ca.waitForAddFundsMsg(channelInfo.ChannelID, mcid) + go ca.waitForAddFundsMsg(ctx, channelInfo.ChannelID, mcid) return &mcid, nil } // waitForAddFundsMsg waits for mcid to appear on chain and returns error, if any -func (ca *channelAccessor) waitForAddFundsMsg(channelID string, mcid cid.Cid) { - err := ca.waitAddFundsMsg(channelID, mcid) - ca.msgWaitComplete(mcid, err) +func (ca *channelAccessor) waitForAddFundsMsg(ctx context.Context, channelID string, mcid cid.Cid) { + err := ca.waitAddFundsMsg(ctx, channelID, mcid) + ca.msgWaitComplete(ctx, mcid, err) } -func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error { +func (ca *channelAccessor) waitAddFundsMsg(ctx context.Context, channelID string, mcid cid.Cid) error { mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { log.Error(err) @@ -512,7 +512,7 @@ func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error ca.lk.Lock() defer ca.lk.Unlock() - ca.mutateChannelInfo(channelID, func(channelInfo *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *ChannelInfo) { channelInfo.PendingAmount = big.NewInt(0) channelInfo.AddFundsMsg = nil }) @@ -524,7 +524,7 @@ func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error defer ca.lk.Unlock() // Store updated amount - ca.mutateChannelInfo(channelID, func(channelInfo *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *ChannelInfo) { channelInfo.Amount = types.BigAdd(channelInfo.Amount, channelInfo.PendingAmount) channelInfo.PendingAmount = big.NewInt(0) channelInfo.AddFundsMsg = nil @@ -534,8 +534,8 @@ func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error } // Change the state of the channel in the store -func (ca *channelAccessor) mutateChannelInfo(channelID string, mutate func(*ChannelInfo)) { - channelInfo, err := ca.store.ByChannelID(channelID) +func (ca *channelAccessor) mutateChannelInfo(ctx context.Context, channelID string, mutate func(*ChannelInfo)) { + channelInfo, err := ca.store.ByChannelID(ctx, channelID) // If there's an error reading or writing to the store just log an error. // For now we're assuming it's unlikely to happen in practice. @@ -549,7 +549,7 @@ func (ca *channelAccessor) mutateChannelInfo(channelID string, mutate func(*Chan mutate(channelInfo) - err = ca.store.putChannelInfo(channelInfo) + err = ca.store.putChannelInfo(ctx, channelInfo) if err != nil { log.Errorf("Error writing channel info to store: %s", err) } @@ -560,8 +560,8 @@ func (ca *channelAccessor) mutateChannelInfo(channelID string, mutate func(*Chan // messages. // Outstanding messages can occur if a create / add funds message was sent and // then the system was shut down or crashed before the result was received. -func (pm *Manager) restartPending() error { - cis, err := pm.store.WithPendingAddFunds() +func (pm *Manager) restartPending(ctx context.Context) error { + cis, err := pm.store.WithPendingAddFunds(ctx) if err != nil { return err } @@ -575,16 +575,16 @@ func (pm *Manager) restartPending() error { if err != nil { return xerrors.Errorf("error initializing payment channel manager %s -> %s: %s", ci.Control, ci.Target, err) } - go ca.waitForPaychCreateMsg(ci.ChannelID, *ci.CreateMsg) + go ca.waitForPaychCreateMsg(ctx, ci.ChannelID, *ci.CreateMsg) return nil }) } else if ci.AddFundsMsg != nil { group.Go(func() error { - ca, err := pm.accessorByAddress(*ci.Channel) + ca, err := pm.accessorByAddress(ctx, *ci.Channel) if err != nil { return xerrors.Errorf("error initializing payment channel manager %s: %s", ci.Channel, err) } - go ca.waitForAddFundsMsg(ci.ChannelID, *ci.AddFundsMsg) + go ca.waitForAddFundsMsg(ctx, ci.ChannelID, *ci.AddFundsMsg) return nil }) } @@ -598,7 +598,7 @@ func (ca *channelAccessor) getPaychWaitReady(ctx context.Context, mcid cid.Cid) ca.lk.Lock() // First check if the message has completed - msgInfo, err := ca.store.GetMessage(mcid) + msgInfo, err := ca.store.GetMessage(ctx, mcid) if err != nil { ca.lk.Unlock() @@ -617,7 +617,7 @@ func (ca *channelAccessor) getPaychWaitReady(ctx context.Context, mcid cid.Cid) ca.lk.Unlock() // Get the channel address - ci, err := ca.store.ByMessageCid(mcid) + ci, err := ca.store.ByMessageCid(ctx, mcid) if err != nil { return address.Undef, err } @@ -660,7 +660,7 @@ func (ca *channelAccessor) msgPromise(ctx context.Context, mcid cid.Cid) chan on res := onMsgRes{err: err} if res.err == nil { // Get the channel associated with the message cid - ci, err := ca.store.ByMessageCid(mcid) + ci, err := ca.store.ByMessageCid(ctx, mcid) if err != nil { res.err = err } else { @@ -689,6 +689,6 @@ func (ca *channelAccessor) msgPromise(ctx context.Context, mcid cid.Cid) chan on return promise } -func (ca *channelAccessor) availableFunds(channelID string) (*api.ChannelAvailableFunds, error) { - return ca.processQueue(channelID) +func (ca *channelAccessor) availableFunds(ctx context.Context, channelID string) (*api.ChannelAvailableFunds, error) { + return ca.processQueue(ctx, channelID) } diff --git a/paychmgr/store.go b/paychmgr/store.go index 343149f93..62849e6be 100644 --- a/paychmgr/store.go +++ b/paychmgr/store.go @@ -2,6 +2,7 @@ package paychmgr import ( "bytes" + "context" "errors" "fmt" @@ -157,26 +158,26 @@ func (ci *ChannelInfo) wasVoucherSubmitted(sv *paych.SignedVoucher) (bool, error // TrackChannel stores a channel, returning an error if the channel was already // being tracked -func (ps *Store) TrackChannel(ci *ChannelInfo) (*ChannelInfo, error) { - _, err := ps.ByAddress(*ci.Channel) +func (ps *Store) TrackChannel(ctx context.Context, ci *ChannelInfo) (*ChannelInfo, error) { + _, err := ps.ByAddress(ctx, *ci.Channel) switch err { default: return nil, err case nil: return nil, fmt.Errorf("already tracking channel: %s", ci.Channel) case ErrChannelNotTracked: - err = ps.putChannelInfo(ci) + err = ps.putChannelInfo(ctx, ci) if err != nil { return nil, err } - return ps.ByAddress(*ci.Channel) + return ps.ByAddress(ctx, *ci.Channel) } } // ListChannels returns the addresses of all channels that have been created -func (ps *Store) ListChannels() ([]address.Address, error) { - cis, err := ps.findChans(func(ci *ChannelInfo) bool { +func (ps *Store) ListChannels(ctx context.Context) ([]address.Address, error) { + cis, err := ps.findChans(ctx, func(ci *ChannelInfo) bool { return ci.Channel != nil }, 0) if err != nil { @@ -193,8 +194,8 @@ func (ps *Store) ListChannels() ([]address.Address, error) { // findChan finds a single channel using the given filter. // If there isn't a channel that matches the filter, returns ErrChannelNotTracked -func (ps *Store) findChan(filter func(ci *ChannelInfo) bool) (*ChannelInfo, error) { - cis, err := ps.findChans(filter, 1) +func (ps *Store) findChan(ctx context.Context, filter func(ci *ChannelInfo) bool) (*ChannelInfo, error) { + cis, err := ps.findChans(ctx, filter, 1) if err != nil { return nil, err } @@ -208,8 +209,8 @@ func (ps *Store) findChan(filter func(ci *ChannelInfo) bool) (*ChannelInfo, erro // findChans loops over all channels, only including those that pass the filter. // max is the maximum number of channels to return. Set to zero to return unlimited channels. -func (ps *Store) findChans(filter func(*ChannelInfo) bool, max int) ([]ChannelInfo, error) { - res, err := ps.ds.Query(dsq.Query{Prefix: dsKeyChannelInfo}) +func (ps *Store) findChans(ctx context.Context, filter func(*ChannelInfo) bool, max int) ([]ChannelInfo, error) { + res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyChannelInfo}) if err != nil { return nil, err } @@ -251,8 +252,8 @@ func (ps *Store) findChans(filter func(*ChannelInfo) bool, max int) ([]ChannelIn } // AllocateLane allocates a new lane for the given channel -func (ps *Store) AllocateLane(ch address.Address) (uint64, error) { - ci, err := ps.ByAddress(ch) +func (ps *Store) AllocateLane(ctx context.Context, ch address.Address) (uint64, error) { + ci, err := ps.ByAddress(ctx, ch) if err != nil { return 0, err } @@ -260,12 +261,12 @@ func (ps *Store) AllocateLane(ch address.Address) (uint64, error) { out := ci.NextLane ci.NextLane++ - return out, ps.putChannelInfo(ci) + return out, ps.putChannelInfo(ctx, ci) } // VouchersForPaych gets the vouchers for the given channel -func (ps *Store) VouchersForPaych(ch address.Address) ([]*VoucherInfo, error) { - ci, err := ps.ByAddress(ch) +func (ps *Store) VouchersForPaych(ctx context.Context, ch address.Address) ([]*VoucherInfo, error) { + ci, err := ps.ByAddress(ctx, ch) if err != nil { return nil, err } @@ -273,17 +274,17 @@ func (ps *Store) VouchersForPaych(ch address.Address) ([]*VoucherInfo, error) { return ci.Vouchers, nil } -func (ps *Store) MarkVoucherSubmitted(ci *ChannelInfo, sv *paych.SignedVoucher) error { +func (ps *Store) MarkVoucherSubmitted(ctx context.Context, ci *ChannelInfo, sv *paych.SignedVoucher) error { err := ci.markVoucherSubmitted(sv) if err != nil { return err } - return ps.putChannelInfo(ci) + return ps.putChannelInfo(ctx, ci) } // ByAddress gets the channel that matches the given address -func (ps *Store) ByAddress(addr address.Address) (*ChannelInfo, error) { - return ps.findChan(func(ci *ChannelInfo) bool { +func (ps *Store) ByAddress(ctx context.Context, addr address.Address) (*ChannelInfo, error) { + return ps.findChan(ctx, func(ci *ChannelInfo) bool { return ci.Channel != nil && *ci.Channel == addr }) } @@ -307,7 +308,7 @@ func dskeyForMsg(mcid cid.Cid) datastore.Key { } // SaveNewMessage is called when a message is sent -func (ps *Store) SaveNewMessage(channelID string, mcid cid.Cid) error { +func (ps *Store) SaveNewMessage(ctx context.Context, channelID string, mcid cid.Cid) error { k := dskeyForMsg(mcid) b, err := cborrpc.Dump(&MsgInfo{ChannelID: channelID, MsgCid: mcid}) @@ -315,12 +316,12 @@ func (ps *Store) SaveNewMessage(channelID string, mcid cid.Cid) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // SaveMessageResult is called when the result of a message is received -func (ps *Store) SaveMessageResult(mcid cid.Cid, msgErr error) error { - minfo, err := ps.GetMessage(mcid) +func (ps *Store) SaveMessageResult(ctx context.Context, mcid cid.Cid, msgErr error) error { + minfo, err := ps.GetMessage(ctx, mcid) if err != nil { return err } @@ -336,17 +337,17 @@ func (ps *Store) SaveMessageResult(mcid cid.Cid, msgErr error) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // ByMessageCid gets the channel associated with a message -func (ps *Store) ByMessageCid(mcid cid.Cid) (*ChannelInfo, error) { - minfo, err := ps.GetMessage(mcid) +func (ps *Store) ByMessageCid(ctx context.Context, mcid cid.Cid) (*ChannelInfo, error) { + minfo, err := ps.GetMessage(ctx, mcid) if err != nil { return nil, err } - ci, err := ps.findChan(func(ci *ChannelInfo) bool { + ci, err := ps.findChan(ctx, func(ci *ChannelInfo) bool { return ci.ChannelID == minfo.ChannelID }) if err != nil { @@ -357,10 +358,10 @@ func (ps *Store) ByMessageCid(mcid cid.Cid) (*ChannelInfo, error) { } // GetMessage gets the message info for a given message CID -func (ps *Store) GetMessage(mcid cid.Cid) (*MsgInfo, error) { +func (ps *Store) GetMessage(ctx context.Context, mcid cid.Cid) (*MsgInfo, error) { k := dskeyForMsg(mcid) - val, err := ps.ds.Get(k) + val, err := ps.ds.Get(ctx, k) if err != nil { return nil, err } @@ -375,8 +376,8 @@ func (ps *Store) GetMessage(mcid cid.Cid) (*MsgInfo, error) { // OutboundActiveByFromTo looks for outbound channels that have not been // settled, with the given from / to addresses -func (ps *Store) OutboundActiveByFromTo(from address.Address, to address.Address) (*ChannelInfo, error) { - return ps.findChan(func(ci *ChannelInfo) bool { +func (ps *Store) OutboundActiveByFromTo(ctx context.Context, from address.Address, to address.Address) (*ChannelInfo, error) { + return ps.findChan(ctx, func(ci *ChannelInfo) bool { if ci.Direction != DirOutbound { return false } @@ -390,8 +391,8 @@ func (ps *Store) OutboundActiveByFromTo(from address.Address, to address.Address // WithPendingAddFunds is used on startup to find channels for which a // create channel or add funds message has been sent, but lotus shut down // before the response was received. -func (ps *Store) WithPendingAddFunds() ([]ChannelInfo, error) { - return ps.findChans(func(ci *ChannelInfo) bool { +func (ps *Store) WithPendingAddFunds(ctx context.Context) ([]ChannelInfo, error) { + return ps.findChans(ctx, func(ci *ChannelInfo) bool { if ci.Direction != DirOutbound { return false } @@ -400,10 +401,10 @@ func (ps *Store) WithPendingAddFunds() ([]ChannelInfo, error) { } // ByChannelID gets channel info by channel ID -func (ps *Store) ByChannelID(channelID string) (*ChannelInfo, error) { +func (ps *Store) ByChannelID(ctx context.Context, channelID string) (*ChannelInfo, error) { var stored ChannelInfo - res, err := ps.ds.Get(dskeyForChannel(channelID)) + res, err := ps.ds.Get(ctx, dskeyForChannel(channelID)) if err != nil { if err == datastore.ErrNotFound { return nil, ErrChannelNotTracked @@ -415,7 +416,7 @@ func (ps *Store) ByChannelID(channelID string) (*ChannelInfo, error) { } // CreateChannel creates an outbound channel for the given from / to -func (ps *Store) CreateChannel(from address.Address, to address.Address, createMsgCid cid.Cid, amt types.BigInt) (*ChannelInfo, error) { +func (ps *Store) CreateChannel(ctx context.Context, from address.Address, to address.Address, createMsgCid cid.Cid, amt types.BigInt) (*ChannelInfo, error) { ci := &ChannelInfo{ Direction: DirOutbound, NextLane: 0, @@ -426,13 +427,13 @@ func (ps *Store) CreateChannel(from address.Address, to address.Address, createM } // Save the new channel - err := ps.putChannelInfo(ci) + err := ps.putChannelInfo(ctx, ci) if err != nil { return nil, err } // Save a reference to the create message - err = ps.SaveNewMessage(ci.ChannelID, createMsgCid) + err = ps.SaveNewMessage(ctx, ci.ChannelID, createMsgCid) if err != nil { return nil, err } @@ -441,8 +442,8 @@ func (ps *Store) CreateChannel(from address.Address, to address.Address, createM } // RemoveChannel removes the channel with the given channel ID -func (ps *Store) RemoveChannel(channelID string) error { - return ps.ds.Delete(dskeyForChannel(channelID)) +func (ps *Store) RemoveChannel(ctx context.Context, channelID string) error { + return ps.ds.Delete(ctx, dskeyForChannel(channelID)) } // The datastore key used to identify the channel info @@ -451,7 +452,7 @@ func dskeyForChannel(channelID string) datastore.Key { } // putChannelInfo stores the channel info in the datastore -func (ps *Store) putChannelInfo(ci *ChannelInfo) error { +func (ps *Store) putChannelInfo(ctx context.Context, ci *ChannelInfo) error { if len(ci.ChannelID) == 0 { ci.ChannelID = uuid.New().String() } @@ -462,7 +463,7 @@ func (ps *Store) putChannelInfo(ci *ChannelInfo) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // TODO: This is a hack to get around not being able to CBOR marshall a nil diff --git a/paychmgr/store_test.go b/paychmgr/store_test.go index 1ec8895fa..563b82978 100644 --- a/paychmgr/store_test.go +++ b/paychmgr/store_test.go @@ -1,6 +1,7 @@ package paychmgr import ( + "context" "testing" "github.com/filecoin-project/go-address" @@ -12,8 +13,10 @@ import ( ) func TestStore(t *testing.T) { + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) - addrs, err := store.ListChannels() + addrs, err := store.ListChannels(ctx) require.NoError(t, err) require.Len(t, addrs, 0) @@ -38,19 +41,19 @@ func TestStore(t *testing.T) { } // Track the channel - _, err = store.TrackChannel(ci) + _, err = store.TrackChannel(ctx, ci) require.NoError(t, err) // Tracking same channel again should error - _, err = store.TrackChannel(ci) + _, err = store.TrackChannel(ctx, ci) require.Error(t, err) // Track another channel - _, err = store.TrackChannel(ci2) + _, err = store.TrackChannel(ctx, ci2) require.NoError(t, err) // List channels should include all channels - addrs, err = store.ListChannels() + addrs, err = store.ListChannels(ctx) require.NoError(t, err) require.Len(t, addrs, 2) t0100, err := address.NewIDAddress(100) @@ -61,25 +64,25 @@ func TestStore(t *testing.T) { require.Contains(t, addrs, t0200) // Request vouchers for channel - vouchers, err := store.VouchersForPaych(*ci.Channel) + vouchers, err := store.VouchersForPaych(ctx, *ci.Channel) require.NoError(t, err) require.Len(t, vouchers, 1) // Requesting voucher for non-existent channel should error - _, err = store.VouchersForPaych(tutils.NewIDAddr(t, 300)) + _, err = store.VouchersForPaych(ctx, tutils.NewIDAddr(t, 300)) require.Equal(t, err, ErrChannelNotTracked) // Allocate lane for channel - lane, err := store.AllocateLane(*ci.Channel) + lane, err := store.AllocateLane(ctx, *ci.Channel) require.NoError(t, err) require.Equal(t, lane, uint64(0)) // Allocate next lane for channel - lane, err = store.AllocateLane(*ci.Channel) + lane, err = store.AllocateLane(ctx, *ci.Channel) require.NoError(t, err) require.Equal(t, lane, uint64(1)) // Allocate next lane for non-existent channel should error - _, err = store.AllocateLane(tutils.NewIDAddr(t, 300)) + _, err = store.AllocateLane(ctx, tutils.NewIDAddr(t, 300)) require.Equal(t, err, ErrChannelNotTracked) } diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index 0b4b17f96..01ff9d8d3 100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -112,6 +112,15 @@ func (s SealingAPIAdapter) StateMinerSectorAllocated(ctx context.Context, maddr return s.delegate.StateMinerSectorAllocated(ctx, maddr, sid, tsk) } +func (s SealingAPIAdapter) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) ([]*miner.SectorOnChainInfo, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return nil, xerrors.Errorf("faile dto unmarshal TipSetToken to TipSetKey: %w", err) + } + + return s.delegate.StateMinerActiveSectors(ctx, maddr, tsk) +} + func (s SealingAPIAdapter) StateWaitMsg(ctx context.Context, mcid cid.Cid) (sealing.MsgLookup, error) { wmsg, err := s.delegate.StateWaitMsg(ctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { diff --git a/storage/miner.go b/storage/miner.go index 0b1f66840..c52b786ee 100644 --- a/storage/miner.go +++ b/storage/miner.go @@ -86,6 +86,7 @@ type fullNodeFilteredAPI interface { StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tok types.TipSetKey) (types.BigInt, error) + StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) @@ -282,7 +283,7 @@ func (wpp *StorageWpp) GenerateCandidates(ctx context.Context, randomness abi.Po return cds, nil } -func (wpp *StorageWpp) ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) { +func (wpp *StorageWpp) ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, currEpoch abi.ChainEpoch, nv network.Version) ([]builtin.PoStProof, error) { if build.InsecurePoStValidation { return []builtin.PoStProof{{ProofBytes: []byte("valid proof")}}, nil } diff --git a/storage/miner_sealing.go b/storage/miner_sealing.go index 01b9546a6..d8ef26835 100644 --- a/storage/miner_sealing.go +++ b/storage/miner_sealing.go @@ -71,8 +71,15 @@ func (m *Miner) CommitPending(ctx context.Context) ([]abi.SectorID, error) { return m.sealing.CommitPending(ctx) } -func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error { - return m.sealing.MarkForUpgrade(id) +func (m *Miner) SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error { + return m.sealing.MatchPendingPiecesToOpenSectors(ctx) +} + +func (m *Miner) MarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error { + if snap { + return m.sealing.MarkForSnapUpgrade(ctx, id) + } + return m.sealing.MarkForUpgrade(ctx, id) } func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool { diff --git a/storage/sectorblocks/blocks.go b/storage/sectorblocks/blocks.go index ad4ffc0db..231809a9f 100644 --- a/storage/sectorblocks/blocks.go +++ b/storage/sectorblocks/blocks.go @@ -68,11 +68,11 @@ func NewSectorBlocks(sb SectorBuilder, ds dtypes.MetadataDS) *SectorBlocks { return sbc } -func (st *SectorBlocks) writeRef(dealID abi.DealID, sectorID abi.SectorNumber, offset abi.PaddedPieceSize, size abi.UnpaddedPieceSize) error { +func (st *SectorBlocks) writeRef(ctx context.Context, dealID abi.DealID, sectorID abi.SectorNumber, offset abi.PaddedPieceSize, size abi.UnpaddedPieceSize) error { st.keyLk.Lock() // TODO: make this multithreaded defer st.keyLk.Unlock() - v, err := st.keys.Get(DealIDToDsKey(dealID)) + v, err := st.keys.Get(ctx, DealIDToDsKey(dealID)) if err == datastore.ErrNotFound { err = nil } @@ -97,7 +97,7 @@ func (st *SectorBlocks) writeRef(dealID abi.DealID, sectorID abi.SectorNumber, o if err != nil { return xerrors.Errorf("serializing refs: %w", err) } - return st.keys.Put(DealIDToDsKey(dealID), newRef) // TODO: batch somehow + return st.keys.Put(ctx, DealIDToDsKey(dealID), newRef) // TODO: batch somehow } func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { @@ -107,7 +107,7 @@ func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize } // TODO: DealID has very low finality here - err = st.writeRef(d.DealID, so.Sector, so.Offset, size) + err = st.writeRef(ctx, d.DealID, so.Sector, so.Offset, size) if err != nil { return 0, 0, xerrors.Errorf("writeRef: %w", err) } @@ -115,8 +115,8 @@ func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize return so.Sector, so.Offset, nil } -func (st *SectorBlocks) List() (map[uint64][]api.SealedRef, error) { - res, err := st.keys.Query(query.Query{}) +func (st *SectorBlocks) List(ctx context.Context) (map[uint64][]api.SealedRef, error) { + res, err := st.keys.Query(ctx, query.Query{}) if err != nil { return nil, err } @@ -144,8 +144,8 @@ func (st *SectorBlocks) List() (map[uint64][]api.SealedRef, error) { return out, nil } -func (st *SectorBlocks) GetRefs(dealID abi.DealID) ([]api.SealedRef, error) { // TODO: track local sectors - ent, err := st.keys.Get(DealIDToDsKey(dealID)) +func (st *SectorBlocks) GetRefs(ctx context.Context, dealID abi.DealID) ([]api.SealedRef, error) { // TODO: track local sectors + ent, err := st.keys.Get(ctx, DealIDToDsKey(dealID)) if err == datastore.ErrNotFound { err = ErrNotFound } @@ -161,8 +161,8 @@ func (st *SectorBlocks) GetRefs(dealID abi.DealID) ([]api.SealedRef, error) { // return refs.Refs, nil } -func (st *SectorBlocks) GetSize(dealID abi.DealID) (uint64, error) { - refs, err := st.GetRefs(dealID) +func (st *SectorBlocks) GetSize(ctx context.Context, dealID abi.DealID) (uint64, error) { + refs, err := st.GetRefs(ctx, dealID) if err != nil { return 0, err } @@ -170,7 +170,7 @@ func (st *SectorBlocks) GetSize(dealID abi.DealID) (uint64, error) { return uint64(refs[0].Size), nil } -func (st *SectorBlocks) Has(dealID abi.DealID) (bool, error) { +func (st *SectorBlocks) Has(ctx context.Context, dealID abi.DealID) (bool, error) { // TODO: ensure sector is still there - return st.keys.Has(DealIDToDsKey(dealID)) + return st.keys.Has(ctx, DealIDToDsKey(dealID)) } diff --git a/storage/wdpost_changehandler_test.go b/storage/wdpost_changehandler_test.go index a2283cb7c..2fcbe770e 100644 --- a/storage/wdpost_changehandler_test.go +++ b/storage/wdpost_changehandler_test.go @@ -117,7 +117,7 @@ func (m *mockAPI) startGeneratePoST( completeGeneratePoST CompleteGeneratePoSTCb, ) context.CancelFunc { ctx, cancel := context.WithCancel(ctx) - + log.Errorf("mock posting\n") m.statesLk.Lock() defer m.statesLk.Unlock() m.postStates[deadline.Open] = postStatusProving diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index 038ed3ac7..6a86656c7 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -19,8 +19,8 @@ import ( "go.opencensus.io/trace" "golang.org/x/xerrors" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -181,9 +181,10 @@ func (s *WindowPoStScheduler) runSubmitPoST( post.ChainCommitRand = commRand // Submit PoST - sm, submitErr := s.submitPoStMessage(ctx, post) - if submitErr != nil { - log.Errorf("submit window post failed: %+v", submitErr) + sm, err := s.submitPoStMessage(ctx, post) + if err != nil { + log.Errorf("submit window post failed: %+v", err) + submitErr = err } else { s.recordProofsEvent(post.Partitions, sm.Cid()) } @@ -567,7 +568,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t for retries := 0; ; retries++ { skipCount := uint64(0) var partitions []miner.PoStPartition - var sinfos []proof2.SectorInfo + var xsinfos []proof7.ExtendedSectorInfo for partIdx, partition := range batch { // TODO: Can do this in parallel toProve, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors) @@ -610,14 +611,14 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t continue } - sinfos = append(sinfos, ssi...) + xsinfos = append(xsinfos, ssi...) partitions = append(partitions, miner.PoStPartition{ Index: uint64(batchPartitionStartIdx + partIdx), Skipped: skipped, }) } - if len(sinfos) == 0 { + if len(xsinfos) == 0 { // nothing to prove for this batch break } @@ -636,14 +637,22 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t return nil, err } - postOut, ps, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, append(abi.PoStRandomness{}, rand...)) + defer func() { + if r := recover(); r != nil { + log.Errorf("recover: %s", r) + } + }() + postOut, ps, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), xsinfos, append(abi.PoStRandomness{}, rand...)) elapsed := time.Since(tsStart) - log.Infow("computing window post", "batch", batchIdx, "elapsed", elapsed) - + if err != nil { + log.Errorf("error generating window post: %s", err) + } if err == nil { + // If we proved nothing, something is very wrong. if len(postOut) == 0 { + log.Errorf("len(postOut) == 0") return nil, xerrors.Errorf("received no proofs back from generate window post") } @@ -664,6 +673,14 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t } // If we generated an incorrect proof, try again. + sinfos := make([]proof7.SectorInfo, len(xsinfos)) + for i, xsi := range xsinfos { + sinfos[i] = proof7.SectorInfo{ + SealProof: xsi.SealProof, + SectorNumber: xsi.SectorNumber, + SealedCID: xsi.SealedCID, + } + } if correct, err := s.verifier.VerifyWindowPoSt(ctx, proof.WindowPoStVerifyInfo{ Randomness: abi.PoStRandomness(checkRand), Proofs: postOut, @@ -686,7 +703,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t } // Proof generation failed, so retry - + log.Debugf("Proof generation failed, retry") if len(ps) == 0 { // If we didn't skip any new sectors, we failed // for some other reason and we need to abort. @@ -714,10 +731,8 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t if !somethingToProve { continue } - posts = append(posts, params) } - return posts, nil } @@ -766,7 +781,7 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv net return batches, nil } -func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof2.SectorInfo, error) { +func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof7.ExtendedSectorInfo, error) { sset, err := s.api.StateMinerSectors(ctx, s.actor, &goodSectors, ts.Key()) if err != nil { return nil, err @@ -776,22 +791,24 @@ func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, return nil, nil } - substitute := proof2.SectorInfo{ + substitute := proof7.ExtendedSectorInfo{ SectorNumber: sset[0].SectorNumber, SealedCID: sset[0].SealedCID, SealProof: sset[0].SealProof, + SectorKey: sset[0].SectorKeyCID, } - sectorByID := make(map[uint64]proof2.SectorInfo, len(sset)) + sectorByID := make(map[uint64]proof7.ExtendedSectorInfo, len(sset)) for _, sector := range sset { - sectorByID[uint64(sector.SectorNumber)] = proof2.SectorInfo{ + sectorByID[uint64(sector.SectorNumber)] = proof7.ExtendedSectorInfo{ SectorNumber: sector.SectorNumber, SealedCID: sector.SealedCID, SealProof: sector.SealProof, + SectorKey: sector.SectorKeyCID, } } - proofSectors := make([]proof2.SectorInfo, 0, len(sset)) + proofSectors := make([]proof7.ExtendedSectorInfo, 0, len(sset)) if err := allSectors.ForEach(func(sectorNo uint64) error { if info, found := sectorByID[sectorNo]; found { proofSectors = append(proofSectors, info) diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index 78d9431d4..f3ea5836b 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -5,6 +5,8 @@ import ( "context" "testing" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" @@ -22,12 +24,6 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - tutils "github.com/filecoin-project/specs-actors/v2/support/testing" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -35,6 +31,10 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/journal" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" ) type mockStorageMinerAPI struct { @@ -116,11 +116,11 @@ func (m *mockStorageMinerAPI) GasEstimateFeeCap(context.Context, *types.Message, type mockProver struct { } -func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof7.ExtendedSectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { panic("implement me") } -func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof2.SectorInfo, pr abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { +func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof7.ExtendedSectorInfo, pr abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { return []proof2.PoStProof{ { PoStProof: abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, @@ -132,7 +132,7 @@ func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, si type mockVerif struct { } -func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) { panic("implement me") } @@ -149,7 +149,11 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV return true, nil } -func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { +func (m mockVerif) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) (bool, error) { + panic("implement me") +} + +func (m mockVerif) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { panic("implement me") } diff --git a/testplans/lotus-soup/go.mod b/testplans/lotus-soup/go.mod index cf13bcf78..308ee5140 100644 --- a/testplans/lotus-soup/go.mod +++ b/testplans/lotus-soup/go.mod @@ -6,13 +6,13 @@ require ( contrib.go.opencensus.io/exporter/prometheus v0.4.0 github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe github.com/davecgh/go-spew v1.1.1 - github.com/drand/drand v1.2.1 + github.com/drand/drand v1.3.0 github.com/filecoin-project/go-address v0.0.6 - github.com/filecoin-project/go-data-transfer v1.11.4 - github.com/filecoin-project/go-fil-markets v1.13.4 + github.com/filecoin-project/go-data-transfer v1.12.1 + github.com/filecoin-project/go-fil-markets v1.14.1 github.com/filecoin-project/go-jsonrpc v0.1.5 github.com/filecoin-project/go-state-types v0.1.1 - github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b + github.com/filecoin-project/go-storedcounter v0.1.0 github.com/filecoin-project/lotus v0.0.0-00010101000000-000000000000 github.com/filecoin-project/specs-actors v0.9.14 github.com/google/uuid v1.3.0 @@ -20,16 +20,16 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/influxdata/influxdb v1.9.4 // indirect github.com/ipfs/go-cid v0.1.0 - github.com/ipfs/go-datastore v0.4.6 + github.com/ipfs/go-datastore v0.5.1 github.com/ipfs/go-ipfs-files v0.0.9 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.3.0 - github.com/ipfs/go-merkledag v0.4.1 + github.com/ipfs/go-log/v2 v2.4.0 + github.com/ipfs/go-merkledag v0.5.1 github.com/ipfs/go-unixfs v0.2.6 - github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 + github.com/ipld/go-car v0.3.3 github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c - github.com/libp2p/go-libp2p v0.15.0 - github.com/libp2p/go-libp2p-core v0.9.0 + github.com/libp2p/go-libp2p v0.17.0 + github.com/libp2p/go-libp2p-core v0.13.0 github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 github.com/multiformats/go-multiaddr v0.4.1 github.com/testground/sdk-go v0.2.6 diff --git a/testplans/lotus-soup/go.sum b/testplans/lotus-soup/go.sum index 4eca05536..a571661d8 100644 --- a/testplans/lotus-soup/go.sum +++ b/testplans/lotus-soup/go.sum @@ -37,7 +37,6 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= @@ -181,11 +180,11 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7y github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.2.0 h1:9Re3G2TWxkE06LdMWMpcY6KV81GLXMGiYpPYUPkFAws= +github.com/benbjohnson/clock v1.2.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -231,7 +230,6 @@ github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= @@ -257,11 +255,6 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= @@ -320,13 +313,12 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= -github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= -github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= -github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/badger/v2 v2.2007.3 h1:Sl9tQWz92WCbVSe8pj04Tkqlm2boW+KAxd+XSs58SQI= +github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -340,12 +332,13 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand v1.2.1 h1:KB7z+69YbnQ5z22AH/LMi0ObDR8DzYmrkS6vZXTR9jI= -github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g= +github.com/drand/drand v1.3.0 h1:k/w/PtHzmlU6OmfoAqgirWyrJ4FZH8ESlJrsKF20UkM= +github.com/drand/drand v1.3.0/go.mod h1:D6kAVlxufq1gi71YCGfzN455JrXF4Q272ZJEG975fzo= github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber v1.1.4 h1:YvKM03QWGvLrdTnYmxxP5iURAX+Gdb6qRDUOgg8i60Q= github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= +github.com/drand/kyber v1.1.7 h1:YnOshFoGYSOdhf4K8BiDw4XL/l6caL92vsodAsVQbJI= +github.com/drand/kyber v1.1.7/go.mod h1:UkHLsI4W6+jT5PvNxmc0cvQAgppjTUpX+XCsN9TXmRo= github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= github.com/drand/kyber-bls12381 v0.2.1 h1:/d5/YAdaCmHpYjF1NZevOEcKGaq6LBbyvkCTIdGqDjs= github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= @@ -386,9 +379,9 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/filecoin-project/dagstore v0.4.2/go.mod h1:WY5OoLfnwISCk6eASSF927KKPqLPIlTwmG1qHpA08KY= -github.com/filecoin-project/dagstore v0.4.3 h1:yeFl6+2BRY1gOVp/hrZuFa24s7LY0Qqkqx/Gh8lidZs= -github.com/filecoin-project/dagstore v0.4.3/go.mod h1:dm/91AO5UaDd3bABFjg/5fmRH99vvpS7g1mykqvz6KQ= +github.com/filecoin-project/dagstore v0.4.3-0.20211211192320-72b849e131d2/go.mod h1:tlV8C11UljvFq3WWlMh2oMViEaVaPb6uT8eL/YQgDfk= +github.com/filecoin-project/dagstore v0.4.4 h1:luolWahhzp3ulRsapGKE7raoLE3n2cFkQUJjPyqUmF4= +github.com/filecoin-project/dagstore v0.4.4/go.mod h1:7BlOvaTJrFJ1Qemt5jHlLJ4VhDIuSIzGS0IwO/0AXPA= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-address v0.0.6 h1:DWQtj38ax+ogHwyH3VULRIoT8E6loyXqsk/p81xoY7M= @@ -405,26 +398,24 @@ github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQj github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= -github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= -github.com/filecoin-project/go-commp-utils v0.1.2 h1:SKLRuGdx/6WlolaWKaUzzUYWGGePuARyO4guxOPxvt4= -github.com/filecoin-project/go-commp-utils v0.1.2/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= +github.com/filecoin-project/go-commp-utils v0.1.3 h1:rTxbkNXZU7FLgdkBk8RsQIEOuPONHykEoX3xGk41Fkw= +github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9ANQrY3fDFoXdqyX04J+dWpK30= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.11.4 h1:jKvlx0/C8HSyLRn/G1P9TjtfBtFU9jbCvCVFmWbyYVQ= -github.com/filecoin-project/go-data-transfer v1.11.4/go.mod h1:2MitLI0ebCkLlPKM7NRggP/t9d+gCcREUKkCKqWRCwU= -github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= -github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-data-transfer v1.12.0/go.mod h1:tDrD2jLU2TpVhd+5B8iqBp0fQRV4lP80WZccKXugjYc= +github.com/filecoin-project/go-data-transfer v1.12.1 h1:gAznAZKySVs2FS6T/vDq7R3f0DewLnxeROe0oOE6bZU= +github.com/filecoin-project/go-data-transfer v1.12.1/go.mod h1:j3HL645YiQFxcM+q7uPlGApILSqeweDABNgZQP7pDYU= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff h1:2bG2ggVZ/rInd/YqUfRj4A5siGuYOPxxuD4I8nYLJF0= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= -github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.13.4 h1:NAu+ACelR2mYsj+yJ4iLu8FGqWK50OnU5VF8axkLsSc= -github.com/filecoin-project/go-fil-markets v1.13.4/go.mod h1:aANjXD2XMHWnT2zWpyGWLsWLC24C4mHm0gRm85OpPWE= +github.com/filecoin-project/go-fil-markets v1.14.1 h1:Bx+TSbkAN8K97Hpjgu+MpeRFbXIKH/fNpNp1ZGAEH3I= +github.com/filecoin-project/go-fil-markets v1.14.1/go.mod h1:vXOHH3q2+zLk929W+lIq3etuDFTyJJ8nG2DwGHG2R1E= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -434,15 +425,13 @@ github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AG github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= github.com/filecoin-project/go-jsonrpc v0.1.5 h1:ckxqZ09ivBAVf5CSmxxrqqNHC7PJm3GYGtYKiNQ+vGk= github.com/filecoin-project/go-jsonrpc v0.1.5/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= -github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= -github.com/filecoin-project/go-paramfetch v0.0.2 h1:a6W3Ij6CKhwHYYlx+5mqvBIyw4CabZH2ojdEaoAZ6/g= -github.com/filecoin-project/go-paramfetch v0.0.2/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= +github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 h1:+nripp+UI/rhl01w9Gs4V0XDGaVPYPMGU/D/gNVLue0= +github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= -github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= @@ -454,35 +443,35 @@ github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/g github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw= github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/c3OROw/kXVNSTZk= -github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= -github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= -github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNdofHZoGPjfNaAo5Q= +github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= +github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus= +github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY= github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= -github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= -github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= -github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v2 v2.3.6 h1:UxnWTfQd7JsOae39/aHCK0m1IBjdcyymCJfqxuSkn+g= +github.com/filecoin-project/specs-actors/v2 v2.3.6/go.mod h1:DJMpxVRXvev9t8P0XWA26RmTzN+MHiL9IlItVLT0zUc= github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= -github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= github.com/filecoin-project/specs-actors/v5 v5.0.4 h1:OY7BdxJWlUfUFXWV/kpNBYGXNPasDIedf42T3sGx08s= github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4UvT/lTLInCJ3JwOWZbX8Ipwq4= +github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew= github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211230214648-aeae366b083a h1:MS1mtAhZh0iSE7OxP1bb6+UNyYKsxg8n51FpHlX1d54= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211230214648-aeae366b083a/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 h1:oUYOvF7EvdXS0Zmk9mNkaB6Bu0l+WXBYPzVodKMiLug= +github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -506,8 +495,6 @@ github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= @@ -537,6 +524,11 @@ github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNV github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1 h1:DX7uPQ4WgAWfoh+NGGlbJQswnYIVvz0SRlLS3rPZQDA= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0 h1:j4LrlVXgrbIWO83mmQUnK0Hi+YnbD+vzrE1z/EphbFE= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= @@ -663,6 +655,8 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2V github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -701,7 +695,6 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= @@ -779,7 +772,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -887,20 +879,16 @@ github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyq github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.3.4 h1:AhJhRrG8xkxh6x87b4wWs+4U4y3DVB3doI8yFNqgQME= -github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= +github.com/ipfs/go-bitswap v0.5.1 h1:721YAEDBnLIrvcIMkCHCdqp34hA8jwL9yKMkyJpSpco= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.5/go.mod h1:yLk8lBJCBRWRqerqCSVi3cE/Dncdt3vGC/PJMVKhLTY= -github.com/ipfs/go-blockservice v0.1.7 h1:yVe9te0M7ow8i+PPkx03YFSpxqzXx594d6h+34D6qMg= -github.com/ipfs/go-blockservice v0.1.7/go.mod h1:GmS+BAt4hrwBKkzE11AFDQUrnvqjwFatGS2MY7wOjEM= +github.com/ipfs/go-blockservice v0.2.1 h1:NJ4j/cwEfIg60rzAWcCIxRtOwbf6ZPK49MewNxObCPQ= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -910,7 +898,6 @@ github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67Fexh github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.0.8-0.20210716091050-de6c03deae1c/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cid v0.1.0 h1:YN33LQulcRHjfom/i25yoOZR4Telp1Hr/2RU3d0PnC0= github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo= @@ -919,15 +906,15 @@ github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAK github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.4.6 h1:zU2cmweykxJ+ziXnA2cPtsLe8rdR/vrthOipLPuf6kc= -github.com/ipfs/go-datastore v0.4.6/go.mod h1:XSipLSc64rFKSFRFGo1ecQl+WhYce3K7frtpHkyPFUc= +github.com/ipfs/go-datastore v0.4.7-0.20211013204805-28a3721c2e66/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -935,40 +922,34 @@ github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaH github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger v0.2.6/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger v0.2.7 h1:ju5REfIm+v+wgVnQ19xGLYPHYHbYLR6qJfmMbCDSK1I= github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger2 v0.1.0/go.mod h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e h1:Xi1nil8K2lBOorBS6Ys7+hmUCzH8fr3U9ipdL/IrcEI= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= +github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-badger2 v0.1.2 h1:sQc2q1gaXrv8YFNeUtxil0neuyDf9hnVHfLsi7lpXfE= +github.com/ipfs/go-ds-badger2 v0.1.2/go.mod h1:3FtQmDv6fMubygEfU43bsFelYpIiXX/XEYA54l9eCwg= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= -github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= -github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= -github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ds-measure v0.2.0 h1:sG4goQe0KDTccHMyT45CY1XyUbxe5VwTKpg2LjApYyQ= +github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9UvVh8V3JxE= +github.com/ipfs/go-filestore v1.1.0 h1:Pu4tLBi1bucu6/HU9llaOmb9yLFk/sgP+pW764zNDoE= +github.com/ipfs/go-filestore v1.1.0/go.mod h1:6e1/5Y6NvLuCRdmda/KA4GUhXJQ3Uat6vcWm2DJfxc8= github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= -github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= -github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.10.0/go.mod h1:cKIshzTaa5rCZjryH5xmSKZVGX9uk1wvwGvz2WEha5Y= -github.com/ipfs/go-graphsync v0.10.6 h1:GkYan4EoDslceHaqYo/hxktWtuZ7VmsyRXLdSmoCcBQ= -github.com/ipfs/go-graphsync v0.10.6/go.mod h1:tQMjWNDD/vSz80YLT/VvzrUmy58aF9lR1uCwSLzjWzI= github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= +github.com/ipfs/go-graphsync v0.11.0/go.mod h1:wC+c8vGVjAHthsVIl8LKr37cUra2GOaMYcQNNmMxDqE= +github.com/ipfs/go-graphsync v0.11.5 h1:WA5hVxGBtcal6L6nqubKiqRolaZxbexOK3GumGFJRR4= +github.com/ipfs/go-graphsync v0.11.5/go.mod h1:+/sZqRwRCQRrV7NCzgBtufmr5QGpUE98XSa7NlsztmM= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v0.1.6/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= -github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.4 h1:DZdeya9Vu4ttvlGheQPGrj6kWehXnYZRFCp9EsZQ1hI= -github.com/ipfs/go-ipfs-blockstore v1.0.4/go.mod h1:uL7/gTJ8QIZ3MtA3dWf+s1a0U3fJy2fcEZAsovpRp+w= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.1.0/go.mod h1:5QDUApRqpgPcfGstCxYeMnjt/DYQtXXdJVCvxHHuWVk= +github.com/ipfs/go-ipfs-blockstore v1.1.1/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= +github.com/ipfs/go-ipfs-blockstore v1.1.2 h1:WCXoZcMYnvOTmlpX+RSSnhVN0uCmbWTeepTGX5lgiXw= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= @@ -983,12 +964,15 @@ github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1I github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1 h1:mEiXWdbMN6C7vtDG21Fphx8TGCbZPpQnz/496w/PL4g= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= @@ -1002,17 +986,19 @@ github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7 github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= -github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= +github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= @@ -1039,15 +1025,15 @@ github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHn github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0 h1:31Re/cPqFHpsRHgyVwjWADPoF0otB1WrjTy8ZFYwEZU= github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= +github.com/ipfs/go-log/v2 v2.4.0 h1:iR/2o9PGWanVJrBgIH5Ff8mPGOwpqLaPIAFqSnsdlzk= +github.com/ipfs/go-log/v2 v2.4.0/go.mod h1:nPZnh7Cj7lwS3LpRU5Mwr2ol1c2gXIEXuF6aywqrtmo= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.4.1 h1:CEEQZnwRkszN06oezuasHwDD823Xcr4p4zluUN9vXqs= -github.com/ipfs/go-merkledag v0.4.1/go.mod h1:56biPaS6e+IS0eXkEt6A8tG+BUQaEIFqDqJuFfQDBoE= +github.com/ipfs/go-merkledag v0.5.1 h1:tr17GPP5XtPhvPPiWtu20tSGZiZDuTaJRXBLcr79Umk= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= @@ -1055,10 +1041,9 @@ github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho= github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= -github.com/ipfs/go-peertaskqueue v0.6.0 h1:BT1/PuNViVomiz1PnnP5+WmKsTNHrxIDvkZrkj4JhOg= -github.com/ipfs/go-peertaskqueue v0.6.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.1 h1:7PLjon3RZwRQMgOTvYccZ+mjzkmds/7YzSWKFlBAypE= +github.com/ipfs/go-peertaskqueue v0.7.1/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= @@ -1073,33 +1058,28 @@ github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdm github.com/ipfs/iptb-plugins v0.3.0 h1:C1rpq1o5lUZtaAOkLIox5akh6ba4uk/3RwWc6ttVxw0= github.com/ipfs/iptb-plugins v0.3.0/go.mod h1:5QtOvckeIw4bY86gSH4fgh3p3gCSMn3FmIKr4gaBncA= github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= -github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= -github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= -github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 h1:8JMSJ0k71fU9lIUrpVwEdoX4KoxiTEX8cZG97v/hTDw= -github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823/go.mod h1:jSlTph+i/q1jLFoiKKeN69KGG0fXpwrcD0izu5C1Tpo= -github.com/ipld/go-car/v2 v2.0.0-beta1.0.20210721090610-5a9d1b217d25/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.0.2/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.1.0 h1:t8R/WXUSkfu1K1gpPk76mytCxsEdMjGcMIgpOq3/Cnw= -github.com/ipld/go-car/v2 v2.1.0/go.mod h1:Xr6GwkDhv8dtOtgHzOynAkIOg0t0YiPc5DxBPppWqZA= +github.com/ipld/go-car v0.3.3-0.20211210032800-e6f244225a16/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car v0.3.3 h1:D6y+jvg9h2ZSv7GLUMWUwg5VTLy1E7Ak+uQw5orOg3I= +github.com/ipld/go-car v0.3.3/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car/v2 v2.1.1-0.20211211000942-be2525f6bf2d/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.1.1 h1:saaKz4nC0AdfCGHLYKeXLGn8ivoPC54fyS55uyOLKwA= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= github.com/ipld/go-codec-dagpb v1.3.0 h1:czTcaoAuNNyIYWs6Qe01DJ+sEX7B+1Z0LcXjSatMGe8= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= -github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= -github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.12.3-0.20210930132912-0b3aef3ca569/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= -github.com/ipld/go-ipld-prime v0.14.2 h1:P5fO2usnisXwrN/1sR5exCgEvINg/w/27EuYPKB/zx8= -github.com/ipld/go-ipld-prime v0.14.2/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= +github.com/ipld/go-ipld-prime v0.14.3-0.20211207234443-319145880958/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.14.3 h1:cGUmxSws2IHurn00/iLMDapeXsnf9+FyAtYVy8G/JsQ= +github.com/ipld/go-ipld-prime v0.14.3/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= -github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= @@ -1109,7 +1089,6 @@ github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= @@ -1132,8 +1111,9 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1 h1:qBCV/RLV02TSfQa7tFmxTihnG+u+7JXByOkhlkR5rmQ= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -1185,7 +1165,6 @@ github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDK github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.8/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= @@ -1228,8 +1207,9 @@ github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40J github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-conn-security-multistream v0.3.0 h1:9UCIKlBL1hC9u7nkMXpD1nkc/T53PKMAn3/k9ivBAVc= +github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= @@ -1243,21 +1223,20 @@ github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68 github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= github.com/libp2p/go-libp2p v0.3.1/go.mod h1:e6bwxbdYH1HqWTz8faTChKGR0BjPc8p+6SyP8GTTR7Y= github.com/libp2p/go-libp2p v0.4.0/go.mod h1:9EsEIf9p2UDuwtPd0DwJsAl0qXVxgAnuDGRvHbfATfI= -github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= -github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= -github.com/libp2p/go-libp2p v0.14.0/go.mod h1:dsQrWLAoIn+GkHPN/U+yypizkHiB9tnv79Os+kSgQ4Q= +github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM= -github.com/libp2p/go-libp2p v0.15.0 h1:jbMbdmtizfpvl1+oQuGJzfGhttAtuxUCavF3enwFncg= -github.com/libp2p/go-libp2p v0.15.0/go.mod h1:8Ljmwon0cZZYKrOCjFeLwQEK8bqR42dOheUZ1kSKhP0= -github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= +github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= +github.com/libp2p/go-libp2p v0.17.0 h1:8l4GV401OSd4dFRyHDtIT/mEzdh/aQGoFC8xshYgm5M= +github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= +github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E= +github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= @@ -1265,17 +1244,19 @@ github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQ github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o= +github.com/libp2p/go-libp2p-autonat v0.7.0 h1:rCP5s+A2dlhM1Xd66wurE0k7S7pPmM0D+FlqqSBXxks= +github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= -github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-blankhost v0.3.0 h1:kTnLArltMabZlzY63pgGDA4kkUcLkBFSM98zBssn/IY= +github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= @@ -1287,9 +1268,9 @@ github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCy github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= -github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= -github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= +github.com/libp2p/go-libp2p-connmgr v0.3.0 h1:yerFXrYa0oxpuVsLlndwm/bLulouHYDcvFrY/4H4fx8= +github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= @@ -1319,8 +1300,12 @@ github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJB github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= -github.com/libp2p/go-libp2p-core v0.9.0 h1:t97Mv0LIBZlP2FXVRNKKVzHJCIjbIWGxYptGId4+htU= github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= +github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.13.0 h1:IFG/s8dN6JN2OTrXX9eq2wNU/Zlz2KLdwZUp5FplgXI= +github.com/libp2p/go-libp2p-core v0.13.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= @@ -1331,8 +1316,8 @@ github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfx github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-discovery v0.5.1 h1:CJylx+h2+4+s68GvrM4pGNyfNhOYviWBPtVv5PA7sfo= -github.com/libp2p/go-libp2p-discovery v0.5.1/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-discovery v0.6.0 h1:1XdPmhMJr8Tmj/yUfkJMIi8mgwWrLUsCB3bMxdT+DSo= +github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= @@ -1340,8 +1325,8 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= -github.com/libp2p/go-libp2p-kad-dht v0.13.0 h1:qBNYzee8BVS6RkD8ukIAGRG6LmVz8+kkeponyI7W+yA= -github.com/libp2p/go-libp2p-kad-dht v0.13.0/go.mod h1:NkGf28RNhPrcsGYWJHm6EH8ULkiJ2qxsWmpE7VTL3LI= +github.com/libp2p/go-libp2p-kad-dht v0.15.0 h1:Ke+Oj78gX5UDXnA6HBdrgvi+fStJxgYTDa51U0TsCLo= +github.com/libp2p/go-libp2p-kad-dht v0.15.0/go.mod h1:rZtPxYu1TnHHz6n1RggdGrxUX/tA1C2/Wiw3ZMUDrU0= github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= @@ -1360,17 +1345,17 @@ github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aD github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-nat v0.1.0 h1:vigUi2MEN+fwghe5ijpScxtbbDz+L/6y8XwlzYOJgSY= +github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-noise v0.2.2 h1:MRt5XGfYziDXIUy2udtMWfPmzZqUDYoC1FZoKnqPzwk= -github.com/libp2p/go-libp2p-noise v0.2.2/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= +github.com/libp2p/go-libp2p-noise v0.3.0 h1:NCVH7evhVt9njbTQshzT7N1S3Q6fjj9M11FCgfH5+cA= +github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= @@ -1387,24 +1372,27 @@ github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuD github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= -github.com/libp2p/go-libp2p-peerstore v0.3.0 h1:wp/G0+37+GLr7tu+wE+4GWNrA3uxKg6IPRigIMSS5oQ= -github.com/libp2p/go-libp2p-peerstore v0.3.0/go.mod h1:fNX9WlOENMvdx/YD7YO/5Hkrn8+lQIk5A39BHa1HIrM= +github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= +github.com/libp2p/go-libp2p-peerstore v0.6.0 h1:HJminhQSGISBIRb93N6WK3t6Fa8OOTnHd/VBjL4mY5A= +github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= -github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk= -github.com/libp2p/go-libp2p-pubsub v0.5.6 h1:YkO3gG9J1mQBEMRrM5obiG3JD0L8RcrzIpoeLeiYqH8= -github.com/libp2p/go-libp2p-pubsub v0.5.6/go.mod h1:gVOzwebXVdSMDQBTfH8ACO5EJ4SQrvsHqCmYsCZpD0E= +github.com/libp2p/go-libp2p-pubsub v0.6.0 h1:98+RXuEWW17U6cAijK1yaTf6mw/B+n5yPA421z+dlo0= +github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg= github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 h1:2lH7rMlvDPSvXeOR+g7FE6aqiEwxtpxWKQL8uigk5fQ= github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6/go.mod h1:8ZodgKS4qRLayfw9FDKDd9DX4C16/GMofDxSldG8QPI= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-quic-transport v0.11.2 h1:p1YQDZRHH4Cv2LPtHubqlQ9ggz4CKng/REZuXZbZMhM= github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ= +github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= +github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-quic-transport v0.15.2 h1:wHBEceRy+1/8Ec8dAIyr+/P7L2YefIGprPVy5LrMM+k= +github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -1429,10 +1417,11 @@ github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= -github.com/libp2p/go-libp2p-swarm v0.5.3 h1:hsYaD/y6+kZff1o1Mc56NcuwSg80lIphTS/zDk3mO4M= github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= +github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= +github.com/libp2p/go-libp2p-swarm v0.9.0 h1:LdWjHDVjPMYt3NCG2EHcQiIP8XzA8BHhHz8ZLAYol2Y= +github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1442,22 +1431,26 @@ github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= -github.com/libp2p/go-libp2p-testing v0.4.2 h1:IOiA5mMigi+eEjf4J+B7fepDhsjtsoWA9QbsCqbNp5U= github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.6.0 h1:tV/wz6mS1VoAYA/5DGTiyzw9TJ+eXMCMvzU5VPLJSgg= +github.com/libp2p/go-libp2p-testing v0.6.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-tls v0.2.0 h1:N8i5wPiHudA+02sfW85R2nUbybPm7agjAywZc6pd3xA= -github.com/libp2p/go-libp2p-tls v0.2.0/go.mod h1:twrp2Ci4lE2GYspA1AnlYm+boYjqVruxDKJJj7s6xrc= +github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.3.1 h1:lsE2zYte+rZCEOHF72J1Fg3XK3dGQyKvI6i5ehJfEp0= +github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 h1:SHt3g0FslnqIkEWF25YOB8UCOCTpGAVvHRWQYJ+veiI= github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= +github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0 h1:GfMCU+2aGGEm1zW3UcOz6wYSn8tXQalFfVfcww99i5A= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo= github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= @@ -1469,10 +1462,10 @@ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhL github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= -github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= -github.com/libp2p/go-libp2p-yamux v0.5.3/go.mod h1:Vy3TMonBAfTMXHWopsMc8iX/XGRYrRlpUaMzaeuHV/s= -github.com/libp2p/go-libp2p-yamux v0.5.4 h1:/UOPtT/6DHPtr3TtKXBHa6g0Le0szYuI33Xc/Xpd7fQ= github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= +github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= +github.com/libp2p/go-libp2p-yamux v0.7.0 h1:bVXHbTj/XH4uBBsPrg26BlDABk5WYRlssY73P0SjhPc= +github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -1489,12 +1482,14 @@ github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-msgio v0.1.0 h1:8Q7g/528ivAlfXTFWvWhVjTE8XG8sDTkRUKPYh9+5Q8= +github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= @@ -1507,13 +1502,15 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport v0.1.0 h1:0ooKOx2iwyIkf339WCZ2HN3ujTDbkK0PjC7JVoP1AiM= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-reuseport-transport v0.0.5 h1:lJzi+vSYbyJj2faPKLxNGWEIBcaV/uJmyvsUxXy2mLw= github.com/libp2p/go-reuseport-transport v0.0.5/go.mod h1:TC62hhPc8qs5c/RoXDZG6YmjK+/YWUPC0yYmeUecbjc= +github.com/libp2p/go-reuseport-transport v0.1.0 h1:C3PHeHjmnz8m6f0uydObj02tMEoi7CyD1zuN7xQT8gc= +github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= @@ -1528,11 +1525,11 @@ github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19 github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= +github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= -github.com/libp2p/go-tcp-transport v0.2.8 h1:aLjX+Nkz+kIz3uA56WtlGKRSAnKDvnqKmv1qF4EyyE4= -github.com/libp2p/go-tcp-transport v0.2.8/go.mod h1:64rSfVidkYPLqbzpcN2IwHY4pmgirp67h++hZ/rcndQ= +github.com/libp2p/go-tcp-transport v0.4.0 h1:VDyg4j6en3OuXf90gfDQh5Sy9KowO9udnd0OU8PP6zg= +github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= @@ -1550,23 +1547,23 @@ github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/go-yamux/v2 v2.1.1/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/go-yamux/v2 v2.2.0 h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU= github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/zeroconf/v2 v2.0.0/go.mod h1:J85R/d9joD8u8F9aHM8pBXygtG9W02enEwS+wWeL6yo= +github.com/libp2p/go-yamux/v2 v2.3.0 h1:luRV68GS1vqqr6EFUjtu1kr51d+IbW0gSowu8emYWAI= +github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs= +github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.21.2 h1:8LqqL7nBQFDUINadW0fHV/xSaCQJgmJC0Gv+qUnjd78= github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q= +github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.24.0 h1:ToR7SIIEdrgOhgVTHvPgdVRJfgVy+N0wQAagH7L4d5g= +github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= @@ -1592,12 +1589,12 @@ github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZb github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-15 v0.1.5 h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk= github.com/marten-seemann/qtls-go1-15 v0.1.5/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-16 v0.1.4 h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco= github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= -github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1 h1:/rpmWuGvceLwwWuaKPdjpR4JJEUH0tq64/I3hvzaNLM= github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-17 v0.1.0 h1:P9ggrs5xtwiqXv/FHNwntmuLMNq3KaSIG93AtAZ48xk= +github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= @@ -1646,7 +1643,6 @@ github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= @@ -1736,8 +1732,6 @@ github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4= -github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.2.1-0.20210714093213-b2b5bd6fe68b/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61 h1:ZrUuMKNgJ52qHPoQ+bx0h0uBfcWmN7Px+4uKSZeesiI= github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= @@ -1755,7 +1749,6 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= @@ -1888,7 +1881,6 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= @@ -1926,7 +1918,6 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -1937,8 +1928,8 @@ github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= -github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= -github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= +github.com/raulk/go-watchdog v1.2.0 h1:konN75pw2BMmZ+AfuAm5rtFsWcJpKF3m02rKituuXNo= +github.com/raulk/go-watchdog v1.2.0/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -2128,7 +2119,6 @@ github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIf github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= @@ -2182,6 +2172,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= @@ -2217,13 +2208,28 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel/bridge/opencensus v0.25.0/go.mod h1:dkZDdaNwLlIutxK2Kc2m3jwW2M1ISaNf8/rOYVwuVHs= +go.opentelemetry.io/otel/exporters/jaeger v1.2.0/go.mod h1:KJLFbEMKTNPIfOxcg/WikIozEoKcPgJRz3Ce1vLlM8E= +go.opentelemetry.io/otel/internal/metric v0.25.0/go.mod h1:Nhuw26QSX7d6n4duoqAFi5KOQR4AuzyMcl5eXOgwxtc= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v0.25.0/go.mod h1:E884FSpQfnJOMMUaq+05IWlJ4rjZpk2s/F1Ju+TEEm8= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk/export/metric v0.25.0/go.mod h1:Ej7NOa+WpN49EIcr1HMUYRvxXXCCnQCg2+ovdt2z8Pk= +go.opentelemetry.io/otel/sdk/metric v0.25.0/go.mod h1:G4xzj4LvC6xDDSsVXpvRVclQCbofGGg4ZU2VKKtDRfg= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= +go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -2295,7 +2301,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2336,7 +2341,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013 h1:Jp57DBw4K7mimZNA3F9f7CndVcUt4kJjmyJf2rzJHoI= @@ -2367,7 +2371,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2421,10 +2424,8 @@ golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -2440,6 +2441,7 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= @@ -2572,6 +2574,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2579,6 +2582,7 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2680,16 +2684,15 @@ golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210225150353-54dc8c5edb56/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2766,7 +2769,6 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= diff --git a/testplans/lotus-soup/testkit/retrieval.go b/testplans/lotus-soup/testkit/retrieval.go index 3d6683d00..67e8d1654 100644 --- a/testplans/lotus-soup/testkit/retrieval.go +++ b/testplans/lotus-soup/testkit/retrieval.go @@ -78,7 +78,7 @@ func RetrieveData(t *TestEnvironment, ctx context.Context, client api.FullNode, func ExtractCarData(ctx context.Context, rdata []byte, rpath string) []byte { bserv := dstest.Bserv() - ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata)) + ch, err := car.LoadCar(ctx, bserv.Blockstore(), bytes.NewReader(rdata)) if err != nil { panic(err) } diff --git a/testplans/lotus-soup/testkit/role_miner.go b/testplans/lotus-soup/testkit/role_miner.go index fc821cd4d..7204c71fe 100644 --- a/testplans/lotus-soup/testkit/role_miner.go +++ b/testplans/lotus-soup/testkit/role_miner.go @@ -182,7 +182,7 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { return nil, err } - err = ds.Put(datastore.NewKey("miner-address"), minerAddr.Bytes()) + err = ds.Put(context.Background(), datastore.NewKey("miner-address"), minerAddr.Bytes()) if err != nil { return nil, err } diff --git a/testplans/lotus-soup/testkit/role_pubsub_tracer.go b/testplans/lotus-soup/testkit/role_pubsub_tracer.go index 5b13e6b81..401a9824d 100644 --- a/testplans/lotus-soup/testkit/role_pubsub_tracer.go +++ b/testplans/lotus-soup/testkit/role_pubsub_tracer.go @@ -30,7 +30,7 @@ func PreparePubsubTracer(t *TestEnvironment) (*PubsubTracer, error) { tracedIP := t.NetClient.MustGetDataNetworkIP().String() tracedAddr := fmt.Sprintf("/ip4/%s/tcp/4001", tracedIP) - host, err := libp2p.New(ctx, + host, err := libp2p.New( libp2p.Identity(privk), libp2p.ListenAddrStrings(tracedAddr), ) diff --git a/tools/packer/repo/config.toml b/tools/packer/repo/config.toml index 900dad218..380d5a28f 100644 --- a/tools/packer/repo/config.toml +++ b/tools/packer/repo/config.toml @@ -21,6 +21,7 @@ ListenAddresses = ["/ip4/0.0.0.0/tcp/5678", "/ip6/::/tcp/5678"] # IpfsMAddr = "" # IpfsUseForRetrieval = false # SimultaneousTransfersForStorage = 20 +# SimultaneousTransfersForStoragePerClient = 0 # SimultaneousTransfersForRetrieval = 20 # [Metrics]