diff --git a/.circleci/config.yml b/.circleci/config.yml index 90db3a626..0301a9fe8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,12 +7,12 @@ executors: golang: docker: # Must match GO_VERSION_MIN in project root - - image: cimg/go:1.20.7 + - image: cimg/go:1.21.7 resource_class: medium+ golang-2xl: docker: # Must match GO_VERSION_MIN in project root - - image: cimg/go:1.20.7 + - image: cimg/go:1.21.7 resource_class: 2xlarge ubuntu: docker: @@ -164,7 +164,7 @@ jobs: default: unit description: Test suite name to report to CircleCI. docker: - - image: cimg/go:1.20 + - image: cimg/go:1.21 environment: LOTUS_HARMONYDB_HOSTS: yugabyte - image: yugabytedb/yugabyte:2.18.0.0-b65 @@ -218,7 +218,7 @@ jobs: test with. If empty (the default) the commit defined by the git submodule is used. docker: - - image: cimg/go:1.20 + - image: cimg/go:1.21 resource_class: << parameters.resource_class >> steps: - install-ubuntu-deps @@ -412,7 +412,7 @@ jobs: description: | Arguments to pass to golangci-lint docker: - - image: cimg/go:1.20 + - image: cimg/go:1.21 resource_class: medium+ steps: - install-ubuntu-deps @@ -663,6 +663,18 @@ workflows: - build suite: itest-decode_params target: "./itests/decode_params_test.go" + - test: + name: test-itest-direct_data_onboard + requires: + - build + suite: itest-direct_data_onboard + target: "./itests/direct_data_onboard_test.go" + - test: + name: test-itest-direct_data_onboard_verified + requires: + - build + suite: itest-direct_data_onboard_verified + target: "./itests/direct_data_onboard_verified_test.go" - test: name: test-itest-dup_mpool_messages requires: @@ -879,6 +891,12 @@ workflows: - build suite: itest-pending_deal_allocation target: "./itests/pending_deal_allocation_test.go" + - test: + name: test-itest-raft_messagesigner + requires: + - build + suite: itest-raft_messagesigner + target: "./itests/raft_messagesigner_test.go" - test: name: test-itest-remove_verifreg_datacap requires: @@ -927,6 +945,7 @@ workflows: - 
build suite: itest-sector_pledge target: "./itests/sector_pledge_test.go" + resource_class: 2xlarge get-params: true - test: diff --git a/.circleci/template.yml b/.circleci/template.yml index 9011f1a86..c0644c80d 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -7,12 +7,12 @@ executors: golang: docker: # Must match GO_VERSION_MIN in project root - - image: cimg/go:1.20.7 + - image: cimg/go:1.21.7 resource_class: medium+ golang-2xl: docker: # Must match GO_VERSION_MIN in project root - - image: cimg/go:1.20.7 + - image: cimg/go:1.21.7 resource_class: 2xlarge ubuntu: docker: @@ -551,7 +551,7 @@ workflows: - build suite: itest-[[ $name ]] target: "./itests/[[ $file ]]" - [[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]] + [[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config") (eq $name "sector_pledge")]] resource_class: 2xlarge [[- end]] [[- if or (eq $name "wdpost") (eq $name "sector_pledge")]] diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cf98e889..3a8b30abc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,260 @@ # UNRELEASED +## New features + ## Improvements +# v1.26.2 / 2024-04-08 + +**This is a mandatory patch release for the Filecoin network version 22 mainnet upgrade, for all node operators.** + +There is an update in the upgrade epoch for nv22, you can read the [full discussion in Slack here.](https://filecoinproject.slack.com/archives/C05P37R9KQD/p1712548103521969) + +The new upgrade epoch is scheduled to be on **epoch `3855360 - 2024-04-24 - 14:00:00Z`**. That means: + +- **All mainnet node operators that have upgraded to v1.26.x, must upgrade to this patch release before 2024-04-11T14:00:00Z.** +- **All mainnet node operators that are on a version lower the v1.26.x, must upgrade to this patch release before 2024-04-24T14:00:00Z.** + +This patch also includes fixes for node operators who want to index builtin-actor events after the nv22 upgrade. 
Specifically, it ensures the builtin actor event entries are ordered by insertion order when selected ([#11834](https://github.com/filecoin-project/lotus/pull/11834)). It also includes a couple Lotus-Miner patch fixes, ensuring that SnapDeals works properly and are using the new ProveReplicaUpdate3 message after the network version 22 upgrade, ensuring that DDO-sectors has the correct sector expirations, as well as DDO-sector visibility in the `lotus-miner sectors list` cmd. + +## Upgrade Warnings + +For users currently on a version of Lotus lower than v1.26.0, please note that **this release requires a minimum Go version of v1.21.7 or higher to successfully build Lotus.** + +## v1.26.x Inclusions + +See the [v1.26.0](#v1260--2024-03-21) release notes below for inclusions and notes on the v1.26.x series. + +* [v13 Builtin Actor Bundle](#v13-builtin-actor-bundle) +* [Migration](#migration) +* [New features](#new-features-1) + * [Tracing API](#tracing-api) + * [Ethereum Tracing API (`trace_block` and `trace_replayBlockTransactions`)](#ethereum-tracing-api-trace_block-and-trace_replayblocktransactions) + * [GetActorEventsRaw and SubscribeActorEventsRaw](#getactoreventsraw-and-subscribeactoreventsraw) + * [Events Configuration Changes](#events-configuration-changes) + * [GetAllClaims and GetAllAlocations](#getallclaims-and-getallalocations) + * [Lotus CLI](#lotus-cli) + +#v1260--2024-03-21 + +# v1.26.1 / 2024-03-27 + +***RETRACTED: Due to a change in network version 22 upgrade epoch, Lotus v1.26.1 should not be used prior to the new upgrade epoch. See v1.26.2 release notes above.*** + +**This is a patch release for the Calibration network user.** The Calibration network is scheduled for an upgrade to include the two additional built-in actor events to ease the transition and observability of DDO for the ecosystem ([#964](https://github.com/filecoin-project/FIPs/pull/964) and [#968](https://github.com/filecoin-project/FIPs/pull/968)). 
+ +The agreed-upon epoch between the Filecoin implementer team for the update is `1493854`, corresponding to `2024-04-03T11:00:00Z`. All Calibration network users need to upgrade to this patch release before that. + + **Lotus Mainnet Users**: For users on the Mainnet, the [Lotus v1.26.0](https://github.com/filecoin-project/lotus/releases/tag/v1.26.0) release already includes the aforementioned events in preparation for the Mainnet nv22 upgrade. Therefore, both v1.26.0 and v1.26.1 versions are suitable for use on the Mainnet for the coming network version 22 upgrade. + +# v1.26.0 / 2024-03-21 + +***RETRACTED: Due to a change in network version 22 upgrade epoch, Lotus v1.26.0 should not be used prior to the new upgrade epoch. See v1.26.2 release notes above.*** + +This is the stable release for the upcoming MANDATORY Filecoin network upgrade v22, codenamed Dragon 🐉, at `epoch 3817920 - 2024-04-11 - 14:00:00Z` + +The Filecoin network version 22 delivers the following FIPs: + +- [FIP-0063: Switching to new Drand mainnet network](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0063.md) +- [FIP-0074: Remove cron-based automatic deal settlement](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0074.md) +- [FIP-0076: Direct data onboarding](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md) +- [FIP-0083: Add built-in Actor events in the Verified Registry, Miner and Market Actors](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0083.md) + +## ☢️ Upgrade Warnings ☢️ + +- This release requires a minimum Go version of v1.21.7 or higher to successfully build Lotus. + +## v13 Builtin Actor Bundle + +[Builtin actor v13.0.0](https://github.com/filecoin-project/builtin-actors/releases/tag/v13.0.0) is used for supporting this upgrade. 
Make sure that your lotus actor bundle matches the v13 actors manifest by running the following cli after upgrading: + +``` +lotus state actor-cids --network-version=22 +Network Version: 22 +Actor Version: 13 +Manifest CID: bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e + +Actor CID +account bafk2bzacedxnbtlsqdk76fsfmnhyvsblwyfducerwwtp3mqtx2wbrvs5idl52 +cron bafk2bzacebbopddyn5csb3fsuhh2an4ttd23x6qnwixgohlirj5ahtcudphyc +datacap bafk2bzaceah42tfnhd7xnztawgf46gbvc3m2gudoxshlba2ucmmo2vy67t7ci +eam bafk2bzaceb23bhvvcjsth7cn7vp3gbaphrutsaz7v6hkls3ogotzs4bnhm4mk +ethaccount bafk2bzaceautge6zhuy6jbj3uldwoxwhpywuon6z3xfvmdbzpbdribc6zzmei +evm bafk2bzacedq6v2lyuhgywhlllwmudfj2zufzcauxcsvvd34m2ek5xr55mvh2q +init bafk2bzacedr4xacm3fts4vilyeiacjr2hpmwzclyzulbdo24lrfxbtau2wbai +multisig bafk2bzacecr5zqarfqak42xqcfeulsxlavcltawsx2fvc7zsjtby6ti4b3wqc +paymentchannel bafk2bzacebntdhfmyc24e7tm52ggx5tnw4i3hrr3jmllsepv3mibez4hywsa2 +placeholder bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro +reward bafk2bzacedq4q2kwkruu4xm7rkyygumlbw2yt4nimna2ivea4qarvtkohnuwu +storagemarket bafk2bzacebjtoltdviyznpj34hh5qp6u257jnnbjole5rhqfixm7ug3epvrfu +storageminer bafk2bzacebf4rrqyk7gcfggggul6nfpzay7f2ordnkwm7z2wcf4mq6r7i77t2 +storagepower bafk2bzacecjy4dkulvxppg3ocbmeixe2wgg6yxoyjxrm4ko2fm3uhpvfvam6e +system bafk2bzacecyf523quuq2kdjfdvyty446z2ounmamtgtgeqnr3ynlu5cqrlt6e +verifiedregistry bafk2bzacedkxehp7y7iyukbcje3wbpqcvufisos6exatkanyrbotoecdkrbta +``` + +## Migration + +We are expecting a bit heavier than normal state migration for this upgrade due to the amount of state changes introduced with Direct Data Onboarding. + +All node operators, including storage providers, should be aware that ONE pre-migration is being scheduled 120 epochs before the upgrade. 
It will take around 10-20 minutes for the pre-migration and less than 30 seconds for the final migration, depending on the amount of historical state in the node blockstore and the hardware specs the node is running on. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries.
+
+We recommend node operators (who haven't enabled splitstore discard mode) that do not care about historical chain states, to prune the chain blockstore by syncing from a snapshot 1-2 days before the upgrade.
+
+You can test out the migration by running the [`benchmarking a network migration` tutorial.](https://lotus.filecoin.io/kb/test-migration/)
+
+For certain node operators, such as full archival nodes or systems that need to keep large amounts of state (RPC providers), completing the pre-migration in time before the network upgrade might not be achievable. For those node operators, it is recommended to skip the pre-migration and run the non-cached migration (i.e., just running the migration at the exact upgrade epoch), and schedule for some downtime during the upgrade epoch. 
Operators of such nodes can read the [`How to disable premigration in network upgrade` tutorial.](https://lotus.filecoin.io/kb/disable-premigration/) + +## New features +- feat: api: new verified registry methods to get all allocations and claims (#11631) ([filecoin-project/lotus#11631](https://github.com/filecoin-project/lotus/pull/11631)) +- feat: sealing: Support nv22 DDO features in the sealing pipeline (#11226) ([filecoin-project/lotus#11226](https://github.com/filecoin-project/lotus/pull/11226)) +- feat: implement FIP-0063 ([filecoin-project/lotus#11572](https://github.com/filecoin-project/lotus/pull/11572)) +- feat: events: Add Lotus APIs to consume smart contract and built-in actor events ([filecoin-project/lotus#11618](https://github.com/filecoin-project/lotus/pull/11618)) + +### Tracing API + +Replace the `CodeCid` field in the message trace (added in 1.23.4) with an `InvokedActor` field. + +**Before:** + +```javascript +{ + "Msg": { + "From": ..., + "To": ..., + ... + "CodeCid": ... // The actor's code CID. + } + "MsgRct": ..., + "GasCharges": [], + "Subcalls": [], +} +``` + +**After:** + +```javascript +{ + "Msg": { + "From": ..., + "To": ... + } + "InvokedActor": { // The invoked actor (ommitted if the actor wasn't invoked). + "Id": 1234, // The ID of the actor. + "State": { // The actor's state object (may change between network versions). + "Code": ..., // The actor's code CID. + "Head": ..., // The actor's state-root (when invoked). + "CallSeqNum": ..., // The actor's nonce. + "Balance": ..., // The actor's balance (when invoked). + "Address": ..., // Delegated address (FEVM only). + } + } + "MsgRct": ..., + "GasCharges": [], + "Subcalls": [], +} +``` + +This means the trace now contains an accurate "snapshot" of the actor at the time of the call, information that may not be present in the final state-tree (e.g., due to reverts). This will hopefully improve the performance and accuracy of indexing services. 
+ +### Ethereum Tracing API (`trace_block` and `trace_replayBlockTransactions`) + +For those with the Ethereum JSON-RPC API enabled, the experimental Ethereum Tracing API has been improved significantly and should be considered "functional". However, it's still new and should be tested extensively before relying on it. This API translates FVM traces to Ethereum-style traces, implementing the OpenEthereum `trace_block` and `trace_replayBlockTransactions` APIs. + +This release fixes numerous bugs with this API and now ABI-encodes non-EVM inputs/outputs as if they were explicit EVM calls to [`handle_filecoin_method`][handlefilecoinmethod] for better block explorer compatibility. + +However, there are some _significant_ limitations: + +1. The Geth APIs are not implemented, only the OpenEthereum (Erigon, etc.) APIs. +2. Block rewards are not (yet) included in the trace. +3. Selfdestruct operations are not included in the trace. +4. EVM smart contract "create" events always specify `0xfe` as the "code" for newly created EVM smart contracts. + +Additionally, Filecoin is not Ethereum no matter how much we try to provide API/tooling compatibility. This API attempts to translate Filecoin semantics into Ethereum semantics as accurately as possible, but it's hardly the best source of data unless you _need_ Filecoin to look like an Ethereum compatible chain. If you're trying to build a new integration with Filecoin, please use the native `StateCompute` method instead. + +[handlefilecoinmethod]: https://fips.filecoin.io/FIPS/fip-0054.html#handlefilecoinmethod-general-handler-for-method-numbers--1024 + +### GetActorEventsRaw and SubscribeActorEventsRaw + +[FIP-0049](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0049.md) introduced _Actor Events_ that can be emitted by user programmed actors. [FIP-0083](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0083.md) introduces new events emitted by the builtin Verified Registry, Miner and Market Actors. 
These new events for builtin actors are being activated with network version 22 to coincide with _Direct Data Onboarding_ as defined in [FIP-0076](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0076.md) which introduces additional flexibility for data onboarding. Sector, Deal and DataCap lifecycles can be tracked with these events, providing visibility and options for programmatic responses to changes in state. + +Actor events are available on message receipts, but can now be retrieved from a node using the new `GetActorEventsRaw` and `SubscribeActorEventsRaw` methods. These methods allow for querying and subscribing to actor events, respectively. They depend on the Lotus node both collecting events (with `Fevm.Events.RealTimeFilterAPI` and `Fevm.Events.HistoricFilterAPI`) and being enabled with the new configuration option `Events.EnableActorEventsAPI`. Note that a Lotus node can only respond to requests for historic events that it retains in its event store. + +Both `GetActorEventsRaw` and `SubscribeActorEventsRaw` take a filter parameter which can optionally filter events on: + +* `Addresses` of the actor(s) emitting the event +* Specific `Fields` within the event +* `FromHeight` and `ToHeight` to filter events by block height +* `TipSetKey` to restrict events contained within a specific tipset + +`GetActorEventsRaw` provides a one-time query for actor events, while `SubscribeActorEventsRaw` provides a long-lived connection (via websockets) to the Lotus node, allowing for real-time updates on actor events. The subscription can be cancelled by the client at any time. + +A future Lotus release may include `GetActorEvents` and `SubscribeActorEvents` methods which will provide a more user-friendly interface to actor events, including deserialization of event data. 
+
+### Events Configuration Changes
+
+All configuration options previously under `Fevm.Events` are now in the top-level `Events` section along with the new `Events.EnableActorEventsAPI` option mentioned above. If you have non-default options in `[Events]` under `[Fevm]` in your configuration file, please move them to the top-level `[Events]`.
+
+While `Fevm.Events.*` options are deprecated and replaced by `Events.*`, any existing custom values will be respected if their new form isn't set, but a warning will be printed to standard error upon startup. Support for these deprecated options will be removed in a future Lotus release, so please migrate your configuration promptly.
+
+### GetAllClaims and GetAllAlocations
+
+Additionally the methods `GetAllAllocations` and `GetAllClaims` have been added to the Lotus API. These methods list all the allocations and claims available in the actor state.
+
+### Lotus CLI
+
+The `filplus` commands used for listing allocations and claims have been updated. If no argument is provided to either command, they will list out all the allocations and claims in the verified registry actor.
+The output list columns have been modified to `AllocationID` and `ClaimID` instead of ID. 
+ +```shell +lotus filplus list-allocations --help +NAME: + lotus filplus list-allocations - List allocations available in verified registry actor or made by a client if specified + +USAGE: + lotus filplus list-allocations [command options] clientAddress + +OPTIONS: + --expired list only expired allocations (default: false) + --json output results in json format (default: false) + --help, -h show help + + +lotus filplus list-claims --help +NAME: + lotus filplus list-claims - List claims available in verified registry actor or made by provider if specified + +USAGE: + lotus filplus list-claims [command options] providerAddress + +OPTIONS: + --expired list only expired claims (default: false) + --help, -h show help +``` + +## Dependencies +- github.com/filecoin-project/go-state-types (v0.12.8 -> v0.13.1) +- chore: deps: update to go-state-types v13.0.0-rc.1 ([filecoin-project/lotus#11662](https://github.com/filecoin-project/lotus/pull/11662)) +- chore: deps: update to go-state-types v13.0.0-rc.2 ([filecoin-project/lotus#11675](https://github.com/filecoin-project/lotus/pull/11675)) +- chore: deps: update to go-multiaddr v0.12.2 (#11602) ([filecoin-project/lotus#11602](https://github.com/filecoin-project/lotus/pull/11602)) +- feat: fvm: update the FVM/FFI to v4.1 (#11608) (#11612) ([filecoin-project/lotus#11612](https://github.com/filecoin-project/lotus/pull/11612)) +- chore: deps: update builtin-actors, GST, verified claims tests ([filecoin-project/lotus#11768](https://github.com/filecoin-project/lotus/pull/11768)) + +## Others +- Remove PL operated bootstrap nodes from mainnet.pi ([filecoin-project/lotus#11491](https://github.com/filecoin-project/lotus/pull/11491)) +- Update epoch heights (#11637) ([filecoin-project/lotus#11637](https://github.com/filecoin-project/lotus/pull/11637)) +- chore: Set upgrade heights and change codename ([filecoin-project/lotus#11599](https://github.com/filecoin-project/lotus/pull/11599)) +- chore:: backport #11609 to the feat/nv22 branch 
(#11644) ([filecoin-project/lotus#11644](https://github.com/filecoin-project/lotus/pull/11644))
+- fix: add UpgradePhoenixHeight to StateGetNetworkParams (#11648) ([filecoin-project/lotus#11648](https://github.com/filecoin-project/lotus/pull/11648))
+- feat: drand quicknet: allow scheduling drand quicknet upgrade before nv22 on 2k devnet ([filecoin-project/lotus#11667](https://github.com/filecoin-project/lotus/pull/11667))
+- chore: backport #11632 to release/v1.26.0 ([filecoin-project/lotus#11667](https://github.com/filecoin-project/lotus/pull/11667))
+- release: bump to v1.26.0-rc2 ([filecoin-project/lotus#11691](https://github.com/filecoin-project/lotus/pull/11691))
+- Docs: Drand: document the meaning of "IsChained" ([filecoin-project/lotus#11692](https://github.com/filecoin-project/lotus/pull/11692))
+- chore: remove old calibnet bootstrappers ([filecoin-project/lotus#11702](https://github.com/filecoin-project/lotus/pull/11702))
+- chore: Add lotus-provider to build to match install ([filecoin-project/lotus#11616](https://github.com/filecoin-project/lotus/pull/11616))
+- new: add forest bootstrap nodes (#11636) ([filecoin-project/lotus#11636](https://github.com/filecoin-project/lotus/pull/11636))
+
 # v1.25.2 / 2024-01-11
 
 This is an optional but **highly recommended feature release** of Lotus, as it includes fixes for synchronization issues that users have experienced. The feature release also introduces `Lotus-Provider` in its alpha testing phase, as well as the ability to call external PC2-binaries during the sealing process. 
@@ -283,6 +535,7 @@ Lotus-workers can now be built to leverage the SupraSeal C2 sealing optimization - fix: lotus-provider: lotus-provider msg sending ([filecoin-project/lotus#11480](https://github.com/filecoin-project/lotus/pull/11480)) - fix: lotus-provider: Fix winning PoSt ([filecoin-project/lotus#11483](https://github.com/filecoin-project/lotus/pull/11483)) - chore: fix: sql Scan cannot write to an object ([filecoin-project/lotus#11487](https://github.com/filecoin-project/lotus/pull/11487)) +- fix: Exclude reverted events in `eth_getLogs` results [filecoin-project/lotus#11318](https://github.com/filecoin-project/lotus/pull/11318) ## Dependencies - deps: update go-libp2p to v0.28.1 ([filecoin-project/lotus#10998](https://github.com/filecoin-project/lotus/pull/10998)) diff --git a/Dockerfile b/Dockerfile index c9750a71f..ae83911d3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ ##################################### -FROM golang:1.20.7-bullseye AS lotus-builder +FROM golang:1.21.7-bullseye AS lotus-builder MAINTAINER Lotus Development Team RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev diff --git a/GO_VERSION_MIN b/GO_VERSION_MIN index 8909929f6..8819d012c 100644 --- a/GO_VERSION_MIN +++ b/GO_VERSION_MIN @@ -1 +1 @@ -1.20.7 +1.21.7 diff --git a/Makefile b/Makefile index a17b50d09..236d2d98e 100644 --- a/Makefile +++ b/Makefile @@ -124,7 +124,7 @@ lotus-gateway: $(BUILD_DEPS) .PHONY: lotus-gateway BINS+=lotus-gateway -build: lotus lotus-miner lotus-worker +build: lotus lotus-miner lotus-worker lotus-provider @[[ $$(type -P "lotus") ]] && echo "Caution: you have \ an existing lotus binary in your PATH. 
This may cause problems if you don't run 'sudo make install'" || true diff --git a/README.md b/README.md index c944d41e6..dd4ff3b54 100644 --- a/README.md +++ b/README.md @@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l #### Go -To build Lotus, you need a working installation of [Go 1.20.10 or higher](https://golang.org/dl/): +To build Lotus, you need a working installation of [Go 1.21.7 or higher](https://golang.org/dl/): ```bash -wget -c https://golang.org/dl/go1.20.10.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local +wget -c https://golang.org/dl/go1.21.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local ``` **TIP:** diff --git a/api/api_full.go b/api/api_full.go index 3dc7f8bb2..23a50471b 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -20,7 +20,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin/v8/paych" - "github.com/filecoin-project/go-state-types/builtin/v9/market" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" @@ -28,8 +27,10 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -552,14 +553,20 @@ type FullNode interface { // StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. 
Returns nil if // pending allocation is not found. StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read + // StateGetAllocationIdForPendingDeal is like StateGetAllocationForPendingDeal except it returns the allocation ID + StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) //perm:read // StateGetAllocation returns the allocation for a given address and allocation ID. StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read // StateGetAllocations returns the all the allocations for a given client. StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read + // StateGetAllAllocations returns the all the allocations available in verified registry actor. + StateGetAllAllocations(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read // StateGetClaim returns the claim for a given address and claim ID. StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read // StateGetClaims returns the all the claims for a given provider. StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read + // StateGetAllClaims returns the all the claims available in verified registry actor. 
+ StateGetAllClaims(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read // StateComputeDataCID computes DataCID from a set of on-chain deals StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) //perm:read // StateLookupID retrieves the ID address of the given address @@ -868,9 +875,26 @@ type FullNode interface { Web3ClientVersion(ctx context.Context) (string, error) //perm:read // TraceAPI related methods + + // Returns an OpenEthereum-compatible trace of the given block (implementing `trace_block`), + // translating Filecoin semantics into Ethereum semantics and tracing both EVM and FVM calls. // - // Returns traces created at given block + // Features: + // + // - FVM actor create events, calls, etc. show up as if they were EVM smart contract events. + // - Native FVM call inputs are ABI-encoded (Solidity ABI) as if they were calls to a + // `handle_filecoin_method(uint64 method, uint64 codec, bytes params)` function + // (where `codec` is the IPLD codec of `params`). + // - Native FVM call outputs (return values) are ABI-encoded as `(uint32 exit_code, uint64 + // codec, bytes output)` where `codec` is the IPLD codec of `output`. + // + // Limitations (for now): + // + // 1. Block rewards are not included in the trace. + // 2. SELFDESTRUCT operations are not included in the trace. + // 3. EVM smart contract "create" events always specify `0xfe` as the "code" for newly created EVM smart contracts. 
EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) //perm:read + // Replays all transactions in a block returning the requested traces for each transaction EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) //perm:read @@ -879,6 +903,36 @@ type FullNode interface { // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that // the path specified when calling CreateBackup is within the base path CreateBackup(ctx context.Context, fpath string) error //perm:admin + + RaftState(ctx context.Context) (*RaftStateData, error) //perm:read + RaftLeader(ctx context.Context) (peer.ID, error) //perm:read + + // Actor events + + // GetActorEventsRaw returns all user-programmed and built-in actor events that match the given + // filter. + // This is a request/response API. + // Results available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange + // configuration options and also the amount of historical data available in the node. + // + // This is an EXPERIMENTAL API and may be subject to change. + GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) //perm:read + + // SubscribeActorEventsRaw returns a long-lived stream of all user-programmed and built-in actor + // events that match the given filter. + // Events that match the given filter are written to the stream in real-time as they are emitted + // from the FVM. + // The response stream is closed when the client disconnects, when a ToHeight is specified and is + // reached, or if there is an error while writing an event to the stream. + // This API also allows clients to read all historical events matching the given filter before any + // real-time events are written to the response stream if the filter specifies an earlier + // FromHeight. 
+ // Results available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange + // configuration options and also the amount of historical data available in the node. + // + // Note: this API is only available via websocket connections. + // This is an EXPERIMENTAL API and may be subject to change. + SubscribeActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) //perm:read } // reverse interface to the client, called after EthSubscribe @@ -1114,9 +1168,47 @@ type MarketBalance struct { Locked big.Int } +type MarketDealState struct { + SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated + SlashEpoch abi.ChainEpoch // -1 if deal never slashed +} + +func MakeDealState(mds market.DealState) MarketDealState { + return MarketDealState{ + SectorStartEpoch: mds.SectorStartEpoch(), + LastUpdatedEpoch: mds.LastUpdatedEpoch(), + SlashEpoch: mds.SlashEpoch(), + } +} + +type mstate struct { + s MarketDealState +} + +func (m mstate) SectorStartEpoch() abi.ChainEpoch { + return m.s.SectorStartEpoch +} + +func (m mstate) LastUpdatedEpoch() abi.ChainEpoch { + return m.s.LastUpdatedEpoch +} + +func (m mstate) SlashEpoch() abi.ChainEpoch { + return m.s.SlashEpoch +} + +func (m mstate) Equals(o market.DealState) bool { + return market.DealStatesEqual(m, o) +} + +func (m MarketDealState) Iface() market.DealState { + return mstate{m} +} + type MarketDeal struct { Proposal market.DealProposal - State market.DealState + State MarketDealState } type RetrievalOrder struct { diff --git a/api/api_gateway.go b/api/api_gateway.go index 238bf43ab..2a30ae501 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -129,4 +129,8 @@ type Gateway interface { Web3ClientVersion(ctx context.Context) (string, error) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) EthTraceReplayBlockTransactions(ctx 
context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) + + GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) + SubscribeActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) + ChainGetEvents(context.Context, cid.Cid) ([]types.Event, error) } diff --git a/api/api_storage.go b/api/api_storage.go index d5b3d5c1d..b24ee2af3 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -24,6 +24,7 @@ import ( builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/storiface" @@ -75,7 +76,7 @@ type StorageMiner interface { // Add piece to an open sector. If no sectors with enough space are open, // either a new sector will be created, or this call will block until more // sectors can be created. - SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d piece.PieceDealInfo) (SectorOffset, error) //perm:admin SectorsUnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin @@ -353,10 +354,21 @@ type SectorLog struct { } type SectorPiece struct { - Piece abi.PieceInfo - DealInfo *PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces) + Piece abi.PieceInfo + + // DealInfo is nil for pieces which do not appear in deals (e.g. 
filler pieces) + // NOTE: DDO pieces which aren't associated with a market deal and have no + // verified allocation will still have a non-nil DealInfo. + // nil DealInfo indicates that the piece is a filler, and has zero piece commitment. + DealInfo *piece.PieceDealInfo } +// DEPRECATED: Use piece.PieceDealInfo instead +type PieceDealInfo = piece.PieceDealInfo + +// DEPRECATED: Use piece.DealSchedule instead +type DealSchedule = piece.DealSchedule + type SectorInfo struct { SectorID abi.SectorNumber State SectorState @@ -459,28 +471,6 @@ type SectorOffset struct { Offset abi.PaddedPieceSize } -// DealInfo is a tuple of deal identity and its schedule -type PieceDealInfo struct { - // "Old" builtin-market deal info - PublishCid *cid.Cid - DealID abi.DealID - DealProposal *market.DealProposal - - // Common deal info - DealSchedule DealSchedule - - // Best-effort deal asks - KeepUnsealed bool -} - -// DealSchedule communicates the time interval of a storage deal. The deal must -// appear in a sealed (proven) sector no later than StartEpoch, otherwise it -// is invalid. -type DealSchedule struct { - StartEpoch abi.ChainEpoch - EndEpoch abi.ChainEpoch -} - // DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that // we expose through JSON-RPC to avoid clients having to depend on the // dagstore lib. 
diff --git a/api/cbor_gen.go b/api/cbor_gen.go index fd2cb30b4..7a3f97e59 100644 --- a/api/cbor_gen.go +++ b/api/cbor_gen.go @@ -14,7 +14,8 @@ import ( abi "github.com/filecoin-project/go-state-types/abi" paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" - market "github.com/filecoin-project/go-state-types/builtin/v9/market" + + piece "github.com/filecoin-project/lotus/storage/pipeline/piece" ) var _ = xerrors.Errorf @@ -35,7 +36,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { } // t.Channel (address.Address) (struct) - if len("Channel") > cbg.MaxLength { + if len("Channel") > 8192 { return xerrors.Errorf("Value in field \"Channel\" was too long") } @@ -51,7 +52,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { } // t.Vouchers ([]*paych.SignedVoucher) (slice) - if len("Vouchers") > cbg.MaxLength { + if len("Vouchers") > 8192 { return xerrors.Errorf("Value in field \"Vouchers\" was too long") } @@ -62,7 +63,7 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Vouchers) > cbg.MaxLength { + if len(t.Vouchers) > 8192 { return xerrors.Errorf("Slice value in field t.Vouchers was too long") } @@ -73,10 +74,11 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.WaitSentinel (cid.Cid) (struct) - if len("WaitSentinel") > cbg.MaxLength { + if len("WaitSentinel") > 8192 { return xerrors.Errorf("Value in field \"WaitSentinel\" was too long") } @@ -123,7 +125,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -150,7 +152,7 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Vouchers: array too large (%d)", extra) } @@ -188,9 +190,9 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) 
(err error) { } } + } } - // t.WaitSentinel (cid.Cid) (struct) case "WaitSentinel": @@ -226,7 +228,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { } // t.Size (abi.UnpaddedPieceSize) (uint64) - if len("Size") > cbg.MaxLength { + if len("Size") > 8192 { return xerrors.Errorf("Value in field \"Size\" was too long") } @@ -242,7 +244,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { } // t.Offset (abi.PaddedPieceSize) (uint64) - if len("Offset") > cbg.MaxLength { + if len("Offset") > 8192 { return xerrors.Errorf("Value in field \"Offset\" was too long") } @@ -258,7 +260,7 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error { } // t.SectorID (abi.SectorNumber) (uint64) - if len("SectorID") > cbg.MaxLength { + if len("SectorID") > 8192 { return xerrors.Errorf("Value in field \"SectorID\" was too long") } @@ -305,7 +307,7 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -381,7 +383,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error { } // t.Refs ([]api.SealedRef) (slice) - if len("Refs") > cbg.MaxLength { + if len("Refs") > 8192 { return xerrors.Errorf("Value in field \"Refs\" was too long") } @@ -392,7 +394,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error { return err } - if len(t.Refs) > cbg.MaxLength { + if len(t.Refs) > 8192 { return xerrors.Errorf("Slice value in field t.Refs was too long") } @@ -403,6 +405,7 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -436,7 +439,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -453,7 +456,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > 
cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Refs: array too large (%d)", extra) } @@ -481,6 +484,7 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) (err error) { } } + } } @@ -505,7 +509,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { } // t.Epoch (abi.ChainEpoch) (int64) - if len("Epoch") > cbg.MaxLength { + if len("Epoch") > 8192 { return xerrors.Errorf("Value in field \"Epoch\" was too long") } @@ -527,7 +531,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { } // t.Value (abi.SealRandomness) (slice) - if len("Value") > cbg.MaxLength { + if len("Value") > 8192 { return xerrors.Errorf("Value in field \"Value\" was too long") } @@ -538,7 +542,7 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { return err } - if len(t.Value) > cbg.ByteArrayMaxLen { + if len(t.Value) > 2097152 { return xerrors.Errorf("Byte array in field t.Value was too long") } @@ -546,9 +550,10 @@ func (t *SealTicket) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Value[:]); err != nil { + if _, err := cw.Write(t.Value); err != nil { return err } + return nil } @@ -581,7 +586,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -594,10 +599,10 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { case "Epoch": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -624,7 +629,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Value: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -635,7 +640,7 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) (err error) { t.Value = make([]uint8, extra) } - if _, err := io.ReadFull(cr, 
t.Value[:]); err != nil { + if _, err := io.ReadFull(cr, t.Value); err != nil { return err } @@ -660,7 +665,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { } // t.Epoch (abi.ChainEpoch) (int64) - if len("Epoch") > cbg.MaxLength { + if len("Epoch") > 8192 { return xerrors.Errorf("Value in field \"Epoch\" was too long") } @@ -682,7 +687,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { } // t.Value (abi.InteractiveSealRandomness) (slice) - if len("Value") > cbg.MaxLength { + if len("Value") > 8192 { return xerrors.Errorf("Value in field \"Value\" was too long") } @@ -693,7 +698,7 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { return err } - if len(t.Value) > cbg.ByteArrayMaxLen { + if len(t.Value) > 2097152 { return xerrors.Errorf("Byte array in field t.Value was too long") } @@ -701,9 +706,10 @@ func (t *SealSeed) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Value[:]); err != nil { + if _, err := cw.Write(t.Value); err != nil { return err } + return nil } @@ -736,7 +742,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -749,10 +755,10 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { case "Epoch": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -779,7 +785,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Value: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -790,7 +796,7 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { t.Value = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Value[:]); err != nil { + if _, err := io.ReadFull(cr, t.Value); err != nil { return err } @@ -802,239 
+808,6 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) (err error) { return nil } -func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write([]byte{165}); err != nil { - return err - } - - // t.DealID (abi.DealID) (uint64) - if len("DealID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealID\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { - return err - } - if _, err := cw.WriteString(string("DealID")); err != nil { - return err - } - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { - return err - } - - // t.PublishCid (cid.Cid) (struct) - if len("PublishCid") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"PublishCid\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { - return err - } - if _, err := cw.WriteString(string("PublishCid")); err != nil { - return err - } - - if t.PublishCid == nil { - if _, err := cw.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { - return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) - } - } - - // t.DealProposal (market.DealProposal) (struct) - if len("DealProposal") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealProposal\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { - return err - } - if _, err := cw.WriteString(string("DealProposal")); err != nil { - return err - } - - if err := t.DealProposal.MarshalCBOR(cw); err != nil { - return err - } - - // t.DealSchedule (api.DealSchedule) (struct) - if len("DealSchedule") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"DealSchedule\" was too long") - } - - if err := 
cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { - return err - } - if _, err := cw.WriteString(string("DealSchedule")); err != nil { - return err - } - - if err := t.DealSchedule.MarshalCBOR(cw); err != nil { - return err - } - - // t.KeepUnsealed (bool) (bool) - if len("KeepUnsealed") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { - return err - } - if _, err := cw.WriteString(string("KeepUnsealed")); err != nil { - return err - } - - if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { - return err - } - return nil -} - -func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) { - *t = PieceDealInfo{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadString(cr) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.DealID (abi.DealID) (uint64) - case "DealID": - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = abi.DealID(extra) - - } - // t.PublishCid (cid.Cid) (struct) - case "PublishCid": - - { - - b, err := cr.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := cr.UnreadByte(); err != nil { - return err - } - - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) - } - - t.PublishCid = &c - } - - } 
- // t.DealProposal (market.DealProposal) (struct) - case "DealProposal": - - { - - b, err := cr.ReadByte() - if err != nil { - return err - } - if b != cbg.CborNull[0] { - if err := cr.UnreadByte(); err != nil { - return err - } - t.DealProposal = new(market.DealProposal) - if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) - } - } - - } - // t.DealSchedule (api.DealSchedule) (struct) - case "DealSchedule": - - { - - if err := t.DealSchedule.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) - } - - } - // t.KeepUnsealed (bool) (bool) - case "KeepUnsealed": - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.KeepUnsealed = false - case 21: - t.KeepUnsealed = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - - default: - // Field doesn't exist on this type, so ignore it - cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} func (t *SectorPiece) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) @@ -1048,7 +821,7 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error { } // t.Piece (abi.PieceInfo) (struct) - if len("Piece") > cbg.MaxLength { + if len("Piece") > 8192 { return xerrors.Errorf("Value in field \"Piece\" was too long") } @@ -1063,8 +836,8 @@ func (t *SectorPiece) MarshalCBOR(w io.Writer) error { return err } - // t.DealInfo (api.PieceDealInfo) (struct) - if len("DealInfo") > cbg.MaxLength { + // t.DealInfo (piece.PieceDealInfo) (struct) + if len("DealInfo") > 8192 { return xerrors.Errorf("Value in field \"DealInfo\" was too long") } @@ -1110,7 +883,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := 
cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1129,7 +902,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { } } - // t.DealInfo (api.PieceDealInfo) (struct) + // t.DealInfo (piece.PieceDealInfo) (struct) case "DealInfo": { @@ -1142,7 +915,7 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { if err := cr.UnreadByte(); err != nil { return err } - t.DealInfo = new(PieceDealInfo) + t.DealInfo = new(piece.PieceDealInfo) if err := t.DealInfo.UnmarshalCBOR(cr); err != nil { return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err) } @@ -1158,160 +931,3 @@ func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) { return nil } -func (t *DealSchedule) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write([]byte{162}); err != nil { - return err - } - - // t.EndEpoch (abi.ChainEpoch) (int64) - if len("EndEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"EndEpoch\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { - return err - } - if _, err := cw.WriteString(string("EndEpoch")); err != nil { - return err - } - - if t.EndEpoch >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { - return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { - return err - } - } - - // t.StartEpoch (abi.ChainEpoch) (int64) - if len("StartEpoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"StartEpoch\" was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("StartEpoch"))); err != nil { - return err - } - if _, err := cw.WriteString(string("StartEpoch")); err != nil { - return err - } - - if t.StartEpoch >= 0 { - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { - 
return err - } - } else { - if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { - return err - } - } - return nil -} - -func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) { - *t = DealSchedule{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadString(cr) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.EndEpoch (abi.ChainEpoch) (int64) - case "EndEpoch": - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative overflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.EndEpoch = abi.ChainEpoch(extraI) - } - // t.StartEpoch (abi.ChainEpoch) (int64) - case "StartEpoch": - { - maj, extra, err := cr.ReadHeader() - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative overflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.StartEpoch = abi.ChainEpoch(extraI) - } - - default: - // Field doesn't exist on this type, so ignore it - 
cbg.ScanForLinks(r, func(cid.Cid) {}) - } - } - - return nil -} diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index b31c25b86..bf76444e6 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -40,6 +40,7 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -152,13 +153,14 @@ func init() { addExample(map[verifreg.ClaimId]verifreg.Claim{}) addExample(map[string]int{"name": 42}) addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()}) + addExample(abi.ActorID(1000)) + addExample(map[string]types.Actor{ + "t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor), + }) addExample(&types.ExecutionTrace{ Msg: ExampleValue("init", reflect.TypeOf(types.MessageTrace{}), nil).(types.MessageTrace), MsgRct: ExampleValue("init", reflect.TypeOf(types.ReturnTrace{}), nil).(types.ReturnTrace), }) - addExample(map[string]types.Actor{ - "t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor), - }) addExample(map[string]api.MarketDeal{ "t026363": ExampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal), }) @@ -207,7 +209,6 @@ func init() { si := uint64(12) addExample(&si) addExample(retrievalmarket.DealID(5)) - addExample(abi.ActorID(1000)) addExample(map[string]cid.Cid{}) addExample(map[string][]api.SealedRef{ "98000": { @@ -355,6 +356,10 @@ func init() { addExample(map[string]bitfield.BitField{ "": bitfield.NewFromSet([]uint64{5, 6, 7, 10}), }) + addExample(&api.RaftStateData{ + NonceMap: make(map[address.Address]uint64), + MsgUuids: make(map[uuid.UUID]*types.SignedMessage), + }) addExample(http.Header{ "Authorization": []string{"Bearer ey.."}, @@ -402,6 
+407,32 @@ func init() { percent := types.Percent(123) addExample(percent) addExample(&percent) + + addExample(&miner.PieceActivationManifest{ + CID: c, + Size: 2032, + VerifiedAllocationKey: nil, + Notify: nil, + }) + + addExample(&types.ActorEventBlock{ + Codec: 0x51, + Value: []byte("ddata"), + }) + + addExample(&types.ActorEventFilter{ + Addresses: []address.Address{addr}, + Fields: map[string][]types.ActorEventBlock{ + "abc": { + { + Codec: 0x51, + Value: []byte("ddata"), + }, + }, + }, + FromHeight: epochPtr(1010), + ToHeight: epochPtr(1020), + }) } func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) { @@ -507,6 +538,11 @@ func exampleStruct(method string, t, parent reflect.Type) interface{} { return ns.Interface() } +func epochPtr(ei int64) *abi.ChainEpoch { + ep := abi.ChainEpoch(ei) + return &ep +} + type Visitor struct { Root string Methods map[string]ast.Node diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index ed9fe740e..2f4eb2990 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -1626,6 +1626,21 @@ func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) } +// GetActorEventsRaw mocks base method. +func (m *MockFullNode) GetActorEventsRaw(arg0 context.Context, arg1 *types.ActorEventFilter) ([]*types.ActorEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActorEventsRaw", arg0, arg1) + ret0, _ := ret[0].([]*types.ActorEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetActorEventsRaw indicates an expected call of GetActorEventsRaw. 
+func (mr *MockFullNodeMockRecorder) GetActorEventsRaw(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActorEventsRaw", reflect.TypeOf((*MockFullNode)(nil).GetActorEventsRaw), arg0, arg1) +} + // ID mocks base method. func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { m.ctrl.T.Helper() @@ -2919,6 +2934,36 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4) } +// RaftLeader mocks base method. +func (m *MockFullNode) RaftLeader(arg0 context.Context) (peer.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RaftLeader", arg0) + ret0, _ := ret[0].(peer.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RaftLeader indicates an expected call of RaftLeader. +func (mr *MockFullNodeMockRecorder) RaftLeader(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftLeader", reflect.TypeOf((*MockFullNode)(nil).RaftLeader), arg0) +} + +// RaftState mocks base method. +func (m *MockFullNode) RaftState(arg0 context.Context) (*api.RaftStateData, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RaftState", arg0) + ret0, _ := ret[0].(*api.RaftStateData) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RaftState indicates an expected call of RaftState. +func (mr *MockFullNodeMockRecorder) RaftState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftState", reflect.TypeOf((*MockFullNode)(nil).RaftState), arg0) +} + // Session mocks base method. 
func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) { m.ctrl.T.Helper() @@ -3158,6 +3203,36 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) } +// StateGetAllAllocations mocks base method. +func (m *MockFullNode) StateGetAllAllocations(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllAllocations", arg0, arg1) + ret0, _ := ret[0].(map[verifreg.AllocationId]verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllAllocations indicates an expected call of StateGetAllAllocations. +func (mr *MockFullNodeMockRecorder) StateGetAllAllocations(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllAllocations", reflect.TypeOf((*MockFullNode)(nil).StateGetAllAllocations), arg0, arg1) +} + +// StateGetAllClaims mocks base method. +func (m *MockFullNode) StateGetAllClaims(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllClaims", arg0, arg1) + ret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllClaims indicates an expected call of StateGetAllClaims. +func (mr *MockFullNodeMockRecorder) StateGetAllClaims(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllClaims", reflect.TypeOf((*MockFullNode)(nil).StateGetAllClaims), arg0, arg1) +} + // StateGetAllocation mocks base method. 
func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { m.ctrl.T.Helper() @@ -3188,6 +3263,21 @@ func (mr *MockFullNodeMockRecorder) StateGetAllocationForPendingDeal(arg0, arg1, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2) } +// StateGetAllocationIdForPendingDeal mocks base method. +func (m *MockFullNode) StateGetAllocationIdForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (verifreg.AllocationId, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocationIdForPendingDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(verifreg.AllocationId) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocationIdForPendingDeal indicates an expected call of StateGetAllocationIdForPendingDeal. +func (mr *MockFullNodeMockRecorder) StateGetAllocationIdForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationIdForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationIdForPendingDeal), arg0, arg1, arg2) +} + // StateGetAllocations mocks base method. func (m *MockFullNode) StateGetAllocations(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { m.ctrl.T.Helper() @@ -3893,6 +3983,21 @@ func (mr *MockFullNodeMockRecorder) StateWaitMsg(arg0, arg1, arg2, arg3, arg4 in return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateWaitMsg", reflect.TypeOf((*MockFullNode)(nil).StateWaitMsg), arg0, arg1, arg2, arg3, arg4) } +// SubscribeActorEventsRaw mocks base method. 
+func (m *MockFullNode) SubscribeActorEventsRaw(arg0 context.Context, arg1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubscribeActorEventsRaw", arg0, arg1) + ret0, _ := ret[0].(<-chan *types.ActorEvent) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubscribeActorEventsRaw indicates an expected call of SubscribeActorEventsRaw. +func (mr *MockFullNodeMockRecorder) SubscribeActorEventsRaw(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeActorEventsRaw", reflect.TypeOf((*MockFullNode)(nil).SubscribeActorEventsRaw), arg0, arg1) +} + // SyncCheckBad mocks base method. func (m *MockFullNode) SyncCheckBad(arg0 context.Context, arg1 cid.Cid) (string, error) { m.ctrl.T.Helper() diff --git a/api/proxy_gen.go b/api/proxy_gen.go index c07fc3a61..4df81369b 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -35,11 +35,13 @@ import ( apitypes "github.com/filecoin-project/lotus/api/types" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" "github.com/filecoin-project/lotus/journal/alerting" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer/fsutil" "github.com/filecoin-project/lotus/storage/sealer/sealtasks" @@ -333,6 +335,8 @@ type FullNodeMethods struct { GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `perm:"read"` + GetActorEventsRaw func(p0 context.Context, 
p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) `perm:"read"` + MarketAddBalance func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) `perm:"sign"` MarketGetReserved func(p0 context.Context, p1 address.Address) (types.BigInt, error) `perm:"sign"` @@ -453,6 +457,10 @@ type FullNodeMethods struct { PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"` + RaftLeader func(p0 context.Context) (peer.ID, error) `perm:"read"` + + RaftState func(p0 context.Context) (*RaftStateData, error) `perm:"read"` + StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"` StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"` @@ -479,10 +487,16 @@ type FullNodeMethods struct { StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"` + StateGetAllAllocations func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"` + + StateGetAllClaims func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) `perm:"read"` + StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` + StateGetAllocationIdForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) `perm:"read"` + StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"` StateGetBeaconEntry func(p0 context.Context, p1 
abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"` @@ -577,6 +591,8 @@ type FullNodeMethods struct { StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `perm:"read"` + SubscribeActorEventsRaw func(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) `perm:"read"` + SyncCheckBad func(p0 context.Context, p1 cid.Cid) (string, error) `perm:"read"` SyncCheckpoint func(p0 context.Context, p1 types.TipSetKey) error `perm:"admin"` @@ -639,6 +655,8 @@ type GatewayMethods struct { ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `` + ChainGetEvents func(p0 context.Context, p1 cid.Cid) ([]types.Event, error) `` + ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `` ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `` @@ -743,6 +761,8 @@ type GatewayMethods struct { GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `` + GetActorEventsRaw func(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) `` + MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) `` MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) `` @@ -817,6 +837,8 @@ type GatewayMethods struct { StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `` + SubscribeActorEventsRaw func(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) `` + Version func(p0 context.Context) (APIVersion, error) `` WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `` @@ -1081,7 +1103,7 @@ type StorageMinerMethods struct { SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` - SectorAddPieceToAny func(p0 context.Context, p1 
abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) `perm:"admin"` + SectorAddPieceToAny func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) `perm:"admin"` SectorCommitFlush func(p0 context.Context) ([]sealiface.CommitBatchRes, error) `perm:"admin"` @@ -2572,6 +2594,17 @@ func (s *FullNodeStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messa return nil, ErrNotSupported } +func (s *FullNodeStruct) GetActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) { + if s.Internal.GetActorEventsRaw == nil { + return *new([]*types.ActorEvent), ErrNotSupported + } + return s.Internal.GetActorEventsRaw(p0, p1) +} + +func (s *FullNodeStub) GetActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) { + return *new([]*types.ActorEvent), ErrNotSupported +} + func (s *FullNodeStruct) MarketAddBalance(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt) (cid.Cid, error) { if s.Internal.MarketAddBalance == nil { return *new(cid.Cid), ErrNotSupported @@ -3232,6 +3265,28 @@ func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address return *new(cid.Cid), ErrNotSupported } +func (s *FullNodeStruct) RaftLeader(p0 context.Context) (peer.ID, error) { + if s.Internal.RaftLeader == nil { + return *new(peer.ID), ErrNotSupported + } + return s.Internal.RaftLeader(p0) +} + +func (s *FullNodeStub) RaftLeader(p0 context.Context) (peer.ID, error) { + return *new(peer.ID), ErrNotSupported +} + +func (s *FullNodeStruct) RaftState(p0 context.Context) (*RaftStateData, error) { + if s.Internal.RaftState == nil { + return nil, ErrNotSupported + } + return s.Internal.RaftState(p0) +} + +func (s *FullNodeStub) RaftState(p0 context.Context) (*RaftStateData, error) { + return nil, ErrNotSupported +} + func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 
types.TipSetKey) (address.Address, error) { if s.Internal.StateAccountKey == nil { return *new(address.Address), ErrNotSupported @@ -3375,6 +3430,28 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2 return nil, ErrNotSupported } +func (s *FullNodeStruct) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { + if s.Internal.StateGetAllAllocations == nil { + return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported + } + return s.Internal.StateGetAllAllocations(p0, p1) +} + +func (s *FullNodeStub) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { + return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported +} + +func (s *FullNodeStruct) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) { + if s.Internal.StateGetAllClaims == nil { + return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported + } + return s.Internal.StateGetAllClaims(p0, p1) +} + +func (s *FullNodeStub) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) { + return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported +} + func (s *FullNodeStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) { if s.Internal.StateGetAllocation == nil { return nil, ErrNotSupported @@ -3397,6 +3474,17 @@ func (s *FullNodeStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 a return nil, ErrNotSupported } +func (s *FullNodeStruct) StateGetAllocationIdForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) { + if s.Internal.StateGetAllocationIdForPendingDeal == nil { + return 
*new(verifreg.AllocationId), ErrNotSupported + } + return s.Internal.StateGetAllocationIdForPendingDeal(p0, p1, p2) +} + +func (s *FullNodeStub) StateGetAllocationIdForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (verifreg.AllocationId, error) { + return *new(verifreg.AllocationId), ErrNotSupported +} + func (s *FullNodeStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { if s.Internal.StateGetAllocations == nil { return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported @@ -3914,6 +4002,17 @@ func (s *FullNodeStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p return nil, ErrNotSupported } +func (s *FullNodeStruct) SubscribeActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + if s.Internal.SubscribeActorEventsRaw == nil { + return nil, ErrNotSupported + } + return s.Internal.SubscribeActorEventsRaw(p0, p1) +} + +func (s *FullNodeStub) SubscribeActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + return nil, ErrNotSupported +} + func (s *FullNodeStruct) SyncCheckBad(p0 context.Context, p1 cid.Cid) (string, error) { if s.Internal.SyncCheckBad == nil { return "", ErrNotSupported @@ -4189,6 +4288,17 @@ func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*Bl return nil, ErrNotSupported } +func (s *GatewayStruct) ChainGetEvents(p0 context.Context, p1 cid.Cid) ([]types.Event, error) { + if s.Internal.ChainGetEvents == nil { + return *new([]types.Event), ErrNotSupported + } + return s.Internal.ChainGetEvents(p0, p1) +} + +func (s *GatewayStub) ChainGetEvents(p0 context.Context, p1 cid.Cid) ([]types.Event, error) { + return *new([]types.Event), ErrNotSupported +} + func (s *GatewayStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { if s.Internal.ChainGetGenesis == nil { return 
nil, ErrNotSupported @@ -4761,6 +4871,17 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag return nil, ErrNotSupported } +func (s *GatewayStruct) GetActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) { + if s.Internal.GetActorEventsRaw == nil { + return *new([]*types.ActorEvent), ErrNotSupported + } + return s.Internal.GetActorEventsRaw(p0, p1) +} + +func (s *GatewayStub) GetActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) ([]*types.ActorEvent, error) { + return *new([]*types.ActorEvent), ErrNotSupported +} + func (s *GatewayStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) { if s.Internal.MinerGetBaseInfo == nil { return nil, ErrNotSupported @@ -5168,6 +5289,17 @@ func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3 return nil, ErrNotSupported } +func (s *GatewayStruct) SubscribeActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + if s.Internal.SubscribeActorEventsRaw == nil { + return nil, ErrNotSupported + } + return s.Internal.SubscribeActorEventsRaw(p0, p1) +} + +func (s *GatewayStub) SubscribeActorEventsRaw(p0 context.Context, p1 *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) Version(p0 context.Context) (APIVersion, error) { if s.Internal.Version == nil { return *new(APIVersion), ErrNotSupported @@ -6400,14 +6532,14 @@ func (s *StorageMinerStub) SectorAbortUpgrade(p0 context.Context, p1 abi.SectorN return ErrNotSupported } -func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) { +func (s *StorageMinerStruct) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) { if 
s.Internal.SectorAddPieceToAny == nil { return *new(SectorOffset), ErrNotSupported } return s.Internal.SectorAddPieceToAny(p0, p1, p2, p3) } -func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 PieceDealInfo) (SectorOffset, error) { +func (s *StorageMinerStub) SectorAddPieceToAny(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data, p3 piece.PieceDealInfo) (SectorOffset, error) { return *new(SectorOffset), ErrNotSupported } diff --git a/api/types.go b/api/types.go index 7fd607750..5fe9ffca3 100644 --- a/api/types.go +++ b/api/types.go @@ -69,6 +69,11 @@ type MessageSendSpec struct { MaximizeFeeCap bool } +type MpoolMessageWhole struct { + Msg *types.Message + Spec *MessageSendSpec +} + // GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync type GraphSyncDataTransfer struct { // GraphSync request id for this transfer @@ -344,6 +349,66 @@ type ForkUpgradeParams struct { UpgradeLightningHeight abi.ChainEpoch UpgradeThunderHeight abi.ChainEpoch UpgradeWatermelonHeight abi.ChainEpoch + UpgradeDragonHeight abi.ChainEpoch + UpgradePhoenixHeight abi.ChainEpoch +} + +type NonceMapType map[address.Address]uint64 +type MsgUuidMapType map[uuid.UUID]*types.SignedMessage + +type RaftStateData struct { + NonceMap NonceMapType + MsgUuids MsgUuidMapType +} + +func (n *NonceMapType) MarshalJSON() ([]byte, error) { + marshalled := make(map[string]uint64) + for a, n := range *n { + marshalled[a.String()] = n + } + return json.Marshal(marshalled) +} + +func (n *NonceMapType) UnmarshalJSON(b []byte) error { + unmarshalled := make(map[string]uint64) + err := json.Unmarshal(b, &unmarshalled) + if err != nil { + return err + } + *n = make(map[address.Address]uint64) + for saddr, nonce := range unmarshalled { + a, err := address.NewFromString(saddr) + if err != nil { + return err + } + (*n)[a] = nonce + } + return nil +} + +func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) { + 
marshalled := make(map[string]*types.SignedMessage) + for u, msg := range *m { + marshalled[u.String()] = msg + } + return json.Marshal(marshalled) +} + +func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error { + unmarshalled := make(map[string]*types.SignedMessage) + err := json.Unmarshal(b, &unmarshalled) + if err != nil { + return err + } + *m = make(map[uuid.UUID]*types.SignedMessage) + for suid, msg := range unmarshalled { + u, err := uuid.Parse(suid) + if err != nil { + return err + } + (*m)[u] = msg + } + return nil } // ChainExportConfig holds configuration for chain ranged exports. diff --git a/api/v0api/full.go b/api/v0api/full.go index d92d5a95c..db84ddc87 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -537,10 +537,14 @@ type FullNode interface { StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read // StateGetAllocations returns the all the allocations for a given client. StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read + // StateGetAllAllocations returns the all the allocations available in verified registry actor. + StateGetAllAllocations(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read // StateGetClaim returns the claim for a given address and claim ID. StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read // StateGetClaims returns the all the claims for a given provider. StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read + // StateGetAllClaims returns the all the claims available in verified registry actor. 
+ StateGetAllClaims(ctx context.Context, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read // StateLookupID retrieves the ID address of the given address StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read // StateAccountKey returns the public key address of the given ID address diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index bd37f6429..90c25d4a7 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -280,6 +280,10 @@ type FullNodeMethods struct { StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"` + StateGetAllAllocations func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"` + + StateGetAllClaims func(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) `perm:"read"` + StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"` @@ -1837,6 +1841,28 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2 return nil, ErrNotSupported } +func (s *FullNodeStruct) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { + if s.Internal.StateGetAllAllocations == nil { + return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported + } + return s.Internal.StateGetAllAllocations(p0, p1) +} + +func (s *FullNodeStub) StateGetAllAllocations(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) { + return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), 
ErrNotSupported +} + +func (s *FullNodeStruct) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) { + if s.Internal.StateGetAllClaims == nil { + return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported + } + return s.Internal.StateGetAllClaims(p0, p1) +} + +func (s *FullNodeStub) StateGetAllClaims(p0 context.Context, p1 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) { + return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported +} + func (s *FullNodeStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) { if s.Internal.StateGetAllocation == nil { return nil, ErrNotSupported diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go index 7a722ed25..6b70e0e49 100644 --- a/api/v0api/v0mocks/mock_full.go +++ b/api/v0api/v0mocks/mock_full.go @@ -2338,6 +2338,36 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2) } +// StateGetAllAllocations mocks base method. +func (m *MockFullNode) StateGetAllAllocations(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllAllocations", arg0, arg1) + ret0, _ := ret[0].(map[verifreg.AllocationId]verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllAllocations indicates an expected call of StateGetAllAllocations. 
+func (mr *MockFullNodeMockRecorder) StateGetAllAllocations(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllAllocations", reflect.TypeOf((*MockFullNode)(nil).StateGetAllAllocations), arg0, arg1) +} + +// StateGetAllClaims mocks base method. +func (m *MockFullNode) StateGetAllClaims(arg0 context.Context, arg1 types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllClaims", arg0, arg1) + ret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllClaims indicates an expected call of StateGetAllClaims. +func (mr *MockFullNodeMockRecorder) StateGetAllClaims(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllClaims", reflect.TypeOf((*MockFullNode)(nil).StateGetAllClaims), arg0, arg1) +} + // StateGetAllocation mocks base method. 
func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { m.ctrl.T.Helper() diff --git a/blockstore/cbor_gen.go b/blockstore/cbor_gen.go index 221f13676..c53e9e850 100644 --- a/blockstore/cbor_gen.go +++ b/blockstore/cbor_gen.go @@ -44,7 +44,7 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error { } // t.Cid ([]cid.Cid) (slice) - if len(t.Cid) > cbg.MaxLength { + if len(t.Cid) > 8192 { return xerrors.Errorf("Slice value in field t.Cid was too long") } @@ -60,7 +60,7 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error { } // t.Data ([][]uint8) (slice) - if len(t.Data) > cbg.MaxLength { + if len(t.Data) > 8192 { return xerrors.Errorf("Slice value in field t.Data was too long") } @@ -68,7 +68,7 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error { return err } for _, v := range t.Data { - if len(v) > cbg.ByteArrayMaxLen { + if len(v) > 2097152 { return xerrors.Errorf("Byte array in field v was too long") } @@ -76,9 +76,10 @@ func (t *NetRpcReq) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(v[:]); err != nil { + if _, err := cw.Write(v); err != nil { return err } + } return nil } @@ -140,7 +141,7 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Cid: array too large (%d)", extra) } @@ -171,9 +172,9 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { t.Cid[i] = c } + } } - // t.Data ([][]uint8) (slice) maj, extra, err = cr.ReadHeader() @@ -181,7 +182,7 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Data: array too large (%d)", extra) } @@ -207,7 +208,7 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Data[i]: byte array too 
large (%d)", extra) } if maj != cbg.MajByteString { @@ -218,12 +219,12 @@ func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) { t.Data[i] = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Data[i][:]); err != nil { + if _, err := io.ReadFull(cr, t.Data[i]); err != nil { return err } + } } - return nil } @@ -253,7 +254,7 @@ func (t *NetRpcResp) MarshalCBOR(w io.Writer) error { } // t.Data ([]uint8) (slice) - if len(t.Data) > cbg.ByteArrayMaxLen { + if len(t.Data) > 2097152 { return xerrors.Errorf("Byte array in field t.Data was too long") } @@ -261,9 +262,10 @@ func (t *NetRpcResp) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Data[:]); err != nil { + if _, err := cw.Write(t.Data); err != nil { return err } + return nil } @@ -324,7 +326,7 @@ func (t *NetRpcResp) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Data: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -335,9 +337,10 @@ func (t *NetRpcResp) UnmarshalCBOR(r io.Reader) (err error) { t.Data = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Data[:]); err != nil { + if _, err := io.ReadFull(cr, t.Data); err != nil { return err } + return nil } @@ -361,7 +364,7 @@ func (t *NetRpcErr) MarshalCBOR(w io.Writer) error { } // t.Msg (string) (string) - if len(t.Msg) > cbg.MaxLength { + if len(t.Msg) > 8192 { return xerrors.Errorf("Value in field t.Msg was too long") } @@ -426,7 +429,7 @@ func (t *NetRpcErr) UnmarshalCBOR(r io.Reader) (err error) { // t.Msg (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } diff --git a/blockstore/ipfs.go b/blockstore/ipfs.go index 8e4224535..f0606519f 100644 --- a/blockstore/ipfs.go +++ b/blockstore/ipfs.go @@ -5,9 +5,7 @@ import ( "context" "io" - iface "github.com/ipfs/boxo/coreiface" - "github.com/ipfs/boxo/coreiface/options" - 
"github.com/ipfs/boxo/coreiface/path" + "github.com/ipfs/boxo/path" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/multiformats/go-multiaddr" @@ -15,6 +13,8 @@ import ( "golang.org/x/xerrors" rpc "github.com/filecoin-project/kubo-api-client" + iface "github.com/filecoin-project/kubo-api-client/coreiface" + "github.com/filecoin-project/kubo-api-client/coreiface/options" ) type IPFSBlockstore struct { @@ -83,7 +83,7 @@ func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { } func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { - _, err := i.offlineAPI.Block().Stat(ctx, path.IpldPath(cid)) + _, err := i.offlineAPI.Block().Stat(ctx, path.FromCid(cid)) if err != nil { // The underlying client is running in Offline mode. // Stat() will fail with an err if the block isn't in the @@ -99,7 +99,7 @@ func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { } func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { - rd, err := i.api.Block().Get(ctx, path.IpldPath(cid)) + rd, err := i.api.Block().Get(ctx, path.FromCid(cid)) if err != nil { return nil, xerrors.Errorf("getting ipfs block: %w", err) } @@ -113,7 +113,7 @@ func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, er } func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { - st, err := i.api.Block().Stat(ctx, path.IpldPath(cid)) + st, err := i.api.Block().Stat(ctx, path.FromCid(cid)) if err != nil { return 0, xerrors.Errorf("getting ipfs block: %w", err) } diff --git a/build/actors/v13.tar.zst b/build/actors/v13.tar.zst new file mode 100644 index 000000000..12f6215f0 Binary files /dev/null and b/build/actors/v13.tar.zst differ diff --git a/build/bootstrap/calibnet.pi b/build/bootstrap/calibnet.pi index 4586d93b1..0a14d24ba 100644 --- a/build/bootstrap/calibnet.pi +++ b/build/bootstrap/calibnet.pi @@ -1,5 +1,4 @@ 
-/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWCi2w8U4DDB9xqrejb5KYHaQv2iA2AJJ6uzG3iQxNLBMy -/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWDTayrBojBn9jWNNUih4nNQQBGJD7Zo3gQCKgBkUsS6dp -/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWNRxTHUn8bf7jz1KEUPMc2dMgGfa4f8ZJTsquVSn3vHCG -/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWFWUqE9jgXvcKHWieYs9nhyp6NF4ftwLGAHm4sCv73jjK /dns4/calibration.node.glif.io/tcp/1237/p2p/12D3KooWQPYouEAsUQKzvFUA9sQ8tz4rfpqtTzh2eL6USd9bwg7x +/dns4/bootstrap-calibnet-0.chainsafe-fil.io/tcp/34000/p2p/12D3KooWABQ5gTDHPWyvhJM7jPhtNwNJruzTEo32Lo4gcS5ABAMm +/dns4/bootstrap-calibnet-1.chainsafe-fil.io/tcp/34000/p2p/12D3KooWS3ZRhMYL67b4bD5XQ6fcpTyVQXnDe8H89LvwrDqaSbiT +/dns4/bootstrap-calibnet-2.chainsafe-fil.io/tcp/34000/p2p/12D3KooWEiBN8jBX8EBoM3M47pVRLRWV812gDRUJhMxgyVkUoR48 diff --git a/build/bootstrap/mainnet.pi b/build/bootstrap/mainnet.pi index 7838158de..5b13ae755 100644 --- a/build/bootstrap/mainnet.pi +++ b/build/bootstrap/mainnet.pi @@ -1,12 +1,9 @@ -/dns4/bootstrap-0.mainnet.filops.net/tcp/1347/p2p/12D3KooWCVe8MmsEMes2FzgTpt9fXtmCY7wrq91GRiaC8PHSCCBj -/dns4/bootstrap-1.mainnet.filops.net/tcp/1347/p2p/12D3KooWCwevHg1yLCvktf2nvLu7L9894mcrJR4MsBCcm4syShVc -/dns4/bootstrap-2.mainnet.filops.net/tcp/1347/p2p/12D3KooWEWVwHGn2yR36gKLozmb4YjDJGerotAPGxmdWZx2nxMC4 -/dns4/bootstrap-6.mainnet.filops.net/tcp/1347/p2p/12D3KooWP5MwCiqdMETF9ub1P3MbCvQCcfconnYHbWg6sUJcDRQQ -/dns4/bootstrap-7.mainnet.filops.net/tcp/1347/p2p/12D3KooWRs3aY1p3juFjPy8gPN95PEQChm2QKGUCAdcDCC4EBMKf -/dns4/bootstrap-8.mainnet.filops.net/tcp/1347/p2p/12D3KooWScFR7385LTyR4zU1bYdzSiiAb5rnNABfVahPvVSzyTkR /dns4/lotus-bootstrap.ipfsforce.com/tcp/41778/p2p/12D3KooWGhufNmZHF3sv48aQeS13ng5XVJZ9E6qy2Ms4VzqeUsHk /dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz /dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u 
/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt /dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt /dns4/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST +/dns4/bootstrap-mainnet-0.chainsafe-fil.io/tcp/34000/p2p/12D3KooWKKkCZbcigsWTEu1cgNetNbZJqeNtysRtFpq7DTqw3eqH +/dns4/bootstrap-mainnet-1.chainsafe-fil.io/tcp/34000/p2p/12D3KooWGnkd9GQKo3apkShQDaq1d6cKJJmsVe6KiQkacUk1T8oZ +/dns4/bootstrap-mainnet-2.chainsafe-fil.io/tcp/34000/p2p/12D3KooWHQRSDFv4FvAjtU32shQ7znz7oRbLBryXzZ9NMK2feyyH diff --git a/build/builtin_actors.go b/build/builtin_actors.go index 4d4f86d22..6aace0bec 100644 --- a/build/builtin_actors.go +++ b/build/builtin_actors.go @@ -48,6 +48,7 @@ func init() { if NetworkBundle == "calibrationnet" { actors.AddActorMeta("storageminer", cid.MustParse("bafk2bzacecnh2ouohmonvebq7uughh4h3ppmg4cjsk74dzxlbbtlcij4xbzxq"), actorstypes.Version12) actors.AddActorMeta("storageminer", cid.MustParse("bafk2bzaced7emkbbnrewv5uvrokxpf5tlm4jslu2jsv77ofw2yqdglg657uie"), actorstypes.Version12) + actors.AddActorMeta("verifiedregistry", cid.MustParse("bafk2bzacednskl3bykz5qpo54z2j2p4q44t5of4ktd6vs6ymmg2zebsbxazkm"), actorstypes.Version13) } } @@ -194,7 +195,8 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata, // The following manifest cids existed temporarily on the calibnet testnet // We include them in our builtin bundle, but intentionally omit from metadata if root == cid.MustParse("bafy2bzacedrunxfqta5skb7q7x32lnp4efz2oq7fn226ffm7fu5iqs62jkmvs") || - root == cid.MustParse("bafy2bzacebl4w5ptfvuw6746w7ev562idkbf5ppq72e6zub22435ws2rukzru") { + root == cid.MustParse("bafy2bzacebl4w5ptfvuw6746w7ev562idkbf5ppq72e6zub22435ws2rukzru") || + root == cid.MustParse("bafy2bzacea4firkyvt2zzdwqjrws5pyeluaesh6uaid246tommayr4337xpmi") { continue } bundles = append(bundles, &BuiltinActorsMetadata{ diff --git 
a/build/builtin_actors_gen.go b/build/builtin_actors_gen.go index 11c70f9ec..4d2a6674c 100644 --- a/build/builtin_actors_gen.go +++ b/build/builtin_actors_gen.go @@ -95,10 +95,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzaceb37hxeuoo5rgf6ansrdl2ykm5v5zp6kireubn4orcopr67jbxv6k"), }, }, { - Network: "butterflynet", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacectxvbk77ntedhztd6sszp2btrtvsmy7lp2ypnrk6yl74zb34t2cq"), + Network: "butterflynet", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzacectxvbk77ntedhztd6sszp2btrtvsmy7lp2ypnrk6yl74zb34t2cq"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacebp7anjdtg2sohyt6lromx4xs7nujtwdfcsffnptphaayabx7ysxs"), "cron": MustParseCid("bafk2bzacecu2y3awtemmglpkroiglulc2fj3gpdn6eazdqr6avcautiaighrg"), @@ -117,6 +117,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacec3vwj2chzaram3iqupkbfiein5h2l5qiltlrngbju2vg5umelclm"), "verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"), }, +}, { + Network: "butterflynet", + Version: 13, + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacec75zk7ufzwx6tg5avls5fxdjx5asaqmd2bfqdvkqrkzoxgyflosu"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacedl533kwbzouqxibejpwp6syfdekvmzy4vmmno6j4iaydbdmv4xek"), + "cron": MustParseCid("bafk2bzacecimv5xnuwyoqgxk26qt4xqpgntleret475pnh35s3vvhqtdct4ow"), + "datacap": MustParseCid("bafk2bzacebpdd4ctavhs7wkcykfahpifct3p4hbptgtf4jfrqcp2trtlygvow"), + "eam": MustParseCid("bafk2bzaceahw5rrgj7prgbnmn237di7ymjz2ssea32wr525jydpfrwpuhs67m"), + "ethaccount": MustParseCid("bafk2bzacebrslcbew5mq3le2zsn36xqxd4gt5hryeoslxnuqwgw3rhuwh6ygu"), + "evm": MustParseCid("bafk2bzaced5smz4lhpem4mbr7igcskv3e5qopbdp7dqshww2qs4ahacgzjzo4"), + "init": 
MustParseCid("bafk2bzacedgj6hawhdw2ot2ufisci374o2bq6bfkvlvdt6q7s3uoe5ffyv43k"), + "multisig": MustParseCid("bafk2bzacectnnnpwyqiccaymy3h6ghu74ghjrqyhtqv5odfd4opivzebjj6to"), + "paymentchannel": MustParseCid("bafk2bzaceckhx44jawhzhkz6k23gfnv2gcutgb4j4ekhonj2plwaent4b2tpk"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacebbs3rlg7y3wbvxrj4wgbsqmasw4ksbbr3lyqbkaxj2t25qz6zzuy"), + "storagemarket": MustParseCid("bafk2bzaced3zmxsmlhp2nsiwkxcp2ugonbsebcd53t7htzo2jcoidvu464xmm"), + "storageminer": MustParseCid("bafk2bzacebedx7iaa2ruspxvghkg46ez7un5b7oiijjtnvddq2aot5wk7p7ry"), + "storagepower": MustParseCid("bafk2bzacebvne7m2l3hxxw4xa6oujol75x35yqpnlqiwx74jilyrop4cs7cse"), + "system": MustParseCid("bafk2bzaceacjmlxrvydlud77ilpzbscez46yedx6zjsj6olxsdeuv6d4x4cwe"), + "verifiedregistry": MustParseCid("bafk2bzacebs5muoq7ft2wgqojhjio7a4vltbyprqkmlr43ojlzbil4nwvj3jg"), + }, }, { Network: "calibrationnet", Version: 8, @@ -201,10 +224,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzaceceoo5jlom2zweh7kpye2vkj33wgqnkjshlsw2neemqkfg5g2rmvg"), }, }, { - Network: "calibrationnet", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacednzb3pkrfnbfhmoqtb3bc6dgvxszpqklf3qcc7qzcage4ewzxsca"), + Network: "calibrationnet", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzacednzb3pkrfnbfhmoqtb3bc6dgvxszpqklf3qcc7qzcage4ewzxsca"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacechwwxdqvggkdylm37zldjsra2ivkdzwp7fee56bzxbzs544wv6u6"), "cron": MustParseCid("bafk2bzacec4gdxxkqwxqqodsv6ug5dmdbqdfqwyqfek3yhxc2wweh5psxaeq6"), @@ -223,6 +246,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376"), "verifiedregistry": 
MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"), }, +}, { + Network: "calibrationnet", + Version: 13, + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacect4ktyujrwp6mjlsitnpvuw2pbuppz6w52sfljyo4agjevzm75qs"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzaceb3j36ri5y5mfklgp5emlvrms6g4733ss2j3l7jismrxq6ng3tcc6"), + "cron": MustParseCid("bafk2bzaceaz6rocamdxehgpwcbku6wlapwpgzyyvkrploj66mlqptsulf52bs"), + "datacap": MustParseCid("bafk2bzacea22nv5g3yngpxvonqfj4r2nkfk64y6yw2malicm7odk77x7zuads"), + "eam": MustParseCid("bafk2bzaceatqtjzj7623i426noaslouvluhz6e3md3vvquqzku5qj3532uaxg"), + "ethaccount": MustParseCid("bafk2bzacean3hs7ga5csw6g3uu7watxfnqv5uvxviebn3ba6vg4sagwdur5pu"), + "evm": MustParseCid("bafk2bzacec5ibmbtzuzjgwjmksm2n6zfq3gkicxqywwu7tsscqgdzajpfctxk"), + "init": MustParseCid("bafk2bzaced5sq72oemz6qwi6yssxwlos2g54zfprslrx5qfhhx2vlgsbvdpcs"), + "multisig": MustParseCid("bafk2bzacedbgei6jkx36fwdgvoohce4aghvpohqdhoco7p4thszgssms7olv2"), + "paymentchannel": MustParseCid("bafk2bzaceasmgmfsi4mjanxlowsub65fmevhzky4toeqbtw4kp6tmu4kxjpgq"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacedjyp6ll5ez27dfgldjj4tntxfvyp4pa5zkk7s5uhipzqjyx2gmuc"), + "storagemarket": MustParseCid("bafk2bzaceabolct6qdnefwcrtati2us3sxtxfghyqk6aamfhl6byyefmtssqi"), + "storageminer": MustParseCid("bafk2bzaceckzw3v7wqliyggvjvihz4wywchnnsie4frfvkm3fm5znb64mofri"), + "storagepower": MustParseCid("bafk2bzacea7t4wynzjajl442mpdqbnh3wusjusqtnzgpvefvweh4n2tgzgqhu"), + "system": MustParseCid("bafk2bzacedjnrb5glewazsxpcx6rwiuhl4kwrfcqolyprn6rrjtlzmthlhdq6"), + "verifiedregistry": MustParseCid("bafk2bzacebj2zdquagzy2xxn7up574oemg3w7ed3fe4aujkyhgdwj57voesn2"), + }, }, { Network: "caterpillarnet", Version: 8, @@ -316,10 +362,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": 
MustParseCid("bafk2bzacedaws3or3twy45ltcxucgvqijsje4x675ph6vup2w35smlfneamno"), }, }, { - Network: "caterpillarnet", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacebxiub6qsy67asvl5cx33x5vjbuqinalmf3xtnbmokxmmklzdkvei"), + Network: "caterpillarnet", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzacebxiub6qsy67asvl5cx33x5vjbuqinalmf3xtnbmokxmmklzdkvei"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacecereuhejfvodut5357cai4lmhsyr7uenhcxvmw6jpmhe6auuly32"), "cron": MustParseCid("bafk2bzacebo2whgy6jla4jsf5j4ovlqm2e4eepedlpw5wadas33yxmunis4b4"), @@ -338,6 +384,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacedye5j5uxox7knb6zlnhseaadztyav76mjbyk5qslhhbpiy5cdtt2"), "verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"), }, +}, { + Network: "caterpillarnet", + Version: 13, + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacedu7kk2zngxp7y3lynhtaht6vgadgn5jzkxe5nuowtwzasnogx63w"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacecro3uo6ypqhfzwdhnamzcole5qmhrbkx7qny6t2qsrcpqxelt6s2"), + "cron": MustParseCid("bafk2bzaceam3kci46y4siltbw7f4itoap34kp7b7pvn2fco5s2bvnotomwdbe"), + "datacap": MustParseCid("bafk2bzacecmtdspcbqmmjtsaz4vucuqoqjqfsgxjonns7tom7eblkngbcm7bw"), + "eam": MustParseCid("bafk2bzaceaudqhrt7djewopqdnryvwxagfufyt7ja4gdvovrxbh6edh6evgrw"), + "ethaccount": MustParseCid("bafk2bzaced676ds3z6xe333wr7frwq3f2iq5kjwp4okl3te6rne3xf7kuqrwm"), + "evm": MustParseCid("bafk2bzacebeih4jt2s6mel6x4hje7xmnugh6twul2a5axx4iczu7fu4wcdi6k"), + "init": MustParseCid("bafk2bzaceba7vvuzzwj5wqnq2bvpbgtxup53mhr3qybezbllftnxvpqbfymxo"), + "multisig": MustParseCid("bafk2bzaceapkajhnqoczrgry5javqbl7uebgmsbpqqfemzc4yb5q2dqia2qog"), + "paymentchannel": MustParseCid("bafk2bzacebg7xq4ca22gafmdbkcq357x7v6slflib4h3fnj4amsovg6ulqg3o"), + "placeholder": 
MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzaceajt4idf26ffnyipybcib55fykjxnek7oszkqzi7lu7mbgijmkgos"), + "storagemarket": MustParseCid("bafk2bzaceadfmay7pyl7osjsdmrireafasnjnoziacljy5ewrcsxpp56kzqbw"), + "storageminer": MustParseCid("bafk2bzaceardbn5a7aq5jxl7efr4btmsbl7txnxm4hrrd3llyhujuc2cr5vcs"), + "storagepower": MustParseCid("bafk2bzacear4563jznjqyseoy42xl6kenyqk6umv6xl3bp5bsjb3hbs6sp6bm"), + "system": MustParseCid("bafk2bzacecc5oavxivfnvirx2g7megpdf6lugooyoc2wijloju247xzjcdezy"), + "verifiedregistry": MustParseCid("bafk2bzacebnkdt42mpf5emypo6iroux3hszfh5yt54v2mmnnura3ketholly4"), + }, }, { Network: "devnet", Version: 8, @@ -422,10 +491,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacebdqi5tr5pjnem5nylg2zbqcugvi7oxi35bhnrfudx4y4ufhlit2k"), }, }, { - Network: "devnet", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzaceasjdukhhyjbegpli247vbf5h64f7uvxhhebdihuqsj2mwisdwa6o"), + Network: "devnet", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzaceasjdukhhyjbegpli247vbf5h64f7uvxhhebdihuqsj2mwisdwa6o"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacedki4apynvdxxuoigmqkgaktgy2erjftoxqxqaklnelgveyaqknfu"), "cron": MustParseCid("bafk2bzacebjpczf7qtcisy3zdp3sqoohxe75tgupmdo5dr26vh7orzrsjn3b2"), @@ -444,6 +513,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacecnau5wddulbsvwn75tc3w75jrlvkybgrlxs4ngonqab6xq3eowvg"), "verifiedregistry": MustParseCid("bafk2bzacec37mddea65nvh4htsagtryfa3sq6i67utcupslyhzbhjhoy6hopa"), }, +}, { + Network: "devnet", + Version: 13, + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacecn7uxgehrqbcs462ktl2h23u23cmduy2etqj6xrd6tkkja56fna4"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacebev3fu5geeehpx577b3kvza4xsmmggmepjj7rlsnr27hpoq27q2i"), + 
"cron": MustParseCid("bafk2bzacedalzqahtuz2bmnf7uawbcujfhhe5xzv5ys5ufadu6ggs3tcu6lsy"), + "datacap": MustParseCid("bafk2bzaceb7ou2vn7ac4xidespoowq2q5w7ognr7s4ujy3xzzgiishajpe7le"), + "eam": MustParseCid("bafk2bzacedqic2qskattorj4svf6mbto2k76ej3ll3ugsyorqramrg7rpq3by"), + "ethaccount": MustParseCid("bafk2bzaceaoad7iknpywijigv2h3jyvkijff2oxvohzue533v5hby3iix5vdu"), + "evm": MustParseCid("bafk2bzacecjgiw26gagsn6a7tffkrgoor4zfgzfokp76u6cwervtmvjbopmwg"), + "init": MustParseCid("bafk2bzaced2obubqojxggeddr246cpwtyzi6knnq52jsvsc2fs3tuk2kh6dtg"), + "multisig": MustParseCid("bafk2bzacebquruzb6zho45orbdkku624t6w6jt4tudaqzraz4yh3li3jfstpg"), + "paymentchannel": MustParseCid("bafk2bzaceaydrilyxvflsuzr24hmw32qwz6sy4hgls73bhpveydcsqskdgpca"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzaceb74owpuzdddqoj2tson6ymbyuguqrnqefyiaxqvwm4ygitpabjrq"), + "storagemarket": MustParseCid("bafk2bzaceaw6dslv6pfqha4ynghq2imij5khnnjrie22kmfgtpie3bvxho6jq"), + "storageminer": MustParseCid("bafk2bzacecsputz6xygjfyrvx2d7bxkpp7b5v4icrmpckec7gnbabx2w377qs"), + "storagepower": MustParseCid("bafk2bzaceceyaa5yjwhxvvcqouob4l746zp5nesivr6enhtpimakdtby6kafi"), + "system": MustParseCid("bafk2bzaceaxg6k5vuozxlemfi5hv663m6jcawzu5puboo4znj73i36e3tsovs"), + "verifiedregistry": MustParseCid("bafk2bzacea2czkb4vt2iiiwdb6e57qfwqse4mk2pcyvwjmdl5ojbnla57oh2u"), + }, }, { Network: "hyperspace", Version: 8, @@ -551,10 +643,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacedej3dnr62g2je2abmyjg3xqv4otvh6e26du5fcrhvw7zgcaaez3a"), }, }, { - Network: "mainnet", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzaceapkgfggvxyllnmuogtwasmsv5qi2qzhc2aybockd6kag2g5lzaio"), + Network: "mainnet", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzaceapkgfggvxyllnmuogtwasmsv5qi2qzhc2aybockd6kag2g5lzaio"), Actors: map[string]cid.Cid{ 
"account": MustParseCid("bafk2bzaceboftg75mdiba7xbo2i3uvgtca4brhnr3u5ptihonixgpnrvhpxoa"), "cron": MustParseCid("bafk2bzacechxjkfe2cehx4s7skj3wzfpzf7zolds64khrrrs66bhazsemktls"), @@ -573,6 +665,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacebfqrja2hip7esf4eafxjmu6xcogoqu5xxtgdg7xa5szgvvdguchu"), "verifiedregistry": MustParseCid("bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q"), }, +}, { + Network: "mainnet", + Version: 13, + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzacedxnbtlsqdk76fsfmnhyvsblwyfducerwwtp3mqtx2wbrvs5idl52"), + "cron": MustParseCid("bafk2bzacebbopddyn5csb3fsuhh2an4ttd23x6qnwixgohlirj5ahtcudphyc"), + "datacap": MustParseCid("bafk2bzaceah42tfnhd7xnztawgf46gbvc3m2gudoxshlba2ucmmo2vy67t7ci"), + "eam": MustParseCid("bafk2bzaceb23bhvvcjsth7cn7vp3gbaphrutsaz7v6hkls3ogotzs4bnhm4mk"), + "ethaccount": MustParseCid("bafk2bzaceautge6zhuy6jbj3uldwoxwhpywuon6z3xfvmdbzpbdribc6zzmei"), + "evm": MustParseCid("bafk2bzacedq6v2lyuhgywhlllwmudfj2zufzcauxcsvvd34m2ek5xr55mvh2q"), + "init": MustParseCid("bafk2bzacedr4xacm3fts4vilyeiacjr2hpmwzclyzulbdo24lrfxbtau2wbai"), + "multisig": MustParseCid("bafk2bzacecr5zqarfqak42xqcfeulsxlavcltawsx2fvc7zsjtby6ti4b3wqc"), + "paymentchannel": MustParseCid("bafk2bzacebntdhfmyc24e7tm52ggx5tnw4i3hrr3jmllsepv3mibez4hywsa2"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacedq4q2kwkruu4xm7rkyygumlbw2yt4nimna2ivea4qarvtkohnuwu"), + "storagemarket": MustParseCid("bafk2bzacebjtoltdviyznpj34hh5qp6u257jnnbjole5rhqfixm7ug3epvrfu"), + "storageminer": MustParseCid("bafk2bzacebf4rrqyk7gcfggggul6nfpzay7f2ordnkwm7z2wcf4mq6r7i77t2"), + "storagepower": MustParseCid("bafk2bzacecjy4dkulvxppg3ocbmeixe2wgg6yxoyjxrm4ko2fm3uhpvfvam6e"), + "system": 
MustParseCid("bafk2bzacecyf523quuq2kdjfdvyty446z2ounmamtgtgeqnr3ynlu5cqrlt6e"), + "verifiedregistry": MustParseCid("bafk2bzacedkxehp7y7iyukbcje3wbpqcvufisos6exatkanyrbotoecdkrbta"), + }, }, { Network: "testing", Version: 8, @@ -657,10 +772,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"), }, }, { - Network: "testing", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzaceaaxd6ytavsek5bi5soqo7qamezuqfyfjy42es2clpbzu3pwzcmye"), + Network: "testing", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzaceaaxd6ytavsek5bi5soqo7qamezuqfyfjy42es2clpbzu3pwzcmye"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"), "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"), @@ -679,6 +794,29 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), "verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), }, +}, { + Network: "testing", + Version: 13, + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzacedg47dqxmtgzjch6i42kth72esd7w23gujyd6c6oppg3n6auag5ou"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"), + "cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"), + "datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"), + "eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"), + "ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"), + "evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"), + "init": 
MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"), + "multisig": MustParseCid("bafk2bzacebmftoql6dcyqf54xznwjg2bfgdsi67spqquwslpvvtvcx6qenhz2"), + "paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"), + "storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"), + "storageminer": MustParseCid("bafk2bzaceailclue4dba2edjethfjw6ycufcwsx4qjjmgsh77xcyprmogdjvu"), + "storagepower": MustParseCid("bafk2bzaceaqw6dhdjlqovhk3p4lb4sb25i5d6mhln2ir5m7tj6m4fegkgkinw"), + "system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"), + "verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"), + }, }, { Network: "testing-fake-proofs", Version: 8, @@ -763,10 +901,10 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "verifiedregistry": MustParseCid("bafk2bzacebp2r56wxadvfzpfbmqwfi3dlnwpmoc5u4tau2hfftbkuafkhye64"), }, }, { - Network: "testing-fake-proofs", - Version: 12, - BundleGitTag: "v12.0.0", - ManifestCid: MustParseCid("bafy2bzacecver4l5d6jiuzubhrtcxjjfdx6jnxbmyp4bselol2atgkhz3e3um"), + Network: "testing-fake-proofs", + Version: 12, + + ManifestCid: MustParseCid("bafy2bzacecver4l5d6jiuzubhrtcxjjfdx6jnxbmyp4bselol2atgkhz3e3um"), Actors: map[string]cid.Cid{ "account": MustParseCid("bafk2bzacea74qqkfvacykmq5emzqblh4f4nmxdkiyixxpzs7kkcfnbfa7cb6m"), "cron": MustParseCid("bafk2bzacecotbu7k6awdzfzakf7g5iaas6gswtunjnnb2xm2klqoshjgb4imy"), @@ -785,4 +923,27 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet "system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"), "verifiedregistry": 
MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"), }, +}, { + Network: "testing-fake-proofs", + Version: 13, + BundleGitTag: "v13.0.0", + ManifestCid: MustParseCid("bafy2bzaceaf7fz33sp2i5ag5xg5ompn3dwppqlbwfacrwuvzaqdbqrtni7m5q"), + Actors: map[string]cid.Cid{ + "account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"), + "cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"), + "datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"), + "eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"), + "ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"), + "evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"), + "init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"), + "multisig": MustParseCid("bafk2bzacedy4vldq4viv6bzzh4fueip3by3axsbgbh655lashddgumknc6pvs"), + "paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"), + "placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"), + "reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"), + "storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"), + "storageminer": MustParseCid("bafk2bzaceb6atn3k6yhmskgmc3lgfiwpzpfmaxzacohtnb2hivme2oroycqr6"), + "storagepower": MustParseCid("bafk2bzacedameh56mp2g4y7nprhax5sddbzcmpk5p7l523l45rtn2wjc6ah4e"), + "system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"), + "verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"), + }, }} diff --git a/build/drand.go b/build/drand.go index 3b976ac92..782ac8692 100644 --- a/build/drand.go +++ b/build/drand.go @@ -10,8 +10,8 @@ type DrandEnum int func DrandConfigSchedule() 
dtypes.DrandSchedule { out := dtypes.DrandSchedule{} - for start, config := range DrandSchedule { - out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[config]}) + for start, network := range DrandSchedule { + out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[network]}) } sort.Slice(out, func(i, j int) bool { @@ -27,6 +27,7 @@ const ( DrandDevnet DrandLocalnet DrandIncentinet + DrandQuicknet ) var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ @@ -36,14 +37,32 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ "https://api2.drand.sh", "https://api3.drand.sh", "https://drand.cloudflare.com", + "https://api.drand.secureweb3.com:6875", // Storswift }, Relays: []string{ "/dnsaddr/api.drand.sh/", "/dnsaddr/api2.drand.sh/", "/dnsaddr/api3.drand.sh/", }, + IsChained: true, ChainInfoJSON: `{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`, }, + DrandQuicknet: { + Servers: []string{ + "https://api.drand.sh", + "https://api2.drand.sh", + "https://api3.drand.sh", + "https://drand.cloudflare.com", + "https://api.drand.secureweb3.com:6875", // Storswift + }, + Relays: []string{ + "/dnsaddr/api.drand.sh/", + "/dnsaddr/api2.drand.sh/", + "/dnsaddr/api3.drand.sh/", + }, + IsChained: false, + ChainInfoJSON: `{"public_key":"83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a","period":3,"genesis_time":1692803367,"hash":"52db9ba70e0cc0f6eaf7803dd07447a1f5477735fd3f661792ba94600c84e971","groupHash":"f477d5c89f21a17c863a7f937c6a6d15859414d2be09cd448d4279af331c5d3e","schemeID":"bls-unchained-g1-rfc9380","metadata":{"beaconID":"quicknet"}}`, + }, 
DrandTestnet: { Servers: []string{ "https://pl-eu.testnet.drand.sh", @@ -55,6 +74,7 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ "/dnsaddr/pl-us.testnet.drand.sh/", "/dnsaddr/pl-sin.testnet.drand.sh/", }, + IsChained: true, ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, }, DrandDevnet: { @@ -66,9 +86,11 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ "/dnsaddr/dev1.drand.sh/", "/dnsaddr/dev2.drand.sh/", }, + IsChained: true, ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, }, DrandIncentinet: { + IsChained: true, ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, }, } diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 3c30e81a9..baaf83023 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/gateway.json.gz b/build/openrpc/gateway.json.gz index 82d638d7e..9c2dfbcf2 100644 Binary files a/build/openrpc/gateway.json.gz and b/build/openrpc/gateway.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 52f021026..ba135c015 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git 
a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 9b4f0243d..582310306 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/params_2k.go b/build/params_2k.go index 4826d421d..03cfd82de 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -23,7 +23,7 @@ var NetworkBundle = "devnet" var BundleOverrides map[actorstypes.Version]string var ActorDebugging = true -const GenesisNetworkVersion = network.Version20 +var GenesisNetworkVersion = network.Version21 var UpgradeBreezeHeight = abi.ChainEpoch(-1) @@ -65,7 +65,11 @@ var UpgradeLightningHeight = abi.ChainEpoch(-22) var UpgradeThunderHeight = abi.ChainEpoch(-23) -var UpgradeWatermelonHeight = abi.ChainEpoch(200) +var UpgradeWatermelonHeight = abi.ChainEpoch(-24) + +var UpgradeDragonHeight = abi.ChainEpoch(20) + +var UpgradePhoenixHeight = UpgradeDragonHeight + 120 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFixHeight = -100 @@ -73,8 +77,12 @@ const UpgradeWatermelonFixHeight = -100 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFix2Height = -101 +// This fix upgrade only ran on calibrationnet +const UpgradeCalibrationDragonFixHeight = -102 + var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandMainnet, + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } var SupportedProofTypes = []abi.RegisteredSealProof{ @@ -91,6 +99,22 @@ func init() { policy.SetMinVerifiedDealSize(MinVerifiedDealSize) policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay) + getGenesisNetworkVersion := func(ev string, def network.Version) network.Version { + hs, found := os.LookupEnv(ev) + if found { + h, err := strconv.Atoi(hs) + if err != nil { + log.Panicf("failed to parse %s env var", ev) + } + + return network.Version(h) + } + + return def + } + + GenesisNetworkVersion = getGenesisNetworkVersion("LOTUS_GENESIS_NETWORK_VERSION", GenesisNetworkVersion) + getUpgradeHeight := func(ev string, def 
abi.ChainEpoch) abi.ChainEpoch { hs, found := os.LookupEnv(ev) if found { @@ -129,6 +153,13 @@ func init() { UpgradeLightningHeight = getUpgradeHeight("LOTUS_LIGHTNING_HEIGHT", UpgradeLightningHeight) UpgradeThunderHeight = getUpgradeHeight("LOTUS_THUNDER_HEIGHT", UpgradeThunderHeight) UpgradeWatermelonHeight = getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", UpgradeWatermelonHeight) + UpgradeDragonHeight = getUpgradeHeight("LOTUS_DRAGON_HEIGHT", UpgradeDragonHeight) + + UpgradePhoenixHeight = getUpgradeHeight("LOTUS_PHOENIX_HEIGHT", UpgradePhoenixHeight) + DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, + } BuildType |= Build2k diff --git a/build/params_butterfly.go b/build/params_butterfly.go index 864518df5..aa3c8a68f 100644 --- a/build/params_butterfly.go +++ b/build/params_butterfly.go @@ -16,10 +16,11 @@ import ( ) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandMainnet, + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } -const GenesisNetworkVersion = network.Version20 +const GenesisNetworkVersion = network.Version21 var NetworkBundle = "butterflynet" var BundleOverrides map[actorstypes.Version]string @@ -54,8 +55,11 @@ const UpgradeSharkHeight = -20 const UpgradeHyggeHeight = -21 const UpgradeLightningHeight = -22 const UpgradeThunderHeight = -23 +const UpgradeWatermelonHeight = -24 -const UpgradeWatermelonHeight = 400 +const UpgradeDragonHeight = 5760 + +const UpgradePhoenixHeight = UpgradeDragonHeight + 120 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFixHeight = -100 @@ -63,6 +67,9 @@ const UpgradeWatermelonFixHeight = -100 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFix2Height = -101 +// This fix upgrade only ran on calibrationnet +const UpgradeCalibrationDragonFixHeight = -102 + var SupportedProofTypes = []abi.RegisteredSealProof{ abi.RegisteredSealProof_StackedDrg512MiBV1, abi.RegisteredSealProof_StackedDrg32GiBV1, diff --git 
a/build/params_calibnet.go b/build/params_calibnet.go index c22eef2fe..1677027d7 100644 --- a/build/params_calibnet.go +++ b/build/params_calibnet.go @@ -19,7 +19,8 @@ import ( ) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandMainnet, + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } const GenesisNetworkVersion = network.Version0 @@ -88,6 +89,15 @@ const UpgradeWatermelonFixHeight = 1070494 // 2023-11-21T13:00:00Z const UpgradeWatermelonFix2Height = 1108174 +// 2024-03-11T14:00:00Z +const UpgradeDragonHeight = 1427974 + +// This epoch, 120 epochs after the "rest" of the nv22 upgrade, is when we switch to Drand quicknet +const UpgradePhoenixHeight = UpgradeDragonHeight + 120 + +// 2024-04-03T11:00:00Z +const UpgradeCalibrationDragonFixHeight = 1493854 + var SupportedProofTypes = []abi.RegisteredSealProof{ abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1, diff --git a/build/params_interop.go b/build/params_interop.go index 9fd0d0ff8..9f34854a0 100644 --- a/build/params_interop.go +++ b/build/params_interop.go @@ -53,8 +53,11 @@ var UpgradeSharkHeight = abi.ChainEpoch(-20) var UpgradeHyggeHeight = abi.ChainEpoch(-21) var UpgradeLightningHeight = abi.ChainEpoch(-22) var UpgradeThunderHeight = abi.ChainEpoch(-23) +var UpgradeWatermelonHeight = abi.ChainEpoch(-24) -const UpgradeWatermelonHeight = 50 +const UpgradeDragonHeight = 50 + +const UpgradePhoenixHeight = UpgradeDragonHeight + 100 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFixHeight = -1 @@ -62,8 +65,12 @@ const UpgradeWatermelonFixHeight = -1 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFix2Height = -2 +// This fix upgrade only ran on calibrationnet +const UpgradeCalibrationDragonFixHeight = -3 + var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandMainnet, + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } var SupportedProofTypes = []abi.RegisteredSealProof{ diff --git 
a/build/params_mainnet.go b/build/params_mainnet.go index 8176c4e6d..5831e5137 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -16,8 +16,9 @@ import ( ) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandIncentinet, - UpgradeSmokeHeight: DrandMainnet, + 0: DrandIncentinet, + UpgradeSmokeHeight: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } var NetworkBundle = "mainnet" @@ -96,7 +97,14 @@ const UpgradeLightningHeight = 2809800 const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21 // 2023-12-12T13:30:00Z -var UpgradeWatermelonHeight = abi.ChainEpoch(3469380) +const UpgradeWatermelonHeight = 3469380 + +// 2024-04-24T14:00:00Z +var UpgradeDragonHeight = abi.ChainEpoch(3855360) + +// This epoch, 120 epochs after the "rest" of the nv22 upgrade, is when we switch to Drand quicknet +// 2024-04-11T15:00:00Z +var UpgradePhoenixHeight = UpgradeDragonHeight + 120 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFixHeight = -1 @@ -104,6 +112,9 @@ const UpgradeWatermelonFixHeight = -1 // This fix upgrade only ran on calibrationnet const UpgradeWatermelonFix2Height = -2 +// This fix upgrade only ran on calibrationnet +const UpgradeCalibrationDragonFixHeight = -3 + var SupportedProofTypes = []abi.RegisteredSealProof{ abi.RegisteredSealProof_StackedDrg32GiBV1, abi.RegisteredSealProof_StackedDrg64GiBV1, @@ -119,8 +130,10 @@ func init() { SetAddressNetwork(address.Mainnet) } - if os.Getenv("LOTUS_DISABLE_WATERMELON") == "1" { - UpgradeWatermelonHeight = math.MaxInt64 + if os.Getenv("LOTUS_DISABLE_DRAGON") == "1" { + UpgradeDragonHeight = math.MaxInt64 - 1 + delete(DrandSchedule, UpgradePhoenixHeight) + UpgradePhoenixHeight = math.MaxInt64 } // NOTE: DO NOT change this unless you REALLY know what you're doing. 
This is not consensus critical, however, diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 1d15c2fe8..8a3f65501 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -30,7 +30,7 @@ const AllowableClockDriftSecs = uint64(1) /* inline-gen template const TestNetworkVersion = network.Version{{.latestNetworkVersion}} /* inline-gen start */ -const TestNetworkVersion = network.Version21 +const TestNetworkVersion = network.Version22 /* inline-gen end */ diff --git a/build/params_testground.go b/build/params_testground.go index 05249e7e2..0fdc1027e 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -87,34 +87,38 @@ var ( UpgradeBreezeHeight abi.ChainEpoch = -1 BreezeGasTampingDuration abi.ChainEpoch = 0 - UpgradeSmokeHeight abi.ChainEpoch = -1 - UpgradeIgnitionHeight abi.ChainEpoch = -2 - UpgradeRefuelHeight abi.ChainEpoch = -3 - UpgradeTapeHeight abi.ChainEpoch = -4 - UpgradeAssemblyHeight abi.ChainEpoch = 10 - UpgradeLiftoffHeight abi.ChainEpoch = -5 - UpgradeKumquatHeight abi.ChainEpoch = -6 - UpgradeCalicoHeight abi.ChainEpoch = -8 - UpgradePersianHeight abi.ChainEpoch = -9 - UpgradeOrangeHeight abi.ChainEpoch = -10 - UpgradeClausHeight abi.ChainEpoch = -11 - UpgradeTrustHeight abi.ChainEpoch = -12 - UpgradeNorwegianHeight abi.ChainEpoch = -13 - UpgradeTurboHeight abi.ChainEpoch = -14 - UpgradeHyperdriveHeight abi.ChainEpoch = -15 - UpgradeChocolateHeight abi.ChainEpoch = -16 - UpgradeOhSnapHeight abi.ChainEpoch = -17 - UpgradeSkyrHeight abi.ChainEpoch = -18 - UpgradeSharkHeight abi.ChainEpoch = -19 - UpgradeHyggeHeight abi.ChainEpoch = -20 - UpgradeLightningHeight abi.ChainEpoch = -21 - UpgradeThunderHeight abi.ChainEpoch = -22 - UpgradeWatermelonHeight abi.ChainEpoch = -23 - UpgradeWatermelonFixHeight abi.ChainEpoch = -24 - UpgradeWatermelonFix2Height abi.ChainEpoch = -25 + UpgradeSmokeHeight abi.ChainEpoch = -1 + UpgradeIgnitionHeight abi.ChainEpoch = -2 + UpgradeRefuelHeight abi.ChainEpoch = 
-3 + UpgradeTapeHeight abi.ChainEpoch = -4 + UpgradeAssemblyHeight abi.ChainEpoch = 10 + UpgradeLiftoffHeight abi.ChainEpoch = -5 + UpgradeKumquatHeight abi.ChainEpoch = -6 + UpgradeCalicoHeight abi.ChainEpoch = -8 + UpgradePersianHeight abi.ChainEpoch = -9 + UpgradeOrangeHeight abi.ChainEpoch = -10 + UpgradeClausHeight abi.ChainEpoch = -11 + UpgradeTrustHeight abi.ChainEpoch = -12 + UpgradeNorwegianHeight abi.ChainEpoch = -13 + UpgradeTurboHeight abi.ChainEpoch = -14 + UpgradeHyperdriveHeight abi.ChainEpoch = -15 + UpgradeChocolateHeight abi.ChainEpoch = -16 + UpgradeOhSnapHeight abi.ChainEpoch = -17 + UpgradeSkyrHeight abi.ChainEpoch = -18 + UpgradeSharkHeight abi.ChainEpoch = -19 + UpgradeHyggeHeight abi.ChainEpoch = -20 + UpgradeLightningHeight abi.ChainEpoch = -21 + UpgradeThunderHeight abi.ChainEpoch = -22 + UpgradeWatermelonHeight abi.ChainEpoch = -23 + UpgradeWatermelonFixHeight abi.ChainEpoch = -24 + UpgradeWatermelonFix2Height abi.ChainEpoch = -25 + UpgradeDragonHeight abi.ChainEpoch = -26 + UpgradePhoenixHeight abi.ChainEpoch = -27 + UpgradeCalibrationDragonFixHeight abi.ChainEpoch = -28 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ - 0: DrandMainnet, + 0: DrandMainnet, + UpgradePhoenixHeight: DrandQuicknet, } GenesisNetworkVersion = network.Version0 diff --git a/build/version.go b/build/version.go index 6ec1ecd7a..5298d8e1e 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.25.3-dev" +const BuildVersion = "1.26.3" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index dcb60f801..0f7ac2093 100644 --- a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -6,7 +6,7 @@ import ( "github.com/filecoin-project/go-address" actorstypes 
"github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -22,7 +22,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -var Methods = builtin12.MethodsAccount +var Methods = builtin13.MethodsAccount func Load(store adt.Store, act *types.Actor) (State, error) { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { @@ -47,6 +47,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -117,6 +120,9 @@ func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (S case actorstypes.Version12: return make12(store, addr) + case actorstypes.Version13: + return make13(store, addr) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -146,5 +152,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/account/v13.go b/chain/actors/builtin/account/v13.go new file mode 100644 index 000000000..f2f3b6f66 --- /dev/null +++ b/chain/actors/builtin/account/v13.go @@ -0,0 +1,62 @@ +package account + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + actorstypes "github.com/filecoin-project/go-state-types/actors" + account13 "github.com/filecoin-project/go-state-types/builtin/v13/account" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := 
state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, addr address.Address) (State, error) { + out := state13{store: store} + out.State = account13.State{Address: addr} + return &out, nil +} + +type state13 struct { + account13.State + store adt.Store +} + +func (s *state13) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ActorKey() string { + return manifest.AccountKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go index 17b291788..0c69cfca8 100644 --- a/chain/actors/builtin/cron/cron.go +++ b/chain/actors/builtin/cron/cron.go @@ -5,7 +5,7 @@ import ( "golang.org/x/xerrors" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -43,6 +43,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -113,13 +116,16 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) { case actorstypes.Version12: return make12(store) + case actorstypes.Version13: + return make13(store) + } return nil, 
xerrors.Errorf("unknown actor version %d", av) } var ( - Address = builtin12.CronActorAddr - Methods = builtin12.MethodsCron + Address = builtin13.CronActorAddr + Methods = builtin13.MethodsCron ) type State interface { @@ -144,5 +150,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/cron/v13.go b/chain/actors/builtin/cron/v13.go new file mode 100644 index 000000000..d2ba03378 --- /dev/null +++ b/chain/actors/builtin/cron/v13.go @@ -0,0 +1,57 @@ +package cron + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + cron13 "github.com/filecoin-project/go-state-types/builtin/v13/cron" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store) (State, error) { + out := state13{store: store} + out.State = *cron13.ConstructState(cron13.BuiltInEntries()) + return &out, nil +} + +type state13 struct { + cron13.State + store adt.Store +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ActorKey() string { + return manifest.CronKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/datacap/datacap.go b/chain/actors/builtin/datacap/datacap.go index 0c8f04bbf..7f5ee6c0b 100644 --- 
a/chain/actors/builtin/datacap/datacap.go +++ b/chain/actors/builtin/datacap/datacap.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" @@ -17,8 +17,8 @@ import ( ) var ( - Address = builtin12.DatacapActorAddr - Methods = builtin12.MethodsDatacap + Address = builtin13.DatacapActorAddr + Methods = builtin13.MethodsDatacap ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -41,6 +41,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -62,6 +65,9 @@ func MakeState(store adt.Store, av actorstypes.Version, governor address.Address case actorstypes.Version12: return make12(store, governor, bitwidth) + case actorstypes.Version13: + return make13(store, governor, bitwidth) + default: return nil, xerrors.Errorf("datacap actor only valid for actors v9 and above, got %d", av) } @@ -86,5 +92,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/datacap/v13.go b/chain/actors/builtin/datacap/v13.go new file mode 100644 index 000000000..3baf374a4 --- /dev/null +++ b/chain/actors/builtin/datacap/v13.go @@ -0,0 +1,82 @@ +package datacap + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + datacap13 "github.com/filecoin-project/go-state-types/builtin/v13/datacap" + adt13 
"github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, governor address.Address, bitwidth uint64) (State, error) { + out := state13{store: store} + s, err := datacap13.ConstructState(store, governor, bitwidth) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state13 struct { + datacap13.State + store adt.Store +} + +func (s *state13) Governor() (address.Address, error) { + return s.State.Governor, nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachClient(s.store, actors.Version13, s.verifiedClients, cb) +} + +func (s *state13) verifiedClients() (adt.Map, error) { + return adt13.AsMap(s.store, s.Token.Balances, int(s.Token.HamtBitWidth)) +} + +func (s *state13) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version13, s.verifiedClients, addr) +} + +func (s *state13) ActorKey() string { + return manifest.DatacapKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/evm/actor.go.template b/chain/actors/builtin/evm/actor.go.template index 62da06867..a3681979d 100644 
--- a/chain/actors/builtin/evm/actor.go.template +++ b/chain/actors/builtin/evm/actor.go.template @@ -10,6 +10,8 @@ import ( "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" + + "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/manifest" builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin" @@ -17,6 +19,18 @@ import ( var Methods = builtin{{.latestVersion}}.MethodsEVM +// See https://github.com/filecoin-project/builtin-actors/blob/6e781444cee5965278c46ef4ffe1fb1970f18d7d/actors/evm/src/lib.rs#L35-L42 +const ( + ErrReverted exitcode.ExitCode = iota + 33 // EVM exit codes start at 33 + ErrInvalidInstruction + ErrUndefinedInstruction + ErrStackUnderflow + ErrStackOverflow + ErrIllegalMemoryAccess + ErrBadJumpdest + ErrSelfdestructFailed +) + func Load(store adt.Store, act *types.Actor) (State, error) { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { if name != manifest.EvmKey { diff --git a/chain/actors/builtin/evm/evm.go b/chain/actors/builtin/evm/evm.go index 98f860cac..5bda457cd 100644 --- a/chain/actors/builtin/evm/evm.go +++ b/chain/actors/builtin/evm/evm.go @@ -5,8 +5,9 @@ import ( "golang.org/x/xerrors" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -14,7 +15,19 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -var Methods = builtin12.MethodsEVM +var Methods = builtin13.MethodsEVM + +// See 
https://github.com/filecoin-project/builtin-actors/blob/6e781444cee5965278c46ef4ffe1fb1970f18d7d/actors/evm/src/lib.rs#L35-L42 +const ( + ErrReverted exitcode.ExitCode = iota + 33 // EVM exit codes start at 33 + ErrInvalidInstruction + ErrUndefinedInstruction + ErrStackUnderflow + ErrStackOverflow + ErrIllegalMemoryAccess + ErrBadJumpdest + ErrSelfdestructFailed +) func Load(store adt.Store, act *types.Actor) (State, error) { if name, av, ok := actors.GetActorMetaByCode(act.Code); ok { @@ -33,6 +46,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -51,6 +67,9 @@ func MakeState(store adt.Store, av actorstypes.Version, bytecode cid.Cid) (State case actorstypes.Version12: return make12(store, bytecode) + case actorstypes.Version13: + return make13(store, bytecode) + default: return nil, xerrors.Errorf("evm actor only valid for actors v10 and above, got %d", av) } diff --git a/chain/actors/builtin/evm/v13.go b/chain/actors/builtin/evm/v13.go new file mode 100644 index 000000000..180c9f38a --- /dev/null +++ b/chain/actors/builtin/evm/v13.go @@ -0,0 +1,72 @@ +package evm + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + evm13 "github.com/filecoin-project/go-state-types/builtin/v13/evm" + + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, bytecode cid.Cid) (State, error) { + out := state13{store: store} + s, err := evm13.ConstructState(store, bytecode) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state13 struct { + evm13.State + store adt.Store +} + +func (s *state13) 
Nonce() (uint64, error) { + return s.State.Nonce, nil +} + +func (s *state13) IsAlive() (bool, error) { + return s.State.Tombstone == nil, nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) GetBytecodeCID() (cid.Cid, error) { + return s.State.Bytecode, nil +} + +func (s *state13) GetBytecodeHash() ([32]byte, error) { + return s.State.BytecodeHash, nil +} + +func (s *state13) GetBytecode() ([]byte, error) { + bc, err := s.GetBytecodeCID() + if err != nil { + return nil, err + } + + var byteCode abi.CborBytesTransparent + if err := s.store.Get(s.store.Context(), bc, &byteCode); err != nil { + return nil, err + } + + return byteCode, nil +} diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index 41a763ecf..de1c6274e 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -25,8 +25,8 @@ import ( ) var ( - Address = builtin12.InitActorAddr - Methods = builtin12.MethodsInit + Address = builtin13.InitActorAddr + Methods = builtin13.MethodsInit ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -52,6 +52,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -122,6 +125,9 @@ func MakeState(store adt.Store, av actorstypes.Version, networkName string) (Sta case actorstypes.Version12: return make12(store, networkName) + case 
actorstypes.Version13: + return make13(store, networkName) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -174,5 +180,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/init/v13.go b/chain/actors/builtin/init/v13.go new file mode 100644 index 000000000..227ce769f --- /dev/null +++ b/chain/actors/builtin/init/v13.go @@ -0,0 +1,147 @@ +package init + +import ( + "crypto/sha256" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, networkName string) (State, error) { + out := state13{store: store} + + s, err := init13.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state13 struct { + init13.State + store adt.Store +} + +func (s *state13) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state13) MapAddressToNewID(address address.Address) (address.Address, error) { + return 
s.State.MapAddressToNewID(s.store, address) +} + +func (s *state13) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt13.AsMap(s.store, s.State.AddressMap, builtin13.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state13) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state13) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state13) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state13) Remove(addrs ...address.Address) (err error) { + m, err := adt13.AsMap(s.store, s.State.AddressMap, builtin13.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state13) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) AddressMap() (adt.Map, error) { + return adt13.AsMap(s.store, s.State.AddressMap, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) AddressMapBitWidth() int { + return builtin13.DefaultHamtBitwidth +} + +func (s *state13) AddressMapHashFunction() func(input []byte) []byte { + return func(input []byte) []byte { + res := sha256.Sum256(input) + return res[:] + } +} + +func (s *state13) ActorKey() string { + return manifest.InitKey +} + +func (s *state13) ActorVersion() 
actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/market/actor.go.template b/chain/actors/builtin/market/actor.go.template index a84c04ab9..0604737b3 100644 --- a/chain/actors/builtin/market/actor.go.template +++ b/chain/actors/builtin/market/actor.go.template @@ -103,10 +103,10 @@ type BalanceTable interface { type DealStates interface { ForEach(cb func(id abi.DealID, ds DealState) error) error - Get(id abi.DealID) (*DealState, bool, error) + Get(id abi.DealID) (DealState, bool, error) array() adt.Array - decode(*cbg.Deferred) (*DealState, error) + decode(*cbg.Deferred) (DealState, error) } type DealProposals interface { @@ -142,7 +142,17 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora type DealProposal = markettypes.DealProposal type DealLabel = markettypes.DealLabel -type DealState = markettypes.DealState +type DealState interface { + SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated + SlashEpoch() abi.ChainEpoch // -1 if deal never slashed + + Equals(other DealState) bool +} + +func DealStatesEqual(a, b DealState) bool { + return DealStatesEqual(a, b) +} type DealStateChanges struct { Added []DealIDState @@ -158,8 +168,8 @@ type DealIDState struct { // DealStateChange is a change in deal state from -> to type DealStateChange struct { ID abi.DealID - From *DealState - To *DealState + From DealState + To DealState } type DealProposalChanges struct { @@ -172,12 +182,36 @@ type ProposalIDState struct { Proposal markettypes.DealProposal } -func EmptyDealState() *DealState { - return &DealState{ - SectorStartEpoch: -1, - SlashEpoch: -1, - LastUpdatedEpoch: 
-1, + +type emptyDealState struct{} + +func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) LastUpdatedEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) SlashEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) Equals(other DealState) bool { + if e.SectorStartEpoch() != other.SectorStartEpoch() { + return false } + if e.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if e.SlashEpoch() != other.SlashEpoch() { + return false + } + return true +} + +func EmptyDealState() DealState { + return &emptyDealState{} } // returns the earned fees and pending fees for a given deal @@ -196,8 +230,8 @@ func GetDealFees(deal markettypes.DealProposal, height abi.ChainEpoch) (abi.Toke return ef, big.Sub(tf, ef) } -func IsDealActive(state markettypes.DealState) bool { - return state.SectorStartEpoch > -1 && state.SlashEpoch == -1 +func IsDealActive(state DealState) bool { + return state.SectorStartEpoch() > -1 && state.SlashEpoch() == -1 } func labelFromGoString(s string) (markettypes.DealLabel, error) { diff --git a/chain/actors/builtin/market/diff.go b/chain/actors/builtin/market/diff.go index ef3c2c28d..292299790 100644 --- a/chain/actors/builtin/market/diff.go +++ b/chain/actors/builtin/market/diff.go @@ -64,7 +64,7 @@ func (d *marketStatesDiffer) Add(key uint64, val *cbg.Deferred) error { if err != nil { return err } - d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), *ds}) + d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), ds}) return nil } @@ -77,7 +77,7 @@ func (d *marketStatesDiffer) Modify(key uint64, from, to *cbg.Deferred) error { if err != nil { return err } - if *dsFrom != *dsTo { + if !dsFrom.Equals(dsTo) { d.Results.Modified = append(d.Results.Modified, DealStateChange{abi.DealID(key), dsFrom, dsTo}) } return nil @@ -88,6 +88,6 @@ func (d *marketStatesDiffer) Remove(key uint64, val *cbg.Deferred) error { if err != nil { 
return err } - d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), *ds}) + d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), ds}) return nil } diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 39473d560..13c09f91b 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -58,6 +58,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -128,6 +131,9 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) { case actorstypes.Version12: return make12(store) + case actorstypes.Version13: + return make13(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -162,10 +168,10 @@ type BalanceTable interface { type DealStates interface { ForEach(cb func(id abi.DealID, ds DealState) error) error - Get(id abi.DealID) (*DealState, bool, error) + Get(id abi.DealID) (DealState, bool, error) array() adt.Array - decode(*cbg.Deferred) (*DealState, error) + decode(*cbg.Deferred) (DealState, error) } type DealProposals interface { @@ -226,6 +232,9 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora case actorstypes.Version12: return decodePublishStorageDealsReturn12(b) + case actorstypes.Version13: + return decodePublishStorageDealsReturn13(b) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -233,7 +242,17 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora type DealProposal = markettypes.DealProposal type DealLabel = markettypes.DealLabel -type DealState = markettypes.DealState +type DealState interface { + SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector + LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated + SlashEpoch() abi.ChainEpoch // -1 if 
deal never slashed + + Equals(other DealState) bool +} + +func DealStatesEqual(a, b DealState) bool { + return DealStatesEqual(a, b) +} type DealStateChanges struct { Added []DealIDState @@ -249,8 +268,8 @@ type DealIDState struct { // DealStateChange is a change in deal state from -> to type DealStateChange struct { ID abi.DealID - From *DealState - To *DealState + From DealState + To DealState } type DealProposalChanges struct { @@ -263,12 +282,35 @@ type ProposalIDState struct { Proposal markettypes.DealProposal } -func EmptyDealState() *DealState { - return &DealState{ - SectorStartEpoch: -1, - SlashEpoch: -1, - LastUpdatedEpoch: -1, +type emptyDealState struct{} + +func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) LastUpdatedEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) SlashEpoch() abi.ChainEpoch { + return -1 +} + +func (e *emptyDealState) Equals(other DealState) bool { + if e.SectorStartEpoch() != other.SectorStartEpoch() { + return false } + if e.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if e.SlashEpoch() != other.SlashEpoch() { + return false + } + return true +} + +func EmptyDealState() DealState { + return &emptyDealState{} } // returns the earned fees and pending fees for a given deal @@ -287,8 +329,8 @@ func GetDealFees(deal markettypes.DealProposal, height abi.ChainEpoch) (abi.Toke return ef, big.Sub(tf, ef) } -func IsDealActive(state markettypes.DealState) bool { - return state.SectorStartEpoch > -1 && state.SlashEpoch == -1 +func IsDealActive(state DealState) bool { + return state.SectorStartEpoch() > -1 && state.SlashEpoch() == -1 } func labelFromGoString(s string) (markettypes.DealLabel, error) { @@ -313,5 +355,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/market/state.go.template b/chain/actors/builtin/market/state.go.template index 
1eab9d743..467057660 100644 --- a/chain/actors/builtin/market/state.go.template +++ b/chain/actors/builtin/market/state.go.template @@ -175,7 +175,7 @@ type dealStates{{.v}} struct { adt.Array } -func (s *dealStates{{.v}}) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates{{.v}}) Get(dealID abi.DealID) (DealState, bool, error) { var deal{{.v}} market{{.v}}.DealState found, err := s.Array.Get(uint64(dealID), &deal{{.v}}) if err != nil { @@ -185,7 +185,7 @@ func (s *dealStates{{.v}}) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV{{.v}}DealState(deal{{.v}}) - return &deal, true, nil + return deal, true, nil } func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -195,31 +195,57 @@ func (s *dealStates{{.v}}) ForEach(cb func(dealID abi.DealID, ds DealState) erro }) } -func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates{{.v}}) decode(val *cbg.Deferred) (DealState, error) { var ds{{.v}} market{{.v}}.DealState if err := ds{{.v}}.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV{{.v}}DealState(ds{{.v}}) - return &ds, nil + return ds, nil } func (s *dealStates{{.v}}) array() adt.Array { return s.Array } -func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v{{.v}}.SectorStartEpoch, - LastUpdatedEpoch: v{{.v}}.LastUpdatedEpoch, - SlashEpoch: v{{.v}}.SlashEpoch, - VerifiedClaim: 0, - } - {{if (ge .v 9)}} - ret.VerifiedClaim = verifregtypes.AllocationId(v{{.v}}.VerifiedClaim) - {{end}} +type dealStateV{{.v}} struct { + ds{{.v}} market{{.v}}.DealState +} - return ret +func (d dealStateV{{.v}}) SectorStartEpoch() abi.ChainEpoch { + return d.ds{{.v}}.SectorStartEpoch +} + +func (d dealStateV{{.v}}) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds{{.v}}.LastUpdatedEpoch +} + +func (d dealStateV{{.v}}) SlashEpoch() abi.ChainEpoch { + return 
d.ds{{.v}}.SlashEpoch +} + +func (d dealStateV{{.v}}) Equals(other DealState) bool { + if ov{{.v}}, ok := other.(dealStateV{{.v}}); ok { + return d.ds{{.v}} == ov{{.v}}.ds{{.v}} + } + + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV{{.v}})(nil) + +func fromV{{.v}}DealState(v{{.v}} market{{.v}}.DealState) DealState { + return dealStateV{{.v}}{v{{.v}}} } type dealProposals{{.v}} struct { diff --git a/chain/actors/builtin/market/v0.go b/chain/actors/builtin/market/v0.go index ca6970dfa..d797d53f8 100644 --- a/chain/actors/builtin/market/v0.go +++ b/chain/actors/builtin/market/v0.go @@ -154,7 +154,7 @@ type dealStates0 struct { adt.Array } -func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates0) Get(dealID abi.DealID) (DealState, bool, error) { var deal0 market0.DealState found, err := s.Array.Get(uint64(dealID), &deal0) if err != nil { @@ -164,7 +164,7 @@ func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV0DealState(deal0) - return &deal, true, nil + return deal, true, nil } func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -174,28 +174,57 @@ func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates0) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates0) decode(val *cbg.Deferred) (DealState, error) { var ds0 market0.DealState if err := ds0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV0DealState(ds0) - return &ds, nil + return ds, nil } func (s *dealStates0) array() adt.Array { return s.Array } -func fromV0DealState(v0 market0.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v0.SectorStartEpoch, - 
LastUpdatedEpoch: v0.LastUpdatedEpoch, - SlashEpoch: v0.SlashEpoch, - VerifiedClaim: 0, +type dealStateV0 struct { + ds0 market0.DealState +} + +func (d dealStateV0) SectorStartEpoch() abi.ChainEpoch { + return d.ds0.SectorStartEpoch +} + +func (d dealStateV0) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds0.LastUpdatedEpoch +} + +func (d dealStateV0) SlashEpoch() abi.ChainEpoch { + return d.ds0.SlashEpoch +} + +func (d dealStateV0) Equals(other DealState) bool { + if ov0, ok := other.(dealStateV0); ok { + return d.ds0 == ov0.ds0 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV0)(nil) + +func fromV0DealState(v0 market0.DealState) DealState { + return dealStateV0{v0} } type dealProposals0 struct { diff --git a/chain/actors/builtin/market/v10.go b/chain/actors/builtin/market/v10.go index 878f0d465..290c17d09 100644 --- a/chain/actors/builtin/market/v10.go +++ b/chain/actors/builtin/market/v10.go @@ -153,7 +153,7 @@ type dealStates10 struct { adt.Array } -func (s *dealStates10) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates10) Get(dealID abi.DealID) (DealState, bool, error) { var deal10 market10.DealState found, err := s.Array.Get(uint64(dealID), &deal10) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates10) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV10DealState(deal10) - return &deal, true, nil + return deal, true, nil } func (s *dealStates10) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates10) ForEach(cb func(dealID abi.DealID, ds DealState) error) e }) } -func (s *dealStates10) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates10) decode(val *cbg.Deferred) (DealState, error) { var ds10 
market10.DealState if err := ds10.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV10DealState(ds10) - return &ds, nil + return ds, nil } func (s *dealStates10) array() adt.Array { return s.Array } -func fromV10DealState(v10 market10.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v10.SectorStartEpoch, - LastUpdatedEpoch: v10.LastUpdatedEpoch, - SlashEpoch: v10.SlashEpoch, - VerifiedClaim: 0, +type dealStateV10 struct { + ds10 market10.DealState +} + +func (d dealStateV10) SectorStartEpoch() abi.ChainEpoch { + return d.ds10.SectorStartEpoch +} + +func (d dealStateV10) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds10.LastUpdatedEpoch +} + +func (d dealStateV10) SlashEpoch() abi.ChainEpoch { + return d.ds10.SlashEpoch +} + +func (d dealStateV10) Equals(other DealState) bool { + if ov10, ok := other.(dealStateV10); ok { + return d.ds10 == ov10.ds10 } - ret.VerifiedClaim = verifregtypes.AllocationId(v10.VerifiedClaim) + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } - return ret + return true +} + +var _ DealState = (*dealStateV10)(nil) + +func fromV10DealState(v10 market10.DealState) DealState { + return dealStateV10{v10} } type dealProposals10 struct { diff --git a/chain/actors/builtin/market/v11.go b/chain/actors/builtin/market/v11.go index a64272209..56a4c6038 100644 --- a/chain/actors/builtin/market/v11.go +++ b/chain/actors/builtin/market/v11.go @@ -153,7 +153,7 @@ type dealStates11 struct { adt.Array } -func (s *dealStates11) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates11) Get(dealID abi.DealID) (DealState, bool, error) { var deal11 market11.DealState found, err := s.Array.Get(uint64(dealID), &deal11) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates11) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, 
false, nil } deal := fromV11DealState(deal11) - return &deal, true, nil + return deal, true, nil } func (s *dealStates11) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates11) ForEach(cb func(dealID abi.DealID, ds DealState) error) e }) } -func (s *dealStates11) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates11) decode(val *cbg.Deferred) (DealState, error) { var ds11 market11.DealState if err := ds11.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV11DealState(ds11) - return &ds, nil + return ds, nil } func (s *dealStates11) array() adt.Array { return s.Array } -func fromV11DealState(v11 market11.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v11.SectorStartEpoch, - LastUpdatedEpoch: v11.LastUpdatedEpoch, - SlashEpoch: v11.SlashEpoch, - VerifiedClaim: 0, +type dealStateV11 struct { + ds11 market11.DealState +} + +func (d dealStateV11) SectorStartEpoch() abi.ChainEpoch { + return d.ds11.SectorStartEpoch +} + +func (d dealStateV11) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds11.LastUpdatedEpoch +} + +func (d dealStateV11) SlashEpoch() abi.ChainEpoch { + return d.ds11.SlashEpoch +} + +func (d dealStateV11) Equals(other DealState) bool { + if ov11, ok := other.(dealStateV11); ok { + return d.ds11 == ov11.ds11 } - ret.VerifiedClaim = verifregtypes.AllocationId(v11.VerifiedClaim) + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } - return ret + return true +} + +var _ DealState = (*dealStateV11)(nil) + +func fromV11DealState(v11 market11.DealState) DealState { + return dealStateV11{v11} } type dealProposals11 struct { diff --git a/chain/actors/builtin/market/v12.go b/chain/actors/builtin/market/v12.go index 56e651a9b..cf7687203 100644 --- a/chain/actors/builtin/market/v12.go +++ 
b/chain/actors/builtin/market/v12.go @@ -153,7 +153,7 @@ type dealStates12 struct { adt.Array } -func (s *dealStates12) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates12) Get(dealID abi.DealID) (DealState, bool, error) { var deal12 market12.DealState found, err := s.Array.Get(uint64(dealID), &deal12) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates12) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV12DealState(deal12) - return &deal, true, nil + return deal, true, nil } func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates12) ForEach(cb func(dealID abi.DealID, ds DealState) error) e }) } -func (s *dealStates12) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates12) decode(val *cbg.Deferred) (DealState, error) { var ds12 market12.DealState if err := ds12.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV12DealState(ds12) - return &ds, nil + return ds, nil } func (s *dealStates12) array() adt.Array { return s.Array } -func fromV12DealState(v12 market12.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v12.SectorStartEpoch, - LastUpdatedEpoch: v12.LastUpdatedEpoch, - SlashEpoch: v12.SlashEpoch, - VerifiedClaim: 0, +type dealStateV12 struct { + ds12 market12.DealState +} + +func (d dealStateV12) SectorStartEpoch() abi.ChainEpoch { + return d.ds12.SectorStartEpoch +} + +func (d dealStateV12) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds12.LastUpdatedEpoch +} + +func (d dealStateV12) SlashEpoch() abi.ChainEpoch { + return d.ds12.SlashEpoch +} + +func (d dealStateV12) Equals(other DealState) bool { + if ov12, ok := other.(dealStateV12); ok { + return d.ds12 == ov12.ds12 } - ret.VerifiedClaim = verifregtypes.AllocationId(v12.VerifiedClaim) + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != 
other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } - return ret + return true +} + +var _ DealState = (*dealStateV12)(nil) + +func fromV12DealState(v12 market12.DealState) DealState { + return dealStateV12{v12} } type dealProposals12 struct { diff --git a/chain/actors/builtin/market/v13.go b/chain/actors/builtin/market/v13.go new file mode 100644 index 000000000..d270319ce --- /dev/null +++ b/chain/actors/builtin/market/v13.go @@ -0,0 +1,404 @@ +package market + +import ( + "bytes" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/builtin" + market13 "github.com/filecoin-project/go-state-types/builtin/v13/market" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store) (State, error) { + out := state13{store: store} + + s, err := market13.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state13 struct { + market13.State + store adt.Store 
+} + +func (s *state13) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state13) BalancesChanged(otherState State) (bool, error) { + otherState13, ok := otherState.(*state13) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState13.State.EscrowTable) || !s.State.LockedTable.Equals(otherState13.State.LockedTable), nil +} + +func (s *state13) StatesChanged(otherState State) (bool, error) { + otherState13, ok := otherState.(*state13) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.States.Equals(otherState13.State.States), nil +} + +func (s *state13) States() (DealStates, error) { + stateArray, err := adt13.AsArray(s.store, s.State.States, market13.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates13{stateArray}, nil +} + +func (s *state13) ProposalsChanged(otherState State) (bool, error) { + otherState13, ok := otherState.(*state13) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState13.State.Proposals), nil +} + +func (s *state13) Proposals() (DealProposals, error) { + proposalArray, err := adt13.AsArray(s.store, s.State.Proposals, market13.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals13{proposalArray}, nil +} + +func (s *state13) EscrowTable() (BalanceTable, error) { + bt, err := adt13.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return 
&balanceTable13{bt}, nil +} + +func (s *state13) LockedTable() (BalanceTable, error) { + bt, err := adt13.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable13{bt}, nil +} + +func (s *state13) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market13.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state13) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable13 struct { + *adt13.BalanceTable +} + +func (bt *balanceTable13) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt13.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates13 struct { + adt.Array +} + +func (s *dealStates13) Get(dealID abi.DealID) (DealState, bool, error) { + var deal13 market13.DealState + found, err := s.Array.Get(uint64(dealID), &deal13) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV13DealState(deal13) + return deal, true, nil +} + +func (s *dealStates13) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds13 market13.DealState + return s.Array.ForEach(&ds13, func(idx int64) error { + return cb(abi.DealID(idx), fromV13DealState(ds13)) + }) +} + +func (s *dealStates13) decode(val *cbg.Deferred) (DealState, error) { + var ds13 market13.DealState + if err := ds13.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV13DealState(ds13) + return ds, nil +} + +func (s *dealStates13) array() adt.Array { + return s.Array +} + +type dealStateV13 struct { + ds13 market13.DealState +} + +func 
(d dealStateV13) SectorStartEpoch() abi.ChainEpoch { + return d.ds13.SectorStartEpoch +} + +func (d dealStateV13) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds13.LastUpdatedEpoch +} + +func (d dealStateV13) SlashEpoch() abi.ChainEpoch { + return d.ds13.SlashEpoch +} + +func (d dealStateV13) Equals(other DealState) bool { + if ov13, ok := other.(dealStateV13); ok { + return d.ds13 == ov13.ds13 + } + + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV13)(nil) + +func fromV13DealState(v13 market13.DealState) DealState { + return dealStateV13{v13} +} + +type dealProposals13 struct { + adt.Array +} + +func (s *dealProposals13) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal13 market13.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal13) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + + proposal, err := fromV13DealProposal(proposal13) + if err != nil { + return nil, true, xerrors.Errorf("decoding proposal: %w", err) + } + + return &proposal, true, nil +} + +func (s *dealProposals13) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp13 market13.DealProposal + return s.Array.ForEach(&dp13, func(idx int64) error { + dp, err := fromV13DealProposal(dp13) + if err != nil { + return xerrors.Errorf("decoding proposal: %w", err) + } + + return cb(abi.DealID(idx), dp) + }) +} + +func (s *dealProposals13) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp13 market13.DealProposal + if err := dp13.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + + dp, err := fromV13DealProposal(dp13) + if err != nil { + return nil, err + } + + return &dp, nil +} + +func (s *dealProposals13) array() adt.Array { + return s.Array +} + +func 
fromV13DealProposal(v13 market13.DealProposal) (DealProposal, error) { + + label, err := fromV13Label(v13.Label) + + if err != nil { + return DealProposal{}, xerrors.Errorf("error setting deal label: %w", err) + } + + return DealProposal{ + PieceCID: v13.PieceCID, + PieceSize: v13.PieceSize, + VerifiedDeal: v13.VerifiedDeal, + Client: v13.Client, + Provider: v13.Provider, + + Label: label, + + StartEpoch: v13.StartEpoch, + EndEpoch: v13.EndEpoch, + StoragePricePerEpoch: v13.StoragePricePerEpoch, + + ProviderCollateral: v13.ProviderCollateral, + ClientCollateral: v13.ClientCollateral, + }, nil +} + +func fromV13Label(v13 market13.DealLabel) (DealLabel, error) { + if v13.IsString() { + str, err := v13.ToString() + if err != nil { + return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert string label to string: %w", err) + } + return markettypes.NewLabelFromString(str) + } + + bs, err := v13.ToBytes() + if err != nil { + return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert bytes label to bytes: %w", err) + } + return markettypes.NewLabelFromBytes(bs) +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn13)(nil) + +func decodePublishStorageDealsReturn13(b []byte) (PublishStorageDealsReturn, error) { + var retval market13.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn13{retval}, nil +} + +type publishStorageDealsReturn13 struct { + market13.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn13) IsDealValid(index uint64) (bool, int, error) { + + set, err := r.ValidDeals.IsSet(index) + if err != nil || !set { + return false, -1, err + } + maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{ + Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}}) + if 
err != nil { + return false, -1, err + } + before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals) + if err != nil { + return false, -1, err + } + outIdx, err := before.Count() + if err != nil { + return false, -1, err + } + return set, int(outIdx), nil + +} + +func (r *publishStorageDealsReturn13) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} + +func (s *state13) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) { + + allocations, err := adt13.AsMap(s.store, s.PendingDealAllocationIds, builtin.DefaultHamtBitwidth) + if err != nil { + return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err) + } + + var allocationId cbg.CborInt + found, err := allocations.Get(abi.UIntKey(uint64(dealId)), &allocationId) + if err != nil { + return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err) + } + if !found { + return verifregtypes.NoAllocationID, nil + } + + return verifregtypes.AllocationId(allocationId), nil + +} + +func (s *state13) ActorKey() string { + return manifest.MarketKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/market/v2.go b/chain/actors/builtin/market/v2.go index ba84e3b03..5ced3c8a3 100644 --- a/chain/actors/builtin/market/v2.go +++ b/chain/actors/builtin/market/v2.go @@ -154,7 +154,7 @@ type dealStates2 struct { adt.Array } -func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates2) Get(dealID abi.DealID) (DealState, bool, error) { var deal2 market2.DealState found, err := s.Array.Get(uint64(dealID), &deal2) if err != nil { @@ -164,7 +164,7 @@ 
func (s *dealStates2) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV2DealState(deal2) - return &deal, true, nil + return deal, true, nil } func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -174,28 +174,57 @@ func (s *dealStates2) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates2) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates2) decode(val *cbg.Deferred) (DealState, error) { var ds2 market2.DealState if err := ds2.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV2DealState(ds2) - return &ds, nil + return ds, nil } func (s *dealStates2) array() adt.Array { return s.Array } -func fromV2DealState(v2 market2.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v2.SectorStartEpoch, - LastUpdatedEpoch: v2.LastUpdatedEpoch, - SlashEpoch: v2.SlashEpoch, - VerifiedClaim: 0, +type dealStateV2 struct { + ds2 market2.DealState +} + +func (d dealStateV2) SectorStartEpoch() abi.ChainEpoch { + return d.ds2.SectorStartEpoch +} + +func (d dealStateV2) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds2.LastUpdatedEpoch +} + +func (d dealStateV2) SlashEpoch() abi.ChainEpoch { + return d.ds2.SlashEpoch +} + +func (d dealStateV2) Equals(other DealState) bool { + if ov2, ok := other.(dealStateV2); ok { + return d.ds2 == ov2.ds2 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV2)(nil) + +func fromV2DealState(v2 market2.DealState) DealState { + return dealStateV2{v2} } type dealProposals2 struct { diff --git a/chain/actors/builtin/market/v3.go b/chain/actors/builtin/market/v3.go index f6a0891e7..35dd9c29a 100644 --- a/chain/actors/builtin/market/v3.go +++ 
b/chain/actors/builtin/market/v3.go @@ -149,7 +149,7 @@ type dealStates3 struct { adt.Array } -func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates3) Get(dealID abi.DealID) (DealState, bool, error) { var deal3 market3.DealState found, err := s.Array.Get(uint64(dealID), &deal3) if err != nil { @@ -159,7 +159,7 @@ func (s *dealStates3) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV3DealState(deal3) - return &deal, true, nil + return deal, true, nil } func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -169,28 +169,57 @@ func (s *dealStates3) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates3) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates3) decode(val *cbg.Deferred) (DealState, error) { var ds3 market3.DealState if err := ds3.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV3DealState(ds3) - return &ds, nil + return ds, nil } func (s *dealStates3) array() adt.Array { return s.Array } -func fromV3DealState(v3 market3.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v3.SectorStartEpoch, - LastUpdatedEpoch: v3.LastUpdatedEpoch, - SlashEpoch: v3.SlashEpoch, - VerifiedClaim: 0, +type dealStateV3 struct { + ds3 market3.DealState +} + +func (d dealStateV3) SectorStartEpoch() abi.ChainEpoch { + return d.ds3.SectorStartEpoch +} + +func (d dealStateV3) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds3.LastUpdatedEpoch +} + +func (d dealStateV3) SlashEpoch() abi.ChainEpoch { + return d.ds3.SlashEpoch +} + +func (d dealStateV3) Equals(other DealState) bool { + if ov3, ok := other.(dealStateV3); ok { + return d.ds3 == ov3.ds3 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + 
return true +} + +var _ DealState = (*dealStateV3)(nil) + +func fromV3DealState(v3 market3.DealState) DealState { + return dealStateV3{v3} } type dealProposals3 struct { diff --git a/chain/actors/builtin/market/v4.go b/chain/actors/builtin/market/v4.go index 629e833b6..bc9e61c88 100644 --- a/chain/actors/builtin/market/v4.go +++ b/chain/actors/builtin/market/v4.go @@ -149,7 +149,7 @@ type dealStates4 struct { adt.Array } -func (s *dealStates4) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates4) Get(dealID abi.DealID) (DealState, bool, error) { var deal4 market4.DealState found, err := s.Array.Get(uint64(dealID), &deal4) if err != nil { @@ -159,7 +159,7 @@ func (s *dealStates4) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV4DealState(deal4) - return &deal, true, nil + return deal, true, nil } func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -169,28 +169,57 @@ func (s *dealStates4) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates4) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates4) decode(val *cbg.Deferred) (DealState, error) { var ds4 market4.DealState if err := ds4.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV4DealState(ds4) - return &ds, nil + return ds, nil } func (s *dealStates4) array() adt.Array { return s.Array } -func fromV4DealState(v4 market4.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v4.SectorStartEpoch, - LastUpdatedEpoch: v4.LastUpdatedEpoch, - SlashEpoch: v4.SlashEpoch, - VerifiedClaim: 0, +type dealStateV4 struct { + ds4 market4.DealState +} + +func (d dealStateV4) SectorStartEpoch() abi.ChainEpoch { + return d.ds4.SectorStartEpoch +} + +func (d dealStateV4) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds4.LastUpdatedEpoch +} + +func (d dealStateV4) SlashEpoch() abi.ChainEpoch { + return d.ds4.SlashEpoch +} + +func (d dealStateV4) 
Equals(other DealState) bool { + if ov4, ok := other.(dealStateV4); ok { + return d.ds4 == ov4.ds4 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV4)(nil) + +func fromV4DealState(v4 market4.DealState) DealState { + return dealStateV4{v4} } type dealProposals4 struct { diff --git a/chain/actors/builtin/market/v5.go b/chain/actors/builtin/market/v5.go index 892588979..63743ba8d 100644 --- a/chain/actors/builtin/market/v5.go +++ b/chain/actors/builtin/market/v5.go @@ -149,7 +149,7 @@ type dealStates5 struct { adt.Array } -func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates5) Get(dealID abi.DealID) (DealState, bool, error) { var deal5 market5.DealState found, err := s.Array.Get(uint64(dealID), &deal5) if err != nil { @@ -159,7 +159,7 @@ func (s *dealStates5) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV5DealState(deal5) - return &deal, true, nil + return deal, true, nil } func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -169,28 +169,57 @@ func (s *dealStates5) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates5) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates5) decode(val *cbg.Deferred) (DealState, error) { var ds5 market5.DealState if err := ds5.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV5DealState(ds5) - return &ds, nil + return ds, nil } func (s *dealStates5) array() adt.Array { return s.Array } -func fromV5DealState(v5 market5.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v5.SectorStartEpoch, - LastUpdatedEpoch: v5.LastUpdatedEpoch, - SlashEpoch: v5.SlashEpoch, - VerifiedClaim: 0, +type dealStateV5 
struct { + ds5 market5.DealState +} + +func (d dealStateV5) SectorStartEpoch() abi.ChainEpoch { + return d.ds5.SectorStartEpoch +} + +func (d dealStateV5) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds5.LastUpdatedEpoch +} + +func (d dealStateV5) SlashEpoch() abi.ChainEpoch { + return d.ds5.SlashEpoch +} + +func (d dealStateV5) Equals(other DealState) bool { + if ov5, ok := other.(dealStateV5); ok { + return d.ds5 == ov5.ds5 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV5)(nil) + +func fromV5DealState(v5 market5.DealState) DealState { + return dealStateV5{v5} } type dealProposals5 struct { diff --git a/chain/actors/builtin/market/v6.go b/chain/actors/builtin/market/v6.go index b57d49f91..5900eace9 100644 --- a/chain/actors/builtin/market/v6.go +++ b/chain/actors/builtin/market/v6.go @@ -151,7 +151,7 @@ type dealStates6 struct { adt.Array } -func (s *dealStates6) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates6) Get(dealID abi.DealID) (DealState, bool, error) { var deal6 market6.DealState found, err := s.Array.Get(uint64(dealID), &deal6) if err != nil { @@ -161,7 +161,7 @@ func (s *dealStates6) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV6DealState(deal6) - return &deal, true, nil + return deal, true, nil } func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -171,28 +171,57 @@ func (s *dealStates6) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates6) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates6) decode(val *cbg.Deferred) (DealState, error) { var ds6 market6.DealState if err := ds6.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := 
fromV6DealState(ds6) - return &ds, nil + return ds, nil } func (s *dealStates6) array() adt.Array { return s.Array } -func fromV6DealState(v6 market6.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v6.SectorStartEpoch, - LastUpdatedEpoch: v6.LastUpdatedEpoch, - SlashEpoch: v6.SlashEpoch, - VerifiedClaim: 0, +type dealStateV6 struct { + ds6 market6.DealState +} + +func (d dealStateV6) SectorStartEpoch() abi.ChainEpoch { + return d.ds6.SectorStartEpoch +} + +func (d dealStateV6) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds6.LastUpdatedEpoch +} + +func (d dealStateV6) SlashEpoch() abi.ChainEpoch { + return d.ds6.SlashEpoch +} + +func (d dealStateV6) Equals(other DealState) bool { + if ov6, ok := other.(dealStateV6); ok { + return d.ds6 == ov6.ds6 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV6)(nil) + +func fromV6DealState(v6 market6.DealState) DealState { + return dealStateV6{v6} } type dealProposals6 struct { diff --git a/chain/actors/builtin/market/v7.go b/chain/actors/builtin/market/v7.go index 56a1db328..f51f070c7 100644 --- a/chain/actors/builtin/market/v7.go +++ b/chain/actors/builtin/market/v7.go @@ -151,7 +151,7 @@ type dealStates7 struct { adt.Array } -func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates7) Get(dealID abi.DealID) (DealState, bool, error) { var deal7 market7.DealState found, err := s.Array.Get(uint64(dealID), &deal7) if err != nil { @@ -161,7 +161,7 @@ func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV7DealState(deal7) - return &deal, true, nil + return deal, true, nil } func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -171,28 +171,57 @@ func (s 
*dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates7) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates7) decode(val *cbg.Deferred) (DealState, error) { var ds7 market7.DealState if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV7DealState(ds7) - return &ds, nil + return ds, nil } func (s *dealStates7) array() adt.Array { return s.Array } -func fromV7DealState(v7 market7.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v7.SectorStartEpoch, - LastUpdatedEpoch: v7.LastUpdatedEpoch, - SlashEpoch: v7.SlashEpoch, - VerifiedClaim: 0, +type dealStateV7 struct { + ds7 market7.DealState +} + +func (d dealStateV7) SectorStartEpoch() abi.ChainEpoch { + return d.ds7.SectorStartEpoch +} + +func (d dealStateV7) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds7.LastUpdatedEpoch +} + +func (d dealStateV7) SlashEpoch() abi.ChainEpoch { + return d.ds7.SlashEpoch +} + +func (d dealStateV7) Equals(other DealState) bool { + if ov7, ok := other.(dealStateV7); ok { + return d.ds7 == ov7.ds7 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV7)(nil) + +func fromV7DealState(v7 market7.DealState) DealState { + return dealStateV7{v7} } type dealProposals7 struct { diff --git a/chain/actors/builtin/market/v8.go b/chain/actors/builtin/market/v8.go index 9c68ee1fd..f9bf25f9c 100644 --- a/chain/actors/builtin/market/v8.go +++ b/chain/actors/builtin/market/v8.go @@ -152,7 +152,7 @@ type dealStates8 struct { adt.Array } -func (s *dealStates8) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates8) Get(dealID abi.DealID) (DealState, bool, error) { var deal8 market8.DealState found, err := s.Array.Get(uint64(dealID), &deal8) if err 
!= nil { @@ -162,7 +162,7 @@ func (s *dealStates8) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV8DealState(deal8) - return &deal, true, nil + return deal, true, nil } func (s *dealStates8) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -172,28 +172,57 @@ func (s *dealStates8) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates8) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates8) decode(val *cbg.Deferred) (DealState, error) { var ds8 market8.DealState if err := ds8.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV8DealState(ds8) - return &ds, nil + return ds, nil } func (s *dealStates8) array() adt.Array { return s.Array } -func fromV8DealState(v8 market8.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v8.SectorStartEpoch, - LastUpdatedEpoch: v8.LastUpdatedEpoch, - SlashEpoch: v8.SlashEpoch, - VerifiedClaim: 0, +type dealStateV8 struct { + ds8 market8.DealState +} + +func (d dealStateV8) SectorStartEpoch() abi.ChainEpoch { + return d.ds8.SectorStartEpoch +} + +func (d dealStateV8) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds8.LastUpdatedEpoch +} + +func (d dealStateV8) SlashEpoch() abi.ChainEpoch { + return d.ds8.SlashEpoch +} + +func (d dealStateV8) Equals(other DealState) bool { + if ov8, ok := other.(dealStateV8); ok { + return d.ds8 == ov8.ds8 } - return ret + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if d.SlashEpoch() != other.SlashEpoch() { + return false + } + + return true +} + +var _ DealState = (*dealStateV8)(nil) + +func fromV8DealState(v8 market8.DealState) DealState { + return dealStateV8{v8} } type dealProposals8 struct { diff --git a/chain/actors/builtin/market/v9.go b/chain/actors/builtin/market/v9.go index d692c15cc..3b5be4dfa 100644 --- a/chain/actors/builtin/market/v9.go 
+++ b/chain/actors/builtin/market/v9.go @@ -153,7 +153,7 @@ type dealStates9 struct { adt.Array } -func (s *dealStates9) Get(dealID abi.DealID) (*DealState, bool, error) { +func (s *dealStates9) Get(dealID abi.DealID) (DealState, bool, error) { var deal9 market9.DealState found, err := s.Array.Get(uint64(dealID), &deal9) if err != nil { @@ -163,7 +163,7 @@ func (s *dealStates9) Get(dealID abi.DealID) (*DealState, bool, error) { return nil, false, nil } deal := fromV9DealState(deal9) - return &deal, true, nil + return deal, true, nil } func (s *dealStates9) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { @@ -173,30 +173,57 @@ func (s *dealStates9) ForEach(cb func(dealID abi.DealID, ds DealState) error) er }) } -func (s *dealStates9) decode(val *cbg.Deferred) (*DealState, error) { +func (s *dealStates9) decode(val *cbg.Deferred) (DealState, error) { var ds9 market9.DealState if err := ds9.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { return nil, err } ds := fromV9DealState(ds9) - return &ds, nil + return ds, nil } func (s *dealStates9) array() adt.Array { return s.Array } -func fromV9DealState(v9 market9.DealState) DealState { - ret := DealState{ - SectorStartEpoch: v9.SectorStartEpoch, - LastUpdatedEpoch: v9.LastUpdatedEpoch, - SlashEpoch: v9.SlashEpoch, - VerifiedClaim: 0, +type dealStateV9 struct { + ds9 market9.DealState +} + +func (d dealStateV9) SectorStartEpoch() abi.ChainEpoch { + return d.ds9.SectorStartEpoch +} + +func (d dealStateV9) LastUpdatedEpoch() abi.ChainEpoch { + return d.ds9.LastUpdatedEpoch +} + +func (d dealStateV9) SlashEpoch() abi.ChainEpoch { + return d.ds9.SlashEpoch +} + +func (d dealStateV9) Equals(other DealState) bool { + if ov9, ok := other.(dealStateV9); ok { + return d.ds9 == ov9.ds9 } - ret.VerifiedClaim = verifregtypes.AllocationId(v9.VerifiedClaim) + if d.SectorStartEpoch() != other.SectorStartEpoch() { + return false + } + if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() { + return false + } + if 
d.SlashEpoch() != other.SlashEpoch() { + return false + } - return ret + return true +} + +var _ DealState = (*dealStateV9)(nil) + +func fromV9DealState(v9 market9.DealState) DealState { + return dealStateV9{v9} } type dealProposals9 struct { diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index b4ad3a4b5..089e3dc68 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" + minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/manifest" @@ -239,7 +240,9 @@ type DeclareFaultsParams = minertypes.DeclareFaultsParams type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams type ProveCommitSectorParams = minertypes.ProveCommitSectorParams type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams +type ProveReplicaUpdatesParams2 = minertypes.ProveReplicaUpdatesParams2 type ReplicaUpdate = minertypes.ReplicaUpdate +type ReplicaUpdate2 = minertypes.ReplicaUpdate2 type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2 type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params @@ -248,6 +251,12 @@ type ExpirationExtension2 = minertypes.ExpirationExtension2 type CompactPartitionsParams = minertypes.CompactPartitionsParams type WithdrawBalanceParams = minertypes.WithdrawBalanceParams +type PieceActivationManifest = minertypes13.PieceActivationManifest +type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params +type SectorActivationManifest = minertypes13.SectorActivationManifest +type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params +type 
SectorUpdateManifest = minertypes13.SectorUpdateManifest + var QAPowerMax = minertypes.QAPowerMax type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 1637cdade..6bb1028f3 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -9,6 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" + minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/dline" @@ -51,6 +52,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -121,6 +125,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version12: return make12(store) + case actors.Version13: + return make13(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -299,7 +306,9 @@ type DeclareFaultsParams = minertypes.DeclareFaultsParams type ProveCommitAggregateParams = minertypes.ProveCommitAggregateParams type ProveCommitSectorParams = minertypes.ProveCommitSectorParams type ProveReplicaUpdatesParams = minertypes.ProveReplicaUpdatesParams +type ProveReplicaUpdatesParams2 = minertypes.ProveReplicaUpdatesParams2 type ReplicaUpdate = minertypes.ReplicaUpdate +type ReplicaUpdate2 = minertypes.ReplicaUpdate2 type PreCommitSectorBatchParams = minertypes.PreCommitSectorBatchParams type PreCommitSectorBatchParams2 = minertypes.PreCommitSectorBatchParams2 type ExtendSectorExpiration2Params = minertypes.ExtendSectorExpiration2Params @@ -308,6 +317,12 @@ type ExpirationExtension2 = 
minertypes.ExpirationExtension2 type CompactPartitionsParams = minertypes.CompactPartitionsParams type WithdrawBalanceParams = minertypes.WithdrawBalanceParams +type PieceActivationManifest = minertypes13.PieceActivationManifest +type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params +type SectorActivationManifest = minertypes13.SectorActivationManifest +type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params +type SectorUpdateManifest = minertypes13.SectorUpdateManifest + var QAPowerMax = minertypes.QAPowerMax type WindowPostVerifyInfo = proof.WindowPoStVerifyInfo @@ -374,5 +389,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/miner/v13.go b/chain/actors/builtin/miner/v13.go new file mode 100644 index 000000000..bbd95f818 --- /dev/null +++ b/chain/actors/builtin/miner/v13.go @@ -0,0 +1,591 @@ +package miner + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/dline" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func 
make13(store adt.Store) (State, error) { + out := state13{store: store} + out.State = miner13.State{} + return &out, nil +} + +type state13 struct { + miner13.State + store adt.Store +} + +type deadline13 struct { + miner13.Deadline + store adt.Store +} + +type partition13 struct { + miner13.Partition + store adt.Store +} + +func (s *state13) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesn't have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state13) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state13) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state13) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state13) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state13) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +// Returns nil, nil if sector is not found +func (s *state13) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV13SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state13) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state13) NumLiveSectors() (uint64, 
error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner13.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state13) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner13.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner13.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner13.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner13.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner13.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if 
onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state13) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV13SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state13) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt13.AsMap(s.store, s.State.PreCommittedSectors, builtin13.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner13.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV13SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state13) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner13.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info13 miner13.SectorOnChainInfo + if err := sectors.ForEach(&info13, func(_ int64) error { + info := fromV13SectorOnChainInfo(info13) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos13, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos13)) + for i, info13 := range infos13 { + info := fromV13SectorOnChainInfo(*info13) + infos[i] = &info + } + return infos, nil +} + +func (s *state13) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state13) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state13) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state13) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state13) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state13) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline13{*dl, s.store}, nil +} + +func (s *state13) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner13.Deadline) error { + return cb(i, &deadline13{*dl, s.store}) + }) +} + +func (s *state13) NumDeadlines() (uint64, error) { + return miner13.WPoStPeriodDeadlines, nil +} + +func (s *state13) DeadlinesChanged(other State) (bool, error) { + other13, ok := other.(*state13) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other13.Deadlines), nil +} + +func (s *state13) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state13) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state13) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + PendingWorkerKey: (*WorkerKeyChange)(info.PendingWorkerKey), + + PeerId: info.PeerId, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + + Beneficiary: info.Beneficiary, + BeneficiaryTerm: BeneficiaryTerm(info.BeneficiaryTerm), + PendingBeneficiaryTerm: (*PendingBeneficiaryChange)(info.PendingBeneficiaryTerm), + } + + return mi, nil +} + +func (s *state13) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + +func (s *state13) DeadlineCronActive() 
(bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state13) sectors() (adt.Array, error) { + return adt13.AsArray(s.store, s.Sectors, miner13.SectorsAmtBitwidth) +} + +func (s *state13) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner13.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV13SectorOnChainInfo(si), nil +} + +func (s *state13) precommits() (adt.Map, error) { + return adt13.AsMap(s.store, s.PreCommittedSectors, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner13.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV13SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state13) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner13.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner13.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline13) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition13{*p, d.store}, nil +} + +func (d *deadline13) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) + if err != nil { + 
return err + } + var part miner13.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition13{part, d.store}) + }) +} + +func (d *deadline13) PartitionsChanged(other Deadline) (bool, error) { + other13, ok := other.(*deadline13) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other13.Deadline.Partitions), nil +} + +func (d *deadline13) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline13) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition13) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition13) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition13) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition13) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV13SectorOnChainInfo(v13 miner13.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v13.SectorNumber, + SealProof: v13.SealProof, + SealedCID: v13.SealedCID, + DealIDs: v13.DealIDs, + Activation: v13.Activation, + Expiration: v13.Expiration, + DealWeight: v13.DealWeight, + VerifiedDealWeight: v13.VerifiedDealWeight, + InitialPledge: v13.InitialPledge, + ExpectedDayReward: v13.ExpectedDayReward, + ExpectedStoragePledge: v13.ExpectedStoragePledge, + + SectorKeyCID: v13.SectorKeyCID, + } + return info +} + +func fromV13SectorPreCommitOnChainInfo(v13 miner13.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + ret := SectorPreCommitOnChainInfo{ + Info: SectorPreCommitInfo{ + SealProof: v13.Info.SealProof, + SectorNumber: v13.Info.SectorNumber, + SealedCID: v13.Info.SealedCID, + 
SealRandEpoch: v13.Info.SealRandEpoch, + DealIDs: v13.Info.DealIDs, + Expiration: v13.Info.Expiration, + UnsealedCid: nil, + }, + PreCommitDeposit: v13.PreCommitDeposit, + PreCommitEpoch: v13.PreCommitEpoch, + } + + ret.Info.UnsealedCid = v13.Info.UnsealedCid + + return ret +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ActorKey() string { + return manifest.MinerKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/multisig/message10.go b/chain/actors/builtin/multisig/message10.go index 8f7bb5a6f..59dd4dde0 100644 --- a/chain/actors/builtin/multisig/message10.go +++ b/chain/actors/builtin/multisig/message10.go @@ -8,7 +8,7 @@ import ( actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" multisig10 "github.com/filecoin-project/go-state-types/builtin/v10/multisig" - init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -57,7 +57,7 @@ func (m message10) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init12.ExecParams{ + execParams := &init13.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message11.go b/chain/actors/builtin/multisig/message11.go index 4c7520d5d..89bee0255 100644 --- a/chain/actors/builtin/multisig/message11.go +++ b/chain/actors/builtin/multisig/message11.go @@ -8,7 +8,7 @@ import ( actorstypes 
"github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" multisig11 "github.com/filecoin-project/go-state-types/builtin/v11/multisig" - init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -57,7 +57,7 @@ func (m message11) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init12.ExecParams{ + execParams := &init13.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message12.go b/chain/actors/builtin/multisig/message12.go index 43658c04b..326026c93 100644 --- a/chain/actors/builtin/multisig/message12.go +++ b/chain/actors/builtin/multisig/message12.go @@ -7,8 +7,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" multisig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/lotus/chain/actors" @@ -57,7 +57,7 @@ func (m message12) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init12.ExecParams{ + execParams := &init13.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message13.go b/chain/actors/builtin/multisig/message13.go new file mode 100644 index 000000000..94a9cbfbf --- /dev/null +++ b/chain/actors/builtin/multisig/message13.go @@ -0,0 +1,77 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + 
"github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtintypes "github.com/filecoin-project/go-state-types/builtin" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + multisig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message13 struct{ message0 } + +func (m message13) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig13.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + code, ok := actors.GetActorCodeID(actorstypes.Version13, manifest.MultisigKey) + if !ok { + return nil, xerrors.Errorf("failed to get multisig code ID") + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init13.ExecParams{ + CodeCID: code, + ConstructorParams: enc, + } + + enc, actErr = actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: 
builtintypes.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/message8.go b/chain/actors/builtin/multisig/message8.go index 390c94691..5d79fe6c5 100644 --- a/chain/actors/builtin/multisig/message8.go +++ b/chain/actors/builtin/multisig/message8.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" multisig8 "github.com/filecoin-project/go-state-types/builtin/v8/multisig" "github.com/filecoin-project/go-state-types/manifest" @@ -57,7 +57,7 @@ func (m message8) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init12.ExecParams{ + execParams := &init13.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff --git a/chain/actors/builtin/multisig/message9.go b/chain/actors/builtin/multisig/message9.go index 907bec7d5..9003b7e38 100644 --- a/chain/actors/builtin/multisig/message9.go +++ b/chain/actors/builtin/multisig/message9.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" multisig9 "github.com/filecoin-project/go-state-types/builtin/v9/multisig" "github.com/filecoin-project/go-state-types/manifest" @@ -57,7 +57,7 @@ func (m message9) Create( } // new actors are created by invoking 'exec' on the init actor with the constructor params - execParams := &init12.ExecParams{ + execParams := &init13.ExecParams{ CodeCID: code, ConstructorParams: enc, } diff 
--git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go index 71a3b7b22..08da9bd2d 100644 --- a/chain/actors/builtin/multisig/multisig.go +++ b/chain/actors/builtin/multisig/multisig.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" builtintypes "github.com/filecoin-project/go-state-types/builtin" - msig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig" + msig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -51,6 +51,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -121,6 +124,9 @@ func MakeState(store adt.Store, av actorstypes.Version, signers []address.Addres case actorstypes.Version12: return make12(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + case actorstypes.Version13: + return make13(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -147,7 +153,7 @@ type State interface { GetState() interface{} } -type Transaction = msig12.Transaction +type Transaction = msig13.Transaction var Methods = builtintypes.MethodsMultisig @@ -189,6 +195,9 @@ func Message(version actorstypes.Version, from address.Address) MessageBuilder { case actorstypes.Version12: return message12{message0{from}} + + case actorstypes.Version13: + return message13{message0{from}} default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -212,13 +221,13 @@ type MessageBuilder interface { } // this type is the same between v0 and v2 -type ProposalHashData = msig12.ProposalHashData -type 
ProposeReturn = msig12.ProposeReturn -type ProposeParams = msig12.ProposeParams -type ApproveReturn = msig12.ApproveReturn +type ProposalHashData = msig13.ProposalHashData +type ProposeReturn = msig13.ProposeReturn +type ProposeParams = msig13.ProposeParams +type ApproveReturn = msig13.ApproveReturn func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { - params := msig12.TxnIDParams{ID: msig12.TxnID(id)} + params := msig13.TxnIDParams{ID: msig13.TxnID(id)} if data != nil { if data.Requester.Protocol() != address.ID { return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) @@ -254,5 +263,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/multisig/v13.go b/chain/actors/builtin/multisig/v13.go new file mode 100644 index 000000000..57dd66976 --- /dev/null +++ b/chain/actors/builtin/multisig/v13.go @@ -0,0 +1,138 @@ +package multisig + +import ( + "bytes" + "encoding/binary" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + msig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, signers []address.Address, threshold 
uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state13{store: store} + out.State = msig13.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt13.StoreEmptyMap(store, builtin13.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state13 struct { + msig13.State + store adt.Store +} + +func (s *state13) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state13) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state13) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state13) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state13) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + +func (s *state13) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state13) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt13.AsMap(s.store, s.State.PendingTxns, builtin13.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig13.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state13) PendingTxnChanged(other State) (bool, error) { + other13, ok := other.(*state13) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other13.PendingTxns), nil +} + +func (s 
*state13) transactions() (adt.Map, error) { + return adt13.AsMap(s.store, s.PendingTxns, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig13.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return Transaction(tx), nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ActorKey() string { + return manifest.MultisigKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/paych/message13.go b/chain/actors/builtin/paych/message13.go new file mode 100644 index 000000000..1614ec608 --- /dev/null +++ b/chain/actors/builtin/paych/message13.go @@ -0,0 +1,109 @@ +package paych + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + paych13 "github.com/filecoin-project/go-state-types/builtin/v13/paych" + paychtypes "github.com/filecoin-project/go-state-types/builtin/v8/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message13 struct{ from address.Address } + +func (m message13) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + + actorCodeID, ok := actors.GetActorCodeID(actorstypes.Version13, "paymentchannel") + if !ok { + return 
nil, xerrors.Errorf("error getting actor paymentchannel code id for actor version %d", 13) + } + + params, aerr := actors.SerializeParams(&paych13.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init13.ExecParams{ + CodeCID: actorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin13.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message13) Update(paych address.Address, sv *paychtypes.SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych13.UpdateChannelStateParams{ + + Sv: toV13SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin13.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func toV13SignedVoucher(sv paychtypes.SignedVoucher) paych13.SignedVoucher { + merges := make([]paych13.Merge, len(sv.Merges)) + for i := range sv.Merges { + merges[i] = paych13.Merge{ + Lane: sv.Merges[i].Lane, + Nonce: sv.Merges[i].Nonce, + } + } + + return paych13.SignedVoucher{ + ChannelAddr: sv.ChannelAddr, + TimeLockMin: sv.TimeLockMin, + TimeLockMax: sv.TimeLockMax, + SecretHash: sv.SecretHash, + Extra: (*paych13.ModVerifyParams)(sv.Extra), + Lane: sv.Lane, + Nonce: sv.Nonce, + Amount: sv.Amount, + MinSettleHeight: sv.MinSettleHeight, + Merges: merges, + Signature: sv.Signature, + } +} + +func (m message13) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin13.MethodsPaych.Settle, + }, nil +} + +func (m message13) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + 
Method: builtin13.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go index fc8908cb4..2b5c78edf 100644 --- a/chain/actors/builtin/paych/paych.go +++ b/chain/actors/builtin/paych/paych.go @@ -53,6 +53,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -173,6 +176,9 @@ func Message(version actorstypes.Version, from address.Address) MessageBuilder { case actorstypes.Version12: return message12{from} + case actorstypes.Version13: + return message13{from} + default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -215,5 +221,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/paych/v13.go b/chain/actors/builtin/paych/v13.go new file mode 100644 index 000000000..c5a10c571 --- /dev/null +++ b/chain/actors/builtin/paych/v13.go @@ -0,0 +1,135 @@ +package paych + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + paych13 "github.com/filecoin-project/go-state-types/builtin/v13/paych" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store) (State, error) { + out := state13{store: store} + 
out.State = paych13.State{} + return &out, nil +} + +type state13 struct { + paych13.State + store adt.Store + lsAmt *adt13.Array +} + +// Channel owner, who has funded the actor +func (s *state13) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state13) To() (address.Address, error) { + return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state13) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state13) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state13) getOrLoadLsAmt() (*adt13.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt13.AsArray(s.store, s.State.LaneStates, paych13.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state13) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state13) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych13.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState13{ls}) + }) +} + +type laneState13 struct { + paych13.LaneState +} + +func (ls *laneState13) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState13) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} + +func (s *state13) ActorKey() string { + return manifest.PaychKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index 9b64ded38..e263e3f87 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -9,7 +9,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -27,8 +27,8 @@ import ( ) var ( - Address = builtin12.StoragePowerActorAddr - Methods = builtin12.MethodsPower + Address = builtin13.StoragePowerActorAddr + Methods = builtin13.MethodsPower ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -54,6 +54,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -124,6 +127,9 @@ func MakeState(store adt.Store, av 
actorstypes.Version) (State, error) { case actorstypes.Version12: return make12(store) + case actorstypes.Version13: + return make13(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -190,5 +196,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/power/v13.go b/chain/actors/builtin/power/v13.go new file mode 100644 index 000000000..4cf761b16 --- /dev/null +++ b/chain/actors/builtin/power/v13.go @@ -0,0 +1,207 @@ +package power + +import ( + "bytes" + "fmt" + + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + power13 "github.com/filecoin-project/go-state-types/builtin/v13/power" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store) (State, error) { + out := state13{store: store} + + s, err := power13.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state13 struct { + power13.State + store adt.Store +} + +func (s *state13) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state13) TotalPower() (Claim, error) { + return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: 
s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state13) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state13) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power13.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state13) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state13) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state13) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state13) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state13) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power13.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s 
*state13) ClaimsChanged(other State) (bool, error) { + other13, ok := other.(*state13) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other13.State.Claims), nil +} + +func (s *state13) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state13) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state13) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state13) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) claims() (adt.Map, error) { + return adt13.AsMap(s.store, s.Claims, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power13.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV13Claim(ci), nil +} + +func fromV13Claim(v13 power13.Claim) Claim { + return Claim{ + RawBytePower: v13.RawBytePower, + QualityAdjPower: v13.QualityAdjPower, + } +} + +func (s *state13) ActorKey() string { + return manifest.PowerKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/registry.go b/chain/actors/builtin/registry.go index 6ba5fef03..93768580b 100644 --- a/chain/actors/builtin/registry.go +++ b/chain/actors/builtin/registry.go @@ -58,6 +58,22 @@ import ( reward12 "github.com/filecoin-project/go-state-types/builtin/v12/reward" system12 
"github.com/filecoin-project/go-state-types/builtin/v12/system" verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg" + account13 "github.com/filecoin-project/go-state-types/builtin/v13/account" + cron13 "github.com/filecoin-project/go-state-types/builtin/v13/cron" + datacap13 "github.com/filecoin-project/go-state-types/builtin/v13/datacap" + eam13 "github.com/filecoin-project/go-state-types/builtin/v13/eam" + ethaccount13 "github.com/filecoin-project/go-state-types/builtin/v13/ethaccount" + evm13 "github.com/filecoin-project/go-state-types/builtin/v13/evm" + _init13 "github.com/filecoin-project/go-state-types/builtin/v13/init" + market13 "github.com/filecoin-project/go-state-types/builtin/v13/market" + miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + multisig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig" + paych13 "github.com/filecoin-project/go-state-types/builtin/v13/paych" + placeholder13 "github.com/filecoin-project/go-state-types/builtin/v13/placeholder" + power13 "github.com/filecoin-project/go-state-types/builtin/v13/power" + reward13 "github.com/filecoin-project/go-state-types/builtin/v13/reward" + system13 "github.com/filecoin-project/go-state-types/builtin/v13/system" + verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" account8 "github.com/filecoin-project/go-state-types/builtin/v8/account" cron8 "github.com/filecoin-project/go-state-types/builtin/v8/cron" _init8 "github.com/filecoin-project/go-state-types/builtin/v8/init" @@ -617,6 +633,110 @@ func MakeRegistry(av actorstypes.Version) []RegistryEntry { } } + case actorstypes.Version13: + for key, codeID := range codeIDs { + switch key { + case manifest.AccountKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: account13.Methods, + state: new(account13.State), + }) + case manifest.CronKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: cron13.Methods, + state: 
new(cron13.State), + }) + case manifest.InitKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: _init13.Methods, + state: new(_init13.State), + }) + case manifest.MarketKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: market13.Methods, + state: new(market13.State), + }) + case manifest.MinerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: miner13.Methods, + state: new(miner13.State), + }) + case manifest.MultisigKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: multisig13.Methods, + state: new(multisig13.State), + }) + case manifest.PaychKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: paych13.Methods, + state: new(paych13.State), + }) + case manifest.PowerKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: power13.Methods, + state: new(power13.State), + }) + case manifest.RewardKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: reward13.Methods, + state: new(reward13.State), + }) + case manifest.SystemKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: system13.Methods, + state: new(system13.State), + }) + case manifest.VerifregKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: verifreg13.Methods, + state: new(verifreg13.State), + }) + case manifest.DatacapKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: datacap13.Methods, + state: new(datacap13.State), + }) + + case manifest.EvmKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: evm13.Methods, + state: new(evm13.State), + }) + case manifest.EamKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: eam13.Methods, + state: nil, + }) + case manifest.PlaceholderKey: + registry = append(registry, RegistryEntry{ + code: codeID, + methods: placeholder13.Methods, + state: nil, + }) + case manifest.EthAccountKey: + 
registry = append(registry, RegistryEntry{ + code: codeID, + methods: ethaccount13.Methods, + state: nil, + }) + + } + } + default: panic("expected version v8 and up only, use specs-actors for v0-7") } diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index 3c6463645..a6c8bff5b 100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -6,7 +6,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -25,8 +25,8 @@ import ( ) var ( - Address = builtin12.RewardActorAddr - Methods = builtin12.MethodsReward + Address = builtin13.RewardActorAddr + Methods = builtin13.MethodsReward ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -52,6 +52,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -122,6 +125,9 @@ func MakeState(store adt.Store, av actorstypes.Version, currRealizedPower abi.St case actorstypes.Version12: return make12(store, currRealizedPower) + case actorstypes.Version13: + return make13(store, currRealizedPower) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -166,5 +172,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/reward/v13.go b/chain/actors/builtin/reward/v13.go new file mode 100644 index 000000000..e8d343cab --- /dev/null +++ b/chain/actors/builtin/reward/v13.go @@ -0,0 +1,120 @@ +package reward + +import ( + "fmt" + + 
"github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + reward13 "github.com/filecoin-project/go-state-types/builtin/v13/reward" + smoothing13 "github.com/filecoin-project/go-state-types/builtin/v13/util/smoothing" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state13{store: store} + out.State = *reward13.ConstructState(currRealizedPower) + return &out, nil +} + +type state13 struct { + reward13.State + store adt.Store +} + +func (s *state13) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state13) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state13) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state13) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state13) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state13) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state13) CumsumBaseline() 
(reward13.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state13) CumsumRealized() (reward13.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state13) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner13.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing13.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state13) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner13.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing13.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) ActorKey() string { + return manifest.RewardKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go index 2a2b703bb..1526a1fc0 100644 --- a/chain/actors/builtin/system/system.go +++ b/chain/actors/builtin/system/system.go @@ -5,7 +5,7 @@ import ( "golang.org/x/xerrors" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" 
"github.com/filecoin-project/go-state-types/manifest" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -21,7 +21,7 @@ import ( ) var ( - Address = builtin12.SystemActorAddr + Address = builtin13.SystemActorAddr ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -47,6 +47,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -117,6 +120,9 @@ func MakeState(store adt.Store, av actorstypes.Version, builtinActors cid.Cid) ( case actorstypes.Version12: return make12(store, builtinActors) + case actorstypes.Version13: + return make13(store, builtinActors) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -145,5 +151,6 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/builtin/system/v13.go b/chain/actors/builtin/system/v13.go new file mode 100644 index 000000000..8facf0033 --- /dev/null +++ b/chain/actors/builtin/system/v13.go @@ -0,0 +1,72 @@ +package system + +import ( + "fmt" + + "github.com/ipfs/go-cid" + + actorstypes "github.com/filecoin-project/go-state-types/actors" + system13 "github.com/filecoin-project/go-state-types/builtin/v13/system" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, builtinActors cid.Cid) (State, error) { + out := state13{store: store} + out.State = system13.State{ + BuiltinActors: builtinActors, + } + 
return &out, nil +} + +type state13 struct { + system13.State + store adt.Store +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) GetBuiltinActors() cid.Cid { + + return s.State.BuiltinActors + +} + +func (s *state13) SetBuiltinActors(c cid.Cid) error { + + s.State.BuiltinActors = c + return nil + +} + +func (s *state13) ActorKey() string { + return manifest.SystemKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git a/chain/actors/builtin/verifreg/actor.go.template b/chain/actors/builtin/verifreg/actor.go.template index 991c6717b..f4467d979 100644 --- a/chain/actors/builtin/verifreg/actor.go.template +++ b/chain/actors/builtin/verifreg/actor.go.template @@ -81,8 +81,10 @@ type State interface { ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error GetAllocation(clientIdAddr address.Address, allocationId AllocationId) (*Allocation, bool, error) GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) + GetAllAllocations() (map[AllocationId]Allocation, error) GetClaim(providerIdAddr address.Address, claimId ClaimId) (*Claim, bool, error) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) + GetAllClaims() (map[ClaimId]Claim, error) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) GetState() interface{} } diff --git a/chain/actors/builtin/verifreg/state.go.template b/chain/actors/builtin/verifreg/state.go.template index adcbc22c2..7835b16b6 100644 --- a/chain/actors/builtin/verifreg/state.go.template +++ b/chain/actors/builtin/verifreg/state.go.template @@ -145,6 +145,21 @@ func (s *state{{.v}}) GetAllocations(clientIdAddr 
address.Address) (map[Allocati {{end}} } +func (s *state{{.v}}) GetAllAllocations() (map[AllocationId]Allocation, error) { +{{if (le .v 8)}} + return nil, xerrors.Errorf("unsupported in actors v{{.v}}") +{{else}} + v{{.v}}Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v{{.v}}Map)) + for k, v := range v{{.v}}Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err +{{end}} +} + func (s *state{{.v}}) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { {{if (le .v 8)}} return nil, false, xerrors.Errorf("unsupported in actors v{{.v}}") @@ -170,6 +185,22 @@ func (s *state{{.v}}) GetClaims(providerIdAddr address.Address) (map[ClaimId]Cla {{end}} } +func (s *state{{.v}}) GetAllClaims() (map[ClaimId]Claim, error) { +{{if (le .v 8)}} + return nil, xerrors.Errorf("unsupported in actors v{{.v}}") +{{else}} + v{{.v}}Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v{{.v}}Map)) + for k, v := range v{{.v}}Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +{{end}} +} + func (s *state{{.v}}) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { {{if (le .v 8)}} return nil, xerrors.Errorf("unsupported in actors v{{.v}}") diff --git a/chain/actors/builtin/verifreg/v0.go b/chain/actors/builtin/verifreg/v0.go index 9913c42c0..4129e7a2d 100644 --- a/chain/actors/builtin/verifreg/v0.go +++ b/chain/actors/builtin/verifreg/v0.go @@ -106,6 +106,12 @@ func (s *state0) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state0) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v0") + +} + func (s *state0) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v0") @@ -118,6 +124,12 @@ func (s *state0) 
GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state0) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v0") + +} + func (s *state0) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v0") diff --git a/chain/actors/builtin/verifreg/v10.go b/chain/actors/builtin/verifreg/v10.go index 256f4d2f8..85f85c7f2 100644 --- a/chain/actors/builtin/verifreg/v10.go +++ b/chain/actors/builtin/verifreg/v10.go @@ -114,6 +114,19 @@ func (s *state10) GetAllocations(clientIdAddr address.Address) (map[AllocationId } +func (s *state10) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v10Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v10Map)) + for k, v := range v10Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + func (s *state10) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg10.ClaimId(claimId)) @@ -134,6 +147,19 @@ func (s *state10) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, } +func (s *state10) GetAllClaims() (map[ClaimId]Claim, error) { + + v10Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v10Map)) + for k, v := range v10Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + func (s *state10) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { v10Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) diff --git a/chain/actors/builtin/verifreg/v11.go b/chain/actors/builtin/verifreg/v11.go index 7b7b9e4c0..5a8e1cb0c 100644 --- a/chain/actors/builtin/verifreg/v11.go +++ b/chain/actors/builtin/verifreg/v11.go @@ -114,6 +114,19 @@ func (s *state11) GetAllocations(clientIdAddr address.Address) 
(map[AllocationId } +func (s *state11) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v11Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v11Map)) + for k, v := range v11Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + func (s *state11) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg11.ClaimId(claimId)) @@ -134,6 +147,19 @@ func (s *state11) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, } +func (s *state11) GetAllClaims() (map[ClaimId]Claim, error) { + + v11Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v11Map)) + for k, v := range v11Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + func (s *state11) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { v11Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) diff --git a/chain/actors/builtin/verifreg/v12.go b/chain/actors/builtin/verifreg/v12.go index 77a113fbe..7c9a493f1 100644 --- a/chain/actors/builtin/verifreg/v12.go +++ b/chain/actors/builtin/verifreg/v12.go @@ -114,6 +114,19 @@ func (s *state12) GetAllocations(clientIdAddr address.Address) (map[AllocationId } +func (s *state12) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v12Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v12Map)) + for k, v := range v12Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + func (s *state12) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg12.ClaimId(claimId)) @@ -134,6 +147,19 @@ func (s *state12) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, } +func (s *state12) GetAllClaims() 
(map[ClaimId]Claim, error) { + + v12Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v12Map)) + for k, v := range v12Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + func (s *state12) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { v12Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) diff --git a/chain/actors/builtin/verifreg/v13.go b/chain/actors/builtin/verifreg/v13.go new file mode 100644 index 000000000..0c487a2f7 --- /dev/null +++ b/chain/actors/builtin/verifreg/v13.go @@ -0,0 +1,196 @@ +package verifreg + +import ( + "fmt" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" + builtin13 "github.com/filecoin-project/go-state-types/builtin" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" + verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + "github.com/filecoin-project/go-state-types/manifest" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var _ State = (*state13)(nil) + +func load13(store adt.Store, root cid.Cid) (State, error) { + out := state13{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make13(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state13{store: store} + + s, err := verifreg13.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state13 struct { + verifreg13.State + store adt.Store +} + +func (s *state13) RootKey() (address.Address, error) { + return 
s.State.RootKey, nil +} + +func (s *state13) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + + return false, big.Zero(), xerrors.Errorf("unsupported in actors v13") + +} + +func (s *state13) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version13, s.verifiers, addr) +} + +func (s *state13) RemoveDataCapProposalID(verifier address.Address, client address.Address) (bool, uint64, error) { + return getRemoveDataCapProposalID(s.store, actors.Version13, s.removeDataCapProposalIDs, verifier, client) +} + +func (s *state13) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version13, s.verifiers, cb) +} + +func (s *state13) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + + return xerrors.Errorf("unsupported in actors v13") + +} + +func (s *state13) verifiedClients() (adt.Map, error) { + + return nil, xerrors.Errorf("unsupported in actors v13") + +} + +func (s *state13) verifiers() (adt.Map, error) { + return adt13.AsMap(s.store, s.Verifiers, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) removeDataCapProposalIDs() (adt.Map, error) { + return adt13.AsMap(s.store, s.RemoveDataCapProposalIDs, builtin13.DefaultHamtBitwidth) +} + +func (s *state13) GetState() interface{} { + return &s.State +} + +func (s *state13) GetAllocation(clientIdAddr address.Address, allocationId verifreg9.AllocationId) (*Allocation, bool, error) { + + alloc, ok, err := s.FindAllocation(s.store, clientIdAddr, verifreg13.AllocationId(allocationId)) + return (*Allocation)(alloc), ok, err +} + +func (s *state13) GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) { + + v13Map, err := s.LoadAllocationsToMap(s.store, clientIdAddr) + + retMap := make(map[AllocationId]Allocation, len(v13Map)) + for k, v := range v13Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return 
retMap, err + +} + +func (s *state13) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v13Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v13Map)) + for k, v := range v13Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + +func (s *state13) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { + + claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg13.ClaimId(claimId)) + return (*Claim)(claim), ok, err + +} + +func (s *state13) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) { + + v13Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) + + retMap := make(map[ClaimId]Claim, len(v13Map)) + for k, v := range v13Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + +func (s *state13) GetAllClaims() (map[ClaimId]Claim, error) { + + v13Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v13Map)) + for k, v := range v13Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + +func (s *state13) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { + + v13Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) + + retMap := make(map[abi.SectorNumber][]ClaimId) + for k, v := range v13Map { + claims, ok := retMap[v.Sector] + if !ok { + retMap[v.Sector] = []ClaimId{ClaimId(k)} + } else { + retMap[v.Sector] = append(claims, ClaimId(k)) + } + } + + return retMap, err + +} + +func (s *state13) ActorKey() string { + return manifest.VerifregKey +} + +func (s *state13) ActorVersion() actorstypes.Version { + return actorstypes.Version13 +} + +func (s *state13) Code() cid.Cid { + code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey()) + if !ok { + panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion())) + } + + return code +} diff --git 
a/chain/actors/builtin/verifreg/v2.go b/chain/actors/builtin/verifreg/v2.go index 31f7f775d..7f71639e6 100644 --- a/chain/actors/builtin/verifreg/v2.go +++ b/chain/actors/builtin/verifreg/v2.go @@ -106,6 +106,12 @@ func (s *state2) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state2) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v2") + +} + func (s *state2) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v2") @@ -118,6 +124,12 @@ func (s *state2) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state2) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v2") + +} + func (s *state2) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v2") diff --git a/chain/actors/builtin/verifreg/v3.go b/chain/actors/builtin/verifreg/v3.go index 3ea016fd5..3e8ea9a1f 100644 --- a/chain/actors/builtin/verifreg/v3.go +++ b/chain/actors/builtin/verifreg/v3.go @@ -107,6 +107,12 @@ func (s *state3) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state3) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v3") + +} + func (s *state3) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v3") @@ -119,6 +125,12 @@ func (s *state3) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state3) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v3") + +} + func (s *state3) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, 
xerrors.Errorf("unsupported in actors v3") diff --git a/chain/actors/builtin/verifreg/v4.go b/chain/actors/builtin/verifreg/v4.go index 464cc9fdc..1dc438864 100644 --- a/chain/actors/builtin/verifreg/v4.go +++ b/chain/actors/builtin/verifreg/v4.go @@ -107,6 +107,12 @@ func (s *state4) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state4) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v4") + +} + func (s *state4) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v4") @@ -119,6 +125,12 @@ func (s *state4) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state4) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v4") + +} + func (s *state4) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v4") diff --git a/chain/actors/builtin/verifreg/v5.go b/chain/actors/builtin/verifreg/v5.go index 17901dd23..a7505330c 100644 --- a/chain/actors/builtin/verifreg/v5.go +++ b/chain/actors/builtin/verifreg/v5.go @@ -107,6 +107,12 @@ func (s *state5) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state5) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v5") + +} + func (s *state5) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v5") @@ -119,6 +125,12 @@ func (s *state5) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state5) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v5") + +} + func (s *state5) GetClaimIdsBySector(providerIdAddr address.Address) 
(map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v5") diff --git a/chain/actors/builtin/verifreg/v6.go b/chain/actors/builtin/verifreg/v6.go index 68fac64cb..93424152e 100644 --- a/chain/actors/builtin/verifreg/v6.go +++ b/chain/actors/builtin/verifreg/v6.go @@ -107,6 +107,12 @@ func (s *state6) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state6) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v6") + +} + func (s *state6) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v6") @@ -119,6 +125,12 @@ func (s *state6) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state6) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v6") + +} + func (s *state6) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v6") diff --git a/chain/actors/builtin/verifreg/v7.go b/chain/actors/builtin/verifreg/v7.go index e8f3ac739..bd67aee5f 100644 --- a/chain/actors/builtin/verifreg/v7.go +++ b/chain/actors/builtin/verifreg/v7.go @@ -106,6 +106,12 @@ func (s *state7) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state7) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v7") + +} + func (s *state7) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v7") @@ -118,6 +124,12 @@ func (s *state7) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state7) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v7") + +} + func (s *state7) 
GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v7") diff --git a/chain/actors/builtin/verifreg/v8.go b/chain/actors/builtin/verifreg/v8.go index 89393c4d9..1515c1c5b 100644 --- a/chain/actors/builtin/verifreg/v8.go +++ b/chain/actors/builtin/verifreg/v8.go @@ -106,6 +106,12 @@ func (s *state8) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state8) GetAllAllocations() (map[AllocationId]Allocation, error) { + + return nil, xerrors.Errorf("unsupported in actors v8") + +} + func (s *state8) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { return nil, false, xerrors.Errorf("unsupported in actors v8") @@ -118,6 +124,12 @@ func (s *state8) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state8) GetAllClaims() (map[ClaimId]Claim, error) { + + return nil, xerrors.Errorf("unsupported in actors v8") + +} + func (s *state8) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { return nil, xerrors.Errorf("unsupported in actors v8") diff --git a/chain/actors/builtin/verifreg/v9.go b/chain/actors/builtin/verifreg/v9.go index ce63c7f94..41422615b 100644 --- a/chain/actors/builtin/verifreg/v9.go +++ b/chain/actors/builtin/verifreg/v9.go @@ -113,6 +113,19 @@ func (s *state9) GetAllocations(clientIdAddr address.Address) (map[AllocationId] } +func (s *state9) GetAllAllocations() (map[AllocationId]Allocation, error) { + + v9Map, err := s.State.GetAllAllocations(s.store) + + retMap := make(map[AllocationId]Allocation, len(v9Map)) + for k, v := range v9Map { + retMap[AllocationId(k)] = Allocation(v) + } + + return retMap, err + +} + func (s *state9) GetClaim(providerIdAddr address.Address, claimId verifreg9.ClaimId) (*Claim, bool, error) { claim, ok, err := s.FindClaim(s.store, providerIdAddr, verifreg9.ClaimId(claimId)) @@ -133,6 +146,19 @@ func (s 
*state9) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, e } +func (s *state9) GetAllClaims() (map[ClaimId]Claim, error) { + + v9Map, err := s.State.GetAllClaims(s.store) + + retMap := make(map[ClaimId]Claim, len(v9Map)) + for k, v := range v9Map { + retMap[ClaimId(k)] = Claim(v) + } + + return retMap, err + +} + func (s *state9) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) { v9Map, err := s.LoadClaimsToMap(s.store, providerIdAddr) diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index de906f521..2d66d9028 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -7,7 +7,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" - builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/manifest" @@ -25,8 +25,8 @@ import ( ) var ( - Address = builtin12.VerifiedRegistryActorAddr - Methods = builtin12.MethodsVerifiedRegistry + Address = builtin13.VerifiedRegistryActorAddr + Methods = builtin13.MethodsVerifiedRegistry ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -52,6 +52,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case actorstypes.Version12: return load12(store, act.Head) + case actorstypes.Version13: + return load13(store, act.Head) + } } @@ -122,6 +125,9 @@ func MakeState(store adt.Store, av actorstypes.Version, rootKeyAddress address.A case actorstypes.Version12: return make12(store, rootKeyAddress) + case actorstypes.Version13: + return make13(store, rootKeyAddress) + } return nil, xerrors.Errorf("unknown actor 
version %d", av) } @@ -141,8 +147,10 @@ type State interface { ForEachClient(func(addr address.Address, dcap abi.StoragePower) error) error GetAllocation(clientIdAddr address.Address, allocationId AllocationId) (*Allocation, bool, error) GetAllocations(clientIdAddr address.Address) (map[AllocationId]Allocation, error) + GetAllAllocations() (map[AllocationId]Allocation, error) GetClaim(providerIdAddr address.Address, claimId ClaimId) (*Claim, bool, error) GetClaims(providerIdAddr address.Address) (map[ClaimId]Claim, error) + GetAllClaims() (map[ClaimId]Claim, error) GetClaimIdsBySector(providerIdAddr address.Address) (map[abi.SectorNumber][]ClaimId, error) GetState() interface{} } @@ -161,6 +169,7 @@ func AllCodes() []cid.Cid { (&state10{}).Code(), (&state11{}).Code(), (&state12{}).Code(), + (&state13{}).Code(), } } diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index 6d2b41154..b8d23903c 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -9,6 +9,7 @@ import ( builtin10 "github.com/filecoin-project/go-state-types/builtin" builtin11 "github.com/filecoin-project/go-state-types/builtin" builtin12 "github.com/filecoin-project/go-state-types/builtin" + builtin13 "github.com/filecoin-project/go-state-types/builtin" builtin8 "github.com/filecoin-project/go-state-types/builtin" builtin9 "github.com/filecoin-project/go-state-types/builtin" market10 "github.com/filecoin-project/go-state-types/builtin/v10/market" @@ -19,8 +20,11 @@ import ( verifreg11 "github.com/filecoin-project/go-state-types/builtin/v11/verifreg" market12 "github.com/filecoin-project/go-state-types/builtin/v12/market" miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner" - paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych" verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg" + market13 "github.com/filecoin-project/go-state-types/builtin/v13/market" + miner13 
"github.com/filecoin-project/go-state-types/builtin/v13/miner" + paych13 "github.com/filecoin-project/go-state-types/builtin/v13/paych" + verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner" verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg" @@ -59,14 +63,14 @@ import ( ) const ( - ChainFinality = miner12.ChainFinality + ChainFinality = miner13.ChainFinality SealRandomnessLookback = ChainFinality - PaychSettleDelay = paych12.SettleDelay - MaxPreCommitRandomnessLookback = builtin12.EpochsInDay + SealRandomnessLookback + PaychSettleDelay = paych13.SettleDelay + MaxPreCommitRandomnessLookback = builtin13.EpochsInDay + SealRandomnessLookback ) var ( - MarketDefaultAllocationTermBuffer = market12.MarketDefaultAllocationTermBuffer + MarketDefaultAllocationTermBuffer = market13.MarketDefaultAllocationTermBuffer ) // SetSupportedProofTypes sets supported proof types, across all actor versions. @@ -181,11 +185,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { miner12.PreCommitChallengeDelay = delay + miner13.PreCommitChallengeDelay = delay + } // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. func GetPreCommitChallengeDelay() abi.ChainEpoch { - return miner12.PreCommitChallengeDelay + return miner13.PreCommitChallengeDelay } // SetConsensusMinerMinPower sets the minimum power of an individual miner must @@ -239,6 +245,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) { policy.ConsensusMinerMinPower = p } + for _, policy := range builtin13.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + } // SetMinVerifiedDealSize sets the minimum size of a verified deal. 
This should @@ -269,6 +279,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) { verifreg12.MinVerifiedDealSize = size + verifreg13.MinVerifiedDealSize = size + } func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) { @@ -322,6 +334,10 @@ func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProo return miner12.MaxProveCommitDuration[t], nil + case actorstypes.Version13: + + return miner13.MaxProveCommitDuration[t], nil + default: return 0, xerrors.Errorf("unsupported actors version") } @@ -387,6 +403,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) { Denominator: denom, } + market13.ProviderCollateralSupplyTarget = builtin13.BigFrac{ + Numerator: num, + Denominator: denom, + } + } func DealProviderCollateralBounds( @@ -460,13 +481,18 @@ func DealProviderCollateralBounds( min, max := market12.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) return min, max, nil + case actorstypes.Version13: + + min, max := market13.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + default: return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version") } } func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { - return market12.DealDurationBounds(pieceSize) + return market13.DealDurationBounds(pieceSize) } // Sets the challenge window and scales the proving period to match (such that @@ -549,6 +575,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) { // scale it if we're scaling the challenge period. miner12.WPoStDisputeWindow = period * 30 + miner13.WPoStChallengeWindow = period + miner13.WPoStProvingPeriod = period * abi.ChainEpoch(miner13.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. 
+ miner13.WPoStDisputeWindow = period * 30 + } func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { @@ -603,6 +636,9 @@ func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) case actorstypes.Version12: return miner12.MaxSectorExpirationExtension, nil + case actorstypes.Version13: + return miner13.MaxSectorExpirationExtension, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -610,11 +646,11 @@ func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) } func GetMinSectorExpiration() abi.ChainEpoch { - return miner12.MinSectorExpiration + return miner13.MinSectorExpiration } func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) { - sectorsPerPart, err := builtin12.PoStProofWindowPoStPartitionSectors(p) + sectorsPerPart, err := builtin13.PoStProofWindowPoStPartitionSectors(p) if err != nil { return 0, err } @@ -623,7 +659,7 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e return 0, err } - return min(miner12.PoStedPartitionsMax, int(uint64(maxSectors)/sectorsPerPart)), nil + return min(miner13.PoStedPartitionsMax, int(uint64(maxSectors)/sectorsPerPart)), nil } func GetDefaultAggregationProof() abi.RegisteredAggregationProof { @@ -635,7 +671,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime } - return builtin12.SealProofPoliciesV11[proof].SectorMaxLifetime + return builtin13.SealProofPoliciesV11[proof].SectorMaxLifetime } func GetAddressedSectorsMax(nwVer network.Version) (int, error) { @@ -681,6 +717,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) { case actorstypes.Version12: return miner12.AddressedSectorsMax, nil + case actorstypes.Version13: + return miner13.AddressedSectorsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -742,6 +781,10 @@ func 
GetDeclarationsMax(nwVer network.Version) (int, error) { return miner12.DeclarationsMax, nil + case actorstypes.Version13: + + return miner13.DeclarationsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -802,6 +845,10 @@ func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, ba return miner12.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + case actorstypes.Version13: + + return miner13.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } @@ -862,6 +909,10 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base return miner12.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + case actorstypes.Version13: + + return miner13.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } diff --git a/chain/actors/version.go b/chain/actors/version.go index 92c0da006..8d84bbc1d 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -14,9 +14,9 @@ const ({{range .actorVersions}} /* inline-gen start */ -var LatestVersion = 12 +var LatestVersion = 13 -var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12} +var Versions = []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13} const ( Version0 Version = 0 @@ -31,6 +31,7 @@ const ( Version10 Version = 10 Version11 Version = 11 Version12 Version = 12 + Version13 Version = 13 ) /* inline-gen end */ diff --git a/chain/beacon/beacon.go b/chain/beacon/beacon.go index aa76bcffe..45baa6624 100644 --- a/chain/beacon/beacon.go +++ b/chain/beacon/beacon.go @@ -43,31 +43,31 @@ type BeaconPoint struct { // been posted on chain. 
type RandomBeacon interface { Entry(context.Context, uint64) <-chan Response - VerifyEntry(types.BeaconEntry, types.BeaconEntry) error + VerifyEntry(entry types.BeaconEntry, prevEntrySig []byte) error MaxBeaconRoundForEpoch(network.Version, abi.ChainEpoch) uint64 + IsChained() bool } func ValidateBlockValues(bSchedule Schedule, nv network.Version, h *types.BlockHeader, parentEpoch abi.ChainEpoch, prevEntry types.BeaconEntry) error { - { - parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) - currBeacon := bSchedule.BeaconForEpoch(h.Height) - if parentBeacon != currBeacon { - if len(h.BeaconEntries) != 2 { - return xerrors.Errorf("expected two beacon entries at beacon fork, got %d", len(h.BeaconEntries)) - } - err := currBeacon.VerifyEntry(h.BeaconEntries[1], h.BeaconEntries[0]) - if err != nil { - return xerrors.Errorf("beacon at fork point invalid: (%v, %v): %w", - h.BeaconEntries[1], h.BeaconEntries[0], err) - } - return nil + + parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) + currBeacon := bSchedule.BeaconForEpoch(h.Height) + // When we have "chained" beacons, two entries at a fork are required. 
+ if parentBeacon != currBeacon && currBeacon.IsChained() { + if len(h.BeaconEntries) != 2 { + return xerrors.Errorf("expected two beacon entries at beacon fork, got %d", len(h.BeaconEntries)) } + err := currBeacon.VerifyEntry(h.BeaconEntries[1], h.BeaconEntries[0].Data) + if err != nil { + return xerrors.Errorf("beacon at fork point invalid: (%v, %v): %w", + h.BeaconEntries[1], h.BeaconEntries[0], err) + } + return nil } - // TODO: fork logic - b := bSchedule.BeaconForEpoch(h.Height) - maxRound := b.MaxBeaconRoundForEpoch(nv, h.Height) + maxRound := currBeacon.MaxBeaconRoundForEpoch(nv, h.Height) + // We don't expect to ever actually meet this condition if maxRound == prevEntry.Round { if len(h.BeaconEntries) != 0 { return xerrors.Errorf("expected not to have any beacon entries in this block, got %d", len(h.BeaconEntries)) @@ -79,13 +79,31 @@ func ValidateBlockValues(bSchedule Schedule, nv network.Version, h *types.BlockH return xerrors.Errorf("expected to have beacon entries in this block, but didn't find any") } + // We skip verifying the genesis entry when randomness is "chained". 
+ if currBeacon.IsChained() && prevEntry.Round == 0 { + return nil + } + last := h.BeaconEntries[len(h.BeaconEntries)-1] if last.Round != maxRound { return xerrors.Errorf("expected final beacon entry in block to be at round %d, got %d", maxRound, last.Round) } + // If the beacon is UNchained, verify that the block only includes the rounds we want for the epochs in between parentEpoch and h.Height + // For chained beacons, you must have all the rounds forming a valid chain with prevEntry, so we can skip this step + if !currBeacon.IsChained() { + // Verify that all other entries' rounds are as expected for the epochs in between parentEpoch and h.Height + for i, e := range h.BeaconEntries { + correctRound := currBeacon.MaxBeaconRoundForEpoch(nv, parentEpoch+abi.ChainEpoch(i)+1) + if e.Round != correctRound { + return xerrors.Errorf("unexpected beacon round %d, expected %d for epoch %d", e.Round, correctRound, parentEpoch+abi.ChainEpoch(i)+1) + } + } + } + + // Verify the beacon entries themselves for i, e := range h.BeaconEntries { - if err := b.VerifyEntry(e, prevEntry); err != nil { + if err := currBeacon.VerifyEntry(e, prevEntry.Data); err != nil { return xerrors.Errorf("beacon entry %d (%d - %x (%d)) was invalid: %w", i, e.Round, e.Data, len(e.Data), err) } prevEntry = e @@ -95,34 +113,32 @@ func ValidateBlockValues(bSchedule Schedule, nv network.Version, h *types.BlockH } func BeaconEntriesForBlock(ctx context.Context, bSchedule Schedule, nv network.Version, epoch abi.ChainEpoch, parentEpoch abi.ChainEpoch, prev types.BeaconEntry) ([]types.BeaconEntry, error) { - { - parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) - currBeacon := bSchedule.BeaconForEpoch(epoch) - if parentBeacon != currBeacon { - // Fork logic - round := currBeacon.MaxBeaconRoundForEpoch(nv, epoch) - out := make([]types.BeaconEntry, 2) - rch := currBeacon.Entry(ctx, round-1) - res := <-rch - if res.Err != nil { - return nil, xerrors.Errorf("getting entry %d returned error: %w", round-1,
res.Err) - } - out[0] = res.Entry - rch = currBeacon.Entry(ctx, round) - res = <-rch - if res.Err != nil { - return nil, xerrors.Errorf("getting entry %d returned error: %w", round, res.Err) - } - out[1] = res.Entry - return out, nil + // When we have "chained" beacons, two entries at a fork are required. + parentBeacon := bSchedule.BeaconForEpoch(parentEpoch) + currBeacon := bSchedule.BeaconForEpoch(epoch) + if parentBeacon != currBeacon && currBeacon.IsChained() { + // Fork logic + round := currBeacon.MaxBeaconRoundForEpoch(nv, epoch) + out := make([]types.BeaconEntry, 2) + rch := currBeacon.Entry(ctx, round-1) + res := <-rch + if res.Err != nil { + return nil, xerrors.Errorf("getting entry %d returned error: %w", round-1, res.Err) } + out[0] = res.Entry + rch = currBeacon.Entry(ctx, round) + res = <-rch + if res.Err != nil { + return nil, xerrors.Errorf("getting entry %d returned error: %w", round, res.Err) + } + out[1] = res.Entry + return out, nil } - beacon := bSchedule.BeaconForEpoch(epoch) - start := build.Clock.Now() - maxRound := beacon.MaxBeaconRoundForEpoch(nv, epoch) + maxRound := currBeacon.MaxBeaconRoundForEpoch(nv, epoch) + // We don't expect this to ever be the case if maxRound == prev.Round { return nil, nil } @@ -132,10 +148,10 @@ func BeaconEntriesForBlock(ctx context.Context, bSchedule Schedule, nv network.V prev.Round = maxRound - 1 } - cur := maxRound var out []types.BeaconEntry - for cur > prev.Round { - rch := beacon.Entry(ctx, cur) + for currEpoch := epoch; currEpoch > parentEpoch; currEpoch-- { + currRound := currBeacon.MaxBeaconRoundForEpoch(nv, currEpoch) + rch := currBeacon.Entry(ctx, currRound) select { case resp := <-rch: if resp.Err != nil { @@ -143,7 +159,6 @@ func BeaconEntriesForBlock(ctx context.Context, bSchedule Schedule, nv network.V } out = append(out, resp.Entry) - cur = resp.Entry.Round - 1 case <-ctx.Done(): return nil, xerrors.Errorf("context timed out waiting on beacon entry to come back for epoch %d: %w", epoch, 
ctx.Err()) } diff --git a/chain/beacon/drand/drand.go b/chain/beacon/drand/drand.go index 5825fa691..b5f5917ef 100644 --- a/chain/beacon/drand/drand.go +++ b/chain/beacon/drand/drand.go @@ -8,7 +8,7 @@ import ( dchain "github.com/drand/drand/chain" dclient "github.com/drand/drand/client" hclient "github.com/drand/drand/client/http" - "github.com/drand/drand/common/scheme" + dcrypto "github.com/drand/drand/crypto" dlog "github.com/drand/drand/log" gclient "github.com/drand/drand/lp2p/client" "github.com/drand/kyber" @@ -37,7 +37,8 @@ var log = logging.Logger("drand") // // The root trust for the Drand chain is configured from build.DrandChain. type DrandBeacon struct { - client dclient.Client + isChained bool + client dclient.Client pubkey kyber.Point @@ -47,10 +48,18 @@ type DrandBeacon struct { drandGenTime uint64 filGenTime uint64 filRoundTime uint64 + scheme *dcrypto.Scheme localCache *lru.Cache[uint64, *types.BeaconEntry] } +// IsChained tells us whether this particular beacon operates in "chained mode". Prior to Drand +// quicknet, beacons form a chain. After the introduction of quicknet, they do not, so we need to +// change how we interact with beacon entries. 
(See FIP-0063) +func (db *DrandBeacon) IsChained() bool { + return db.isChained +} + // DrandHTTPClient interface overrides the user agent used by drand type DrandHTTPClient interface { SetUserAgent(string) @@ -68,6 +77,10 @@ func (l *logger) Named(s string) dlog.Logger { return &logger{l.SugaredLogger.Named(s)} } +func (l *logger) AddCallerSkip(skip int) dlog.Logger { + return &logger{l.SugaredLogger.With(zap.AddCallerSkip(skip))} +} + func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) { if genesisTs == 0 { panic("what are you doing this cant be zero") @@ -112,10 +125,16 @@ func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes } db := &DrandBeacon{ + isChained: config.IsChained, client: client, localCache: lc, } + sch, err := dcrypto.GetSchemeByIDWithDefault(drandChain.Scheme) + if err != nil { + return nil, err + } + db.scheme = sch db.pubkey = drandChain.PublicKey db.interval = drandChain.Period db.drandGenTime = uint64(drandChain.GenesisTime) @@ -164,33 +183,28 @@ func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry { return v } -func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error { - if prev.Round == 0 { - // TODO handle genesis better - return nil - } - - if curr.Round != prev.Round+1 { - return xerrors.Errorf("invalid beacon entry: cur (%d) != prev (%d) + 1", curr.Round, prev.Round) - } - - if be := db.getCachedValue(curr.Round); be != nil { - if !bytes.Equal(curr.Data, be.Data) { +func (db *DrandBeacon) VerifyEntry(entry types.BeaconEntry, prevEntrySig []byte) error { + if be := db.getCachedValue(entry.Round); be != nil { + if !bytes.Equal(entry.Data, be.Data) { return xerrors.New("invalid beacon value, does not match cached good value") } // return no error if the value is in the cache already return nil } b := &dchain.Beacon{ - PreviousSig: prev.Data, - Round: curr.Round, - Signature: curr.Data, + PreviousSig: 
prevEntrySig, + Round: entry.Round, + Signature: entry.Data, } - err := dchain.NewVerifier(scheme.GetSchemeFromEnv()).VerifyBeacon(*b, db.pubkey) - if err == nil { - db.cacheValue(curr) + + err := db.scheme.VerifyBeacon(b, db.pubkey) + if err != nil { + return xerrors.Errorf("failed to verify beacon: %w", err) } - return err + + db.cacheValue(entry) + + return nil } func (db *DrandBeacon) MaxBeaconRoundForEpoch(nv network.Version, filEpoch abi.ChainEpoch) uint64 { diff --git a/chain/beacon/drand/drand_test.go b/chain/beacon/drand/drand_test.go index 7434241a5..339aaeaad 100644 --- a/chain/beacon/drand/drand_test.go +++ b/chain/beacon/drand/drand_test.go @@ -33,7 +33,15 @@ func TestMaxBeaconRoundForEpoch(t *testing.T) { todayTs := uint64(1652222222) db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandTestnet]) assert.NoError(t, err) + assert.True(t, db.IsChained()) mbr15 := db.MaxBeaconRoundForEpoch(network.Version15, 100) mbr16 := db.MaxBeaconRoundForEpoch(network.Version16, 100) assert.Equal(t, mbr15+1, mbr16) } + +func TestQuicknetIsChained(t *testing.T) { + todayTs := uint64(1652222222) + db, err := NewDrandBeacon(todayTs, build.BlockDelaySecs, nil, build.DrandConfigs[build.DrandQuicknet]) + assert.NoError(t, err) + assert.False(t, db.IsChained()) +} diff --git a/chain/beacon/mock.go b/chain/beacon/mock.go index 3f26da109..ab6a98ebf 100644 --- a/chain/beacon/mock.go +++ b/chain/beacon/mock.go @@ -20,6 +20,10 @@ type mockBeacon struct { interval time.Duration } +func (mb *mockBeacon) IsChained() bool { + return true +} + func NewMockBeacon(interval time.Duration) RandomBeacon { mb := &mockBeacon{interval: interval} @@ -47,7 +51,7 @@ func (mb *mockBeacon) Entry(ctx context.Context, index uint64) <-chan Response { return out } -func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, to types.BeaconEntry) error { +func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, _prevEntrySig []byte) error { // TODO: cache this, 
especially for bls oe := mb.entryForIndex(from.Round) if !bytes.Equal(from.Data, oe.Data) { diff --git a/chain/consensus/compute_state.go b/chain/consensus/compute_state.go index 4b993b3e7..78369ec20 100644 --- a/chain/consensus/compute_state.go +++ b/chain/consensus/compute_state.go @@ -53,6 +53,7 @@ func NewActorRegistry() *vm.ActorRegistry { inv.Register(actorstypes.Version10, vm.ActorsVersionPredicate(actorstypes.Version10), builtin.MakeRegistry(actorstypes.Version10)) inv.Register(actorstypes.Version11, vm.ActorsVersionPredicate(actorstypes.Version11), builtin.MakeRegistry(actorstypes.Version11)) inv.Register(actorstypes.Version12, vm.ActorsVersionPredicate(actorstypes.Version12), builtin.MakeRegistry(actorstypes.Version12)) + inv.Register(actorstypes.Version13, vm.ActorsVersionPredicate(actorstypes.Version13), builtin.MakeRegistry(actorstypes.Version13)) return inv } diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go index bb70d5d11..327c33c35 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -129,6 +129,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) return xerrors.Errorf("failed to get lookback tipset for block: %w", err) } + // TODO: Optimization: See https://github.com/filecoin-project/lotus/issues/11597 prevBeacon, err := filec.store.GetLatestBeaconEntry(ctx, baseTs) if err != nil { return xerrors.Errorf("failed to get latest beacon entry: %w", err) diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go index d7e0914fe..26c93a0f3 100644 --- a/chain/consensus/filcns/upgrades.go +++ b/chain/consensus/filcns/upgrades.go @@ -23,7 +23,10 @@ import ( init11 "github.com/filecoin-project/go-state-types/builtin/v11/init" nv19 "github.com/filecoin-project/go-state-types/builtin/v11/migration" system11 "github.com/filecoin-project/go-state-types/builtin/v11/system" + init12 
"github.com/filecoin-project/go-state-types/builtin/v12/init" nv21 "github.com/filecoin-project/go-state-types/builtin/v12/migration" + system12 "github.com/filecoin-project/go-state-types/builtin/v12/system" + nv22 "github.com/filecoin-project/go-state-types/builtin/v13/migration" nv17 "github.com/filecoin-project/go-state-types/builtin/v9/migration" "github.com/filecoin-project/go-state-types/manifest" "github.com/filecoin-project/go-state-types/migration" @@ -280,12 +283,26 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule { Height: build.UpgradeWatermelonFixHeight, Network: network.Version21, Migration: buildUpgradeActorsV12MinerFix(calibnetv12BuggyMinerCID1, calibnetv12BuggyManifestCID2), + }, { + Height: build.UpgradeWatermelonFix2Height, + Network: network.Version21, + Migration: buildUpgradeActorsV12MinerFix(calibnetv12BuggyMinerCID2, calibnetv12CorrectManifestCID1), + }, { + Height: build.UpgradeDragonHeight, + Network: network.Version22, + Migration: UpgradeActorsV13, + PreMigrations: []stmgr.PreMigration{{ + PreMigration: PreUpgradeActorsV13, + StartWithin: 120, + DontStartWithin: 15, + StopWithin: 10, + }}, + Expensive: true, + }, { + Height: build.UpgradeCalibrationDragonFixHeight, + Network: network.Version22, + Migration: upgradeActorsV13VerifregFix(calibnetv13BuggyVerifregCID1, calibnetv13CorrectManifestCID1), }, - { - Height: build.UpgradeWatermelonFix2Height, - Network: network.Version21, - Migration: buildUpgradeActorsV12MinerFix(calibnetv12BuggyMinerCID2, calibnetv12CorrectManifestCID1), - }, } for _, u := range updates { @@ -1891,6 +1908,13 @@ var ( calibnetv12BuggyManifestCID1 = cid.MustParse("bafy2bzacedrunxfqta5skb7q7x32lnp4efz2oq7fn226ffm7fu5iqs62jkmvs") calibnetv12BuggyManifestCID2 = cid.MustParse("bafy2bzacebl4w5ptfvuw6746w7ev562idkbf5ppq72e6zub22435ws2rukzru") calibnetv12CorrectManifestCID1 = cid.MustParse("bafy2bzacednzb3pkrfnbfhmoqtb3bc6dgvxszpqklf3qcc7qzcage4ewzxsca") + + calibnetv13BuggyVerifregCID1 = 
cid.MustParse("bafk2bzacednskl3bykz5qpo54z2j2p4q44t5of4ktd6vs6ymmg2zebsbxazkm") + + calibnetv13BuggyBundleSuffix1 = "calibrationnet-13-rc3" + + calibnetv13BuggyManifestCID1 = cid.MustParse("bafy2bzacea4firkyvt2zzdwqjrws5pyeluaesh6uaid246tommayr4337xpmi") + calibnetv13CorrectManifestCID1 = cid.MustParse("bafy2bzacect4ktyujrwp6mjlsitnpvuw2pbuppz6w52sfljyo4agjevzm75qs") ) func upgradeActorsV12Common( @@ -2146,6 +2170,304 @@ func buildUpgradeActorsV12MinerFix(oldBuggyMinerCID, newManifestCID cid.Cid) fun } } +func PreUpgradeActorsV13(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration; with 4 or fewer CPUs available, fall back to a single worker. + workerCount := MigrationMaxWorkerCount + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + + lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch) + if err != nil { + return xerrors.Errorf("error getting lookback ts for premigration: %w", err) + } + + config := migration.Config{ + MaxWorkers: uint(workerCount), + ProgressLogPeriod: time.Minute * 5, + UpgradeEpoch: build.UpgradeDragonHeight, + } + + _, err = upgradeActorsV13Common(ctx, sm, cache, lbRoot, epoch, lbts, config) + return err +} + +func UpgradeActorsV13(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3 (but always at least 1).
+ workerCount := MigrationMaxWorkerCount - 3 + if workerCount <= 0 { + workerCount = 1 + } + config := migration.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + UpgradeEpoch: build.UpgradeDragonHeight, + } + newRoot, err := upgradeActorsV13Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v13 state: %w", err) + } + return newRoot, nil +} + +func upgradeActorsV13Common( + ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config migration.Config, +) (cid.Cid, error) { + writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB/4) + adtStore := store.ActorStore(ctx, writeStore) + // ensure that the manifest is loaded in the blockstore + if err := bundle.LoadBundles(ctx, writeStore, actorstypes.Version13); err != nil { + return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err) + } + + // Load the state root.
+ var stateRoot types.StateRoot + if err := adtStore.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion5 { + return cid.Undef, xerrors.Errorf( + "expected state root version 5 for actors v13 upgrade, got %d", + stateRoot.Version, + ) + } + + // check whether or not this is a calibnet upgrade + // we do this because calibnet upgraded to a "wrong" actors bundle, which was then corrected + // we thus upgrade to calibrationnet-buggy in this upgrade + actorsIn, err := state.LoadStateTree(adtStore, root) + if err != nil { + return cid.Undef, xerrors.Errorf("loading state tree: %w", err) + } + + initActor, err := actorsIn.GetActor(builtin.InitActorAddr) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to get init actor: %w", err) + } + + var initState init12.State + if err := adtStore.Get(ctx, initActor.Head, &initState); err != nil { + return cid.Undef, xerrors.Errorf("failed to get init actor state: %w", err) + } + + var manifestCid cid.Cid + if initState.NetworkName == "calibrationnet" { + embedded, ok := build.GetEmbeddedBuiltinActorsBundle(actorstypes.Version13, calibnetv13BuggyBundleSuffix1) + if !ok { + return cid.Undef, xerrors.Errorf("didn't find buggy calibrationnet bundle") + } + + var err error + manifestCid, err = bundle.LoadBundle(ctx, writeStore, bytes.NewReader(embedded)) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to load buggy calibnet bundle: %w", err) + } + + if manifestCid != calibnetv13BuggyManifestCID1 { + return cid.Undef, xerrors.Errorf("didn't find expected buggy calibnet bundle manifest: %s != %s", manifestCid, calibnetv13BuggyManifestCID1) + } + } else { + ok := false + manifestCid, ok = actors.GetManifest(actorstypes.Version13) + if !ok { + return cid.Undef, xerrors.Errorf("no manifest CID for v13 upgrade") + } + } + + // Perform the migration + newHamtRoot, err := nv22.MigrateStateTree(ctx,
adtStore, manifestCid, stateRoot.Actors, epoch, config, + migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v13: %w", err) + } + + // Persist the result. + newRoot, err := adtStore.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion5, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persists the new tree and shuts down the flush worker + if err := writeStore.Flush(ctx); err != nil { + return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err) + } + + if err := writeStore.Shutdown(ctx); err != nil { + return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err) + } + + return newRoot, nil +} + +// //////////////////// +func upgradeActorsV13VerifregFix(oldBuggyVerifregCID, newManifestCID cid.Cid) func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + return func(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + stateStore := sm.ChainStore().StateBlockstore() + adtStore := store.ActorStore(ctx, stateStore) + + // ensure that the manifest is loaded in the blockstore + + // this loads the "correct" bundle for UpgradeCalibrationDragonFixHeight + if err := bundle.LoadBundles(ctx, stateStore, actorstypes.Version13); err != nil { + return cid.Undef, xerrors.Errorf("failed to load manifest bundle: %w", err) + } + + // this loads the buggy bundle, for UpgradeDragonHeight + embedded, ok := build.GetEmbeddedBuiltinActorsBundle(actorstypes.Version13, calibnetv13BuggyBundleSuffix1) + if !ok { + return cid.Undef, xerrors.Errorf("didn't find buggy calibrationnet bundle") + } + + _, err := bundle.LoadBundle(ctx, stateStore, bytes.NewReader(embedded)) + if err 
!= nil { + return cid.Undef, xerrors.Errorf("failed to load buggy calibnet bundle: %w", err) + } + + // now confirm we have the one we're migrating to + if haveManifest, err := stateStore.Has(ctx, newManifestCID); err != nil { + return cid.Undef, xerrors.Errorf("blockstore error when loading manifest %s: %w", newManifestCID, err) + } else if !haveManifest { + return cid.Undef, xerrors.Errorf("missing new manifest %s in blockstore", newManifestCID) + } + + // Load input state tree + actorsIn, err := state.LoadStateTree(adtStore, root) + if err != nil { + return cid.Undef, xerrors.Errorf("loading state tree: %w", err) + } + + // load old manifest data + systemActor, err := actorsIn.GetActor(builtin.SystemActorAddr) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to get system actor: %w", err) + } + + var systemState system12.State + if err := adtStore.Get(ctx, systemActor.Head, &systemState); err != nil { + return cid.Undef, xerrors.Errorf("failed to get system actor state: %w", err) + } + + var oldManifestData manifest.ManifestData + if err := adtStore.Get(ctx, systemState.BuiltinActors, &oldManifestData); err != nil { + return cid.Undef, xerrors.Errorf("failed to get old manifest data: %w", err) + } + + // load new manifest + var newManifest manifest.Manifest + if err := adtStore.Get(ctx, newManifestCID, &newManifest); err != nil { + return cid.Undef, xerrors.Errorf("error reading actor manifest: %w", err) + } + + if err := newManifest.Load(ctx, adtStore); err != nil { + return cid.Undef, xerrors.Errorf("error loading actor manifest: %w", err) + } + + // build the CID mapping + codeMapping := make(map[cid.Cid]cid.Cid, len(oldManifestData.Entries)) + for _, oldEntry := range oldManifestData.Entries { + newCID, ok := newManifest.Get(oldEntry.Name) + if !ok { + return cid.Undef, xerrors.Errorf("missing manifest entry for %s", oldEntry.Name) + } + + // Note: we expect newCID to be the same as oldEntry.Code for all actors except the verifreg actor + 
codeMapping[oldEntry.Code] = newCID + } + + // Create empty actorsOut + + actorsOut, err := state.NewStateTree(adtStore, actorsIn.Version()) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to create new tree: %w", err) + } + + // Perform the migration + err = actorsIn.ForEach(func(a address.Address, actor *types.Actor) error { + newCid, ok := codeMapping[actor.Code] + if !ok { + return xerrors.Errorf("didn't find mapping for %s", actor.Code) + } + + return actorsOut.SetActor(a, &types.ActorV5{ + Code: newCid, + Head: actor.Head, + Nonce: actor.Nonce, + Balance: actor.Balance, + Address: actor.Address, + }) + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to perform migration: %w", err) + } + + systemState.BuiltinActors = newManifest.Data + newSystemHead, err := adtStore.Put(ctx, &systemState) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to put new system state: %w", err) + } + + systemActor.Head = newSystemHead + if err = actorsOut.SetActor(builtin.SystemActorAddr, systemActor); err != nil { + return cid.Undef, xerrors.Errorf("failed to put new system actor: %w", err) + } + + // Sanity checking + + err = actorsIn.ForEach(func(a address.Address, inActor *types.Actor) error { + outActor, err := actorsOut.GetActor(a) + if err != nil { + return xerrors.Errorf("failed to get actor in outTree: %w", err) + } + + if inActor.Nonce != outActor.Nonce { + return xerrors.Errorf("mismatched nonce for actor %s", a) + } + + if !inActor.Balance.Equals(outActor.Balance) { + return xerrors.Errorf("mismatched balance for actor %s: %d != %d", a, inActor.Balance, outActor.Balance) + } + + if inActor.Address != outActor.Address && inActor.Address.String() != outActor.Address.String() { + return xerrors.Errorf("mismatched address for actor %s: %s != %s", a, inActor.Address, outActor.Address) + } + + if inActor.Head != outActor.Head && a != builtin.SystemActorAddr { + return xerrors.Errorf("mismatched head for actor %s", a) + } + + // Actor 
Codes are only expected to change for the verifreg actor + if inActor.Code != oldBuggyVerifregCID && inActor.Code != outActor.Code { + return xerrors.Errorf("unexpected change in code for actor %s", a) + } + + return nil + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to sanity check migration: %w", err) + } + + // Persist the result. + newRoot, err := actorsOut.Flush(ctx) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + return newRoot, nil + } +} + //////////////////// // Example upgrade function if upgrade requires only code changes diff --git a/chain/events/cache.go b/chain/events/cache.go index 2eba1f085..67c59cb50 100644 --- a/chain/events/cache.go +++ b/chain/events/cache.go @@ -26,7 +26,7 @@ type cache struct { uncachedAPI } -func newCache(api EventAPI, gcConfidence abi.ChainEpoch) *cache { +func newCache(api EventHelperAPI, gcConfidence abi.ChainEpoch) *cache { return &cache{ newTSCache(api, gcConfidence), newMessageCache(api), diff --git a/chain/events/events.go b/chain/events/events.go index c68b62a64..a9da32cbb 100644 --- a/chain/events/events.go +++ b/chain/events/events.go @@ -28,7 +28,7 @@ type TipSetObserver interface { Revert(ctx context.Context, from, to *types.TipSet) error } -type EventAPI interface { +type EventHelperAPI interface { ChainNotify(context.Context) (<-chan []*api.HeadChange, error) ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) @@ -47,7 +47,7 @@ type Events struct { *hcEvents } -func newEventsWithGCConfidence(ctx context.Context, api EventAPI, gcConfidence abi.ChainEpoch) (*Events, error) { +func newEventsWithGCConfidence(ctx context.Context, api EventHelperAPI, gcConfidence abi.ChainEpoch) (*Events, error) { cache := newCache(api, gcConfidence) ob := newObserver(cache, gcConfidence) @@ -61,7 +61,7 @@ func newEventsWithGCConfidence(ctx 
context.Context, api EventAPI, gcConfidence a return &Events{ob, he, headChange}, nil } -func NewEvents(ctx context.Context, api EventAPI) (*Events, error) { +func NewEvents(ctx context.Context, api EventHelperAPI) (*Events, error) { gcConfidence := 2 * build.ForkLengthThreshold return newEventsWithGCConfidence(ctx, api, gcConfidence) } diff --git a/chain/events/events_called.go b/chain/events/events_called.go index 3ac02b2f7..98e594c47 100644 --- a/chain/events/events_called.go +++ b/chain/events/events_called.go @@ -69,7 +69,7 @@ type queuedEvent struct { // Manages chain head change events, which may be forward (new tipset added to // chain) or backward (chain branch discarded in favour of heavier branch) type hcEvents struct { - cs EventAPI + cs EventHelperAPI lk sync.Mutex lastTs *types.TipSet @@ -94,7 +94,7 @@ type hcEvents struct { watcherEvents } -func newHCEvents(api EventAPI, obs *observer) *hcEvents { +func newHCEvents(api EventHelperAPI, obs *observer) *hcEvents { e := &hcEvents{ cs: api, confQueue: map[triggerH]map[msgH][]*queuedEvent{}, @@ -326,14 +326,14 @@ type headChangeAPI interface { // watcherEvents watches for a state change type watcherEvents struct { - cs EventAPI + cs EventHelperAPI hcAPI headChangeAPI lk sync.RWMutex matchers map[triggerID]StateMatchFunc } -func newWatcherEvents(hcAPI headChangeAPI, cs EventAPI) watcherEvents { +func newWatcherEvents(hcAPI headChangeAPI, cs EventHelperAPI) watcherEvents { return watcherEvents{ cs: cs, hcAPI: hcAPI, @@ -426,14 +426,14 @@ func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, // messageEvents watches for message calls to actors type messageEvents struct { - cs EventAPI + cs EventHelperAPI hcAPI headChangeAPI lk sync.RWMutex matchers map[triggerID]MsgMatchFunc } -func newMessageEvents(hcAPI headChangeAPI, cs EventAPI) messageEvents { +func newMessageEvents(hcAPI headChangeAPI, cs EventHelperAPI) messageEvents { return messageEvents{ cs: cs, hcAPI: hcAPI, diff --git 
a/chain/events/events_height.go b/chain/events/events_height.go index 5789be753..1296a7f25 100644 --- a/chain/events/events_height.go +++ b/chain/events/events_height.go @@ -22,7 +22,7 @@ type heightHandler struct { } type heightEvents struct { - api EventAPI + api EventHelperAPI gcConfidence abi.ChainEpoch lk sync.Mutex @@ -31,7 +31,7 @@ type heightEvents struct { lastGc abi.ChainEpoch //nolint:structcheck } -func newHeightEvents(api EventAPI, obs *observer, gcConfidence abi.ChainEpoch) *heightEvents { +func newHeightEvents(api EventHelperAPI, obs *observer, gcConfidence abi.ChainEpoch) *heightEvents { he := &heightEvents{ api: api, gcConfidence: gcConfidence, diff --git a/chain/events/events_test.go b/chain/events/events_test.go index f16434355..a1309b90a 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -358,7 +358,7 @@ func (fcs *fakeCS) advance(rev, app, drop int, msgs map[int]cid.Cid, nulls ...in fcs.sub(nil, nil) } -var _ EventAPI = &fakeCS{} +var _ EventHelperAPI = &fakeCS{} func TestAt(t *testing.T) { //stm: @EVENTS_HEIGHT_CHAIN_AT_001, @EVENTS_HEIGHT_REVERT_001 diff --git a/chain/events/filter/event.go b/chain/events/filter/event.go index 24192a53e..1669d840e 100644 --- a/chain/events/filter/event.go +++ b/chain/events/filter/event.go @@ -27,14 +27,24 @@ func isIndexedValue(b uint8) bool { return b&(types.EventFlagIndexedKey|types.EventFlagIndexedValue) > 0 } -type EventFilter struct { - id types.FilterID - minHeight abi.ChainEpoch // minimum epoch to apply filter or -1 if no minimum - maxHeight abi.ChainEpoch // maximum epoch to apply filter or -1 if no maximum - tipsetCid cid.Cid - addresses []address.Address // list of f4 actor addresses that are extpected to emit the event - keys map[string][][]byte // map of key names to a list of alternate values that may match - maxResults int // maximum number of results to collect, 0 is unlimited +type AddressResolver func(context.Context, abi.ActorID, *types.TipSet) (address.Address, 
bool) + +type EventFilter interface { + Filter + + TakeCollectedEvents(context.Context) []*CollectedEvent + CollectEvents(context.Context, *TipSetEvents, bool, AddressResolver) error +} + +type eventFilter struct { + id types.FilterID + minHeight abi.ChainEpoch // minimum epoch to apply filter or -1 if no minimum + maxHeight abi.ChainEpoch // maximum epoch to apply filter or -1 if no maximum + tipsetCid cid.Cid + addresses []address.Address // list of actor addresses that are extpected to emit the event + + keysWithCodec map[string][]types.ActorEventBlock // map of key names to a list of alternate values that may match + maxResults int // maximum number of results to collect, 0 is unlimited mu sync.Mutex collected []*CollectedEvent @@ -42,11 +52,11 @@ type EventFilter struct { ch chan<- interface{} } -var _ Filter = (*EventFilter)(nil) +var _ Filter = (*eventFilter)(nil) type CollectedEvent struct { Entries []types.EventEntry - EmitterAddr address.Address // f4 address of emitter + EmitterAddr address.Address // address of emitter EventIdx int // index of the event within the list of emitted events Reverted bool Height abi.ChainEpoch @@ -55,24 +65,24 @@ type CollectedEvent struct { MsgCid cid.Cid // cid of message that produced event } -func (f *EventFilter) ID() types.FilterID { +func (f *eventFilter) ID() types.FilterID { return f.id } -func (f *EventFilter) SetSubChannel(ch chan<- interface{}) { +func (f *eventFilter) SetSubChannel(ch chan<- interface{}) { f.mu.Lock() defer f.mu.Unlock() f.ch = ch f.collected = nil } -func (f *EventFilter) ClearSubChannel() { +func (f *eventFilter) ClearSubChannel() { f.mu.Lock() defer f.mu.Unlock() f.ch = nil } -func (f *EventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, revert bool, resolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)) error { +func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, revert bool, resolver AddressResolver) error { if 
!f.matchTipset(te) { return nil } @@ -137,13 +147,13 @@ func (f *EventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever return nil } -func (f *EventFilter) setCollectedEvents(ces []*CollectedEvent) { +func (f *eventFilter) setCollectedEvents(ces []*CollectedEvent) { f.mu.Lock() f.collected = ces f.mu.Unlock() } -func (f *EventFilter) TakeCollectedEvents(ctx context.Context) []*CollectedEvent { +func (f *eventFilter) TakeCollectedEvents(ctx context.Context) []*CollectedEvent { f.mu.Lock() collected := f.collected f.collected = nil @@ -153,14 +163,14 @@ func (f *EventFilter) TakeCollectedEvents(ctx context.Context) []*CollectedEvent return collected } -func (f *EventFilter) LastTaken() time.Time { +func (f *eventFilter) LastTaken() time.Time { f.mu.Lock() defer f.mu.Unlock() return f.lastTaken } // matchTipset reports whether this filter matches the given tipset -func (f *EventFilter) matchTipset(te *TipSetEvents) bool { +func (f *eventFilter) matchTipset(te *TipSetEvents) bool { if f.tipsetCid != cid.Undef { tsCid, err := te.Cid() if err != nil { @@ -178,7 +188,7 @@ func (f *EventFilter) matchTipset(te *TipSetEvents) bool { return true } -func (f *EventFilter) matchAddress(o address.Address) bool { +func (f *eventFilter) matchAddress(o address.Address) bool { if len(f.addresses) == 0 { return true } @@ -193,8 +203,8 @@ func (f *EventFilter) matchAddress(o address.Address) bool { return false } -func (f *EventFilter) matchKeys(ees []types.EventEntry) bool { - if len(f.keys) == 0 { +func (f *eventFilter) matchKeys(ees []types.EventEntry) bool { + if len(f.keysWithCodec) == 0 { return true } // TODO: optimize this naive algorithm @@ -216,19 +226,19 @@ func (f *EventFilter) matchKeys(ees []types.EventEntry) bool { continue } - wantlist, ok := f.keys[keyname] + wantlist, ok := f.keysWithCodec[keyname] if !ok || len(wantlist) == 0 { continue } for _, w := range wantlist { - if bytes.Equal(w, ee.Value) { + if bytes.Equal(w.Value, ee.Value) && w.Codec == 
ee.Codec { matched[keyname] = true break } } - if len(matched) == len(f.keys) { + if len(matched) == len(f.keysWithCodec) { // all keys have been matched return true } @@ -296,7 +306,7 @@ type EventFilterManager struct { EventIndex *EventIndex mu sync.Mutex // guards mutations to filters - filters map[types.FilterID]*EventFilter + filters map[types.FilterID]EventFilter currentHeight abi.ChainEpoch } @@ -362,7 +372,8 @@ func (m *EventFilterManager) Revert(ctx context.Context, from, to *types.TipSet) return nil } -func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight abi.ChainEpoch, tipsetCid cid.Cid, addresses []address.Address, keys map[string][][]byte) (*EventFilter, error) { +func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight abi.ChainEpoch, tipsetCid cid.Cid, addresses []address.Address, + keysWithCodec map[string][]types.ActorEventBlock, excludeReverted bool) (EventFilter, error) { m.mu.Lock() currentHeight := m.currentHeight m.mu.Unlock() @@ -376,26 +387,26 @@ func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight a return nil, xerrors.Errorf("new filter id: %w", err) } - f := &EventFilter{ - id: id, - minHeight: minHeight, - maxHeight: maxHeight, - tipsetCid: tipsetCid, - addresses: addresses, - keys: keys, - maxResults: m.MaxFilterResults, + f := &eventFilter{ + id: id, + minHeight: minHeight, + maxHeight: maxHeight, + tipsetCid: tipsetCid, + addresses: addresses, + keysWithCodec: keysWithCodec, + maxResults: m.MaxFilterResults, } if m.EventIndex != nil && minHeight != -1 && minHeight < currentHeight { // Filter needs historic events - if err := m.EventIndex.PrefillFilter(ctx, f, true); err != nil { + if err := m.EventIndex.prefillFilter(ctx, f, excludeReverted); err != nil { return nil, err } } m.mu.Lock() if m.filters == nil { - m.filters = make(map[types.FilterID]*EventFilter) + m.filters = make(map[types.FilterID]EventFilter) } m.filters[id] = f m.mu.Unlock() diff --git 
a/chain/events/filter/event_test.go b/chain/events/filter/event_test.go index 329573bc1..c650b71eb 100644 --- a/chain/events/filter/event_test.go +++ b/chain/events/filter/event_test.go @@ -22,6 +22,19 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) +func keysToKeysWithCodec(keys map[string][][]byte) map[string][]types.ActorEventBlock { + keysWithCodec := make(map[string][]types.ActorEventBlock) + for k, v := range keys { + for _, vv := range v { + keysWithCodec[k] = append(keysWithCodec[k], types.ActorEventBlock{ + Codec: cid.Raw, + Value: vv, + }) + } + } + return keysWithCodec +} + func TestEventFilterCollectEvents(t *testing.T) { rng := pseudo.New(pseudo.NewSource(299792458)) a1 := randomF4Addr(t, rng) @@ -73,13 +86,13 @@ func TestEventFilterCollectEvents(t *testing.T) { testCases := []struct { name string - filter *EventFilter + filter *eventFilter te *TipSetEvents want []*CollectedEvent }{ { name: "nomatch tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14001, maxHeight: -1, }, @@ -88,7 +101,7 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "nomatch tipset max height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: 13999, }, @@ -97,7 +110,7 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "match tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14000, maxHeight: -1, }, @@ -106,7 +119,7 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "match tipset cid", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: cid14000, @@ -116,7 +129,7 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "nomatch address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a2}, @@ -126,7 +139,7 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "match address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 
-1, maxHeight: -1, addresses: []address.Address{a1}, @@ -136,124 +149,124 @@ func TestEventFilterCollectEvents(t *testing.T) { }, { name: "match one entry", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "match one entry with alternate values", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry by missing value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry by missing key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "method": { []byte("approval"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "match one entry with multiple keys", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr1"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry with one mismatching key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "approver": { []byte("addr1"), 
}, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one mismatching value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr2"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one unindexed key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "amount": { []byte("2988181"), }, - }, + }), }, te: events14000, want: noCollectedEvents, diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go index 2b1890c73..92bb3877e 100644 --- a/chain/events/filter/index.go +++ b/chain/events/filter/index.go @@ -46,6 +46,7 @@ var ddls = []string{ )`, `CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key)`, + `CREATE INDEX IF NOT EXISTS event_emitter_addr ON event (emitter_addr)`, `CREATE TABLE IF NOT EXISTS event_entry ( event_id INTEGER, @@ -56,6 +57,8 @@ var ddls = []string{ value BLOB NOT NULL )`, + `CREATE INDEX IF NOT EXISTS event_entry_key_index ON event_entry (key)`, + // metadata containing version of schema `CREATE TABLE IF NOT EXISTS _meta ( version UINT64 NOT NULL UNIQUE @@ -63,6 +66,7 @@ var ddls = []string{ `INSERT OR IGNORE INTO _meta (version) VALUES (1)`, `INSERT OR IGNORE INTO _meta (version) VALUES (2)`, + `INSERT OR IGNORE INTO _meta (version) VALUES (3)`, } var ( @@ -70,7 +74,7 @@ var ( ) const ( - schemaVersion = 2 + schemaVersion = 3 eventExists = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? 
AND message_index=?` insertEvent = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)` @@ -321,6 +325,22 @@ func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStor version = 2 } + if version == 2 { + log.Infof("upgrading event index from version 2 to version 3") + + // to upgrade to version 3 we only need to create an index on the event_entry.key column + // and on the event.emitter_addr column + // which means we can just reapply the schema (it will not have any effect on existing data) + for _, ddl := range ddls { + if _, err := db.Exec(ddl); err != nil { + _ = db.Close() + return nil, xerrors.Errorf("could not upgrade index to version 3, exec ddl %q: %w", ddl, err) + } + } + + version = 3 + } + if version != schemaVersion { _ = db.Close() return nil, xerrors.Errorf("invalid database version: got %d, expected %d", version, schemaVersion) @@ -481,7 +501,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever } // PrefillFilter fills a filter's collection of events from the historic index -func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, excludeReverted bool) error { +func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, excludeReverted bool) error { clauses := []string{} values := []any{} joins := []string{} @@ -514,9 +534,9 @@ func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, exclude clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")") } - if len(f.keys) > 0 { + if len(f.keysWithCodec) > 0 { join := 0 - for key, vals := range f.keys { + for key, vals := range f.keysWithCodec { if len(vals) > 0 { join++ joinAlias := fmt.Sprintf("ee%d", join) @@ -525,8 +545,8 @@ func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, exclude values = append(values, key) subclauses := []string{} for _, val := range vals { - 
subclauses = append(subclauses, fmt.Sprintf("%s.value=?", joinAlias)) - values = append(values, val) + subclauses = append(subclauses, fmt.Sprintf("(%s.value=? AND %[1]s.codec=?)", joinAlias)) + values = append(values, val.Value, val.Codec) } clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")") } @@ -557,7 +577,8 @@ func (ei *EventIndex) PrefillFilter(ctx context.Context, f *EventFilter, exclude s = s + " WHERE " + strings.Join(clauses, " AND ") } - s += " ORDER BY event.height DESC" + // retain insertion order of event_entry rows with the implicit _rowid_ column + s += " ORDER BY event.height DESC, event_entry._rowid_ ASC" stmt, err := ei.db.Prepare(s) if err != nil { diff --git a/chain/events/filter/index_test.go b/chain/events/filter/index_test.go index f9b1b14ad..ce3f7b78a 100644 --- a/chain/events/filter/index_test.go +++ b/chain/events/filter/index_test.go @@ -82,13 +82,13 @@ func TestEventIndexPrefillFilter(t *testing.T) { testCases := []struct { name string - filter *EventFilter + filter *eventFilter te *TipSetEvents want []*CollectedEvent }{ { name: "nomatch tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14001, maxHeight: -1, }, @@ -97,7 +97,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "nomatch tipset max height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: 13999, }, @@ -106,7 +106,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "match tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14000, maxHeight: -1, }, @@ -115,7 +115,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "match tipset cid", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: cid14000, @@ -125,7 +125,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "nomatch address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a2}, @@ 
-135,7 +135,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "match address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a1}, @@ -145,124 +145,124 @@ func TestEventIndexPrefillFilter(t *testing.T) { }, { name: "match one entry", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "match one entry with alternate values", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry by missing value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry by missing key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "method": { []byte("approval"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "match one entry with multiple keys", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr1"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry with one mismatching key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, 
- keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "approver": { []byte("addr1"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one mismatching value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr2"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one unindexed key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "amount": { []byte("2988181"), }, - }, + }), }, te: events14000, want: noCollectedEvents, @@ -272,7 +272,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { for _, tc := range testCases { tc := tc // appease lint t.Run(tc.name, func(t *testing.T) { - if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil { + if err := ei.prefillFilter(context.Background(), tc.filter, false); err != nil { require.NoError(t, err, "prefill filter events") } @@ -409,13 +409,13 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { inclusiveTestCases := []struct { name string - filter *EventFilter + filter *eventFilter te *TipSetEvents want []*CollectedEvent }{ { name: "nomatch tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14001, maxHeight: -1, }, @@ -424,7 +424,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "nomatch tipset max height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: 13999, }, @@ -433,7 +433,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14000, maxHeight: -1, }, @@ 
-442,7 +442,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset cid", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: cid14000, @@ -452,7 +452,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset cid", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: reveredCID14000, @@ -462,7 +462,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "nomatch address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a3}, @@ -472,7 +472,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match address 2", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a2}, @@ -482,7 +482,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match address 1", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a1}, @@ -492,155 +492,155 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match one entry", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, - }, + }), }, te: events14000, want: twoCollectedEvent, }, { name: "match one entry with alternate values", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), []byte("approval"), }, - }, + }), }, te: events14000, want: twoCollectedEvent, }, { name: "nomatch one entry by missing value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: 
keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry by missing key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "method": { []byte("approval"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "match one entry with multiple keys", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr1"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "match one entry with multiple keys", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr2"), }, - }, + }), }, te: revertedEvents14000, want: oneCollectedRevertedEvent, }, { name: "nomatch one entry with one mismatching key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "approver": { []byte("addr1"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one mismatching value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr3"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one unindexed key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + 
keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "amount": { []byte("2988181"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one unindexed key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "amount": { []byte("2988182"), }, - }, + }), }, te: events14000, want: noCollectedEvents, @@ -649,13 +649,13 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { exclusiveTestCases := []struct { name string - filter *EventFilter + filter *eventFilter te *TipSetEvents want []*CollectedEvent }{ { name: "nomatch tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14001, maxHeight: -1, }, @@ -664,7 +664,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "nomatch tipset max height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: 13999, }, @@ -673,7 +673,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset min height", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: 14000, maxHeight: -1, }, @@ -682,7 +682,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset cid", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: cid14000, @@ -692,7 +692,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match tipset cid but reverted", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, tipsetCid: reveredCID14000, @@ -702,7 +702,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "nomatch address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a3}, @@ -712,7 +712,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "nomatch address 2 
but reverted", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a2}, @@ -722,7 +722,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match address", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, addresses: []address.Address{a1}, @@ -732,141 +732,141 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { }, { name: "match one entry", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "match one entry with alternate values", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), []byte("approval"), }, - }, + }), }, te: events14000, want: oneCollectedEvent, }, { name: "nomatch one entry by missing value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("cancel"), []byte("propose"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry by missing key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "method": { []byte("approval"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "match one entry with multiple keys", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr1"), }, - }, + }), }, te: events14000, 
want: oneCollectedEvent, }, { name: "nomatch one entry with one mismatching key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "approver": { []byte("addr1"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with matching reverted value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr2"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one mismatching value", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "type": { []byte("approval"), }, "signer": { []byte("addr3"), }, - }, + }), }, te: events14000, want: noCollectedEvents, }, { name: "nomatch one entry with one unindexed key", - filter: &EventFilter{ + filter: &eventFilter{ minHeight: -1, maxHeight: -1, - keys: map[string][][]byte{ + keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ "amount": { []byte("2988181"), }, - }, + }), }, te: events14000, want: noCollectedEvents, @@ -876,7 +876,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { for _, tc := range inclusiveTestCases { tc := tc // appease lint t.Run(tc.name, func(t *testing.T) { - if err := ei.PrefillFilter(context.Background(), tc.filter, false); err != nil { + if err := ei.prefillFilter(context.Background(), tc.filter, false); err != nil { require.NoError(t, err, "prefill filter events") } @@ -888,7 +888,7 @@ func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { for _, tc := range exclusiveTestCases { tc := tc // appease lint t.Run(tc.name, func(t *testing.T) { - if err := 
ei.PrefillFilter(context.Background(), tc.filter, true); err != nil { + if err := ei.prefillFilter(context.Background(), tc.filter, true); err != nil { require.NoError(t, err, "prefill filter events") } diff --git a/chain/events/message_cache.go b/chain/events/message_cache.go index 96f6bcbd7..24b3c934a 100644 --- a/chain/events/message_cache.go +++ b/chain/events/message_cache.go @@ -11,13 +11,13 @@ import ( ) type messageCache struct { - api EventAPI + api EventHelperAPI blockMsgLk sync.Mutex blockMsgCache *arc.ARCCache[cid.Cid, *api.BlockMessages] } -func newMessageCache(a EventAPI) *messageCache { +func newMessageCache(a EventHelperAPI) *messageCache { blsMsgCache, _ := arc.NewARC[cid.Cid, *api.BlockMessages](500) return &messageCache{ diff --git a/chain/events/observer.go b/chain/events/observer.go index 446218585..0b021f996 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -17,7 +17,7 @@ import ( ) type observer struct { - api EventAPI + api EventHelperAPI gcConfidence abi.ChainEpoch diff --git a/chain/events/state/predicates.go b/chain/events/state/predicates.go index ff05156a6..e4e8b8f7e 100644 --- a/chain/events/state/predicates.go +++ b/chain/events/state/predicates.go @@ -242,7 +242,7 @@ func (sp *StatePredicates) DealStateChangedForIDs(dealIds []abi.DealID) DiffDeal } existenceChanged := oldFound != newFound - valueChanged := (oldFound && newFound) && *oldDeal != *newDeal + valueChanged := (oldFound && newFound) && !oldDeal.Equals(newDeal) if existenceChanged || valueChanged { changedDeals[dealID] = market.DealStateChange{ID: dealID, From: oldDeal, To: newDeal} } diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go index 52fc2668a..79c1d2e0e 100644 --- a/chain/events/state/predicates_test.go +++ b/chain/events/state/predicates_test.go @@ -177,11 +177,11 @@ func TestMarketPredicates(t *testing.T) { require.Contains(t, changedDealIDs, abi.DealID(1)) require.Contains(t, changedDealIDs, 
abi.DealID(2)) deal1 := changedDealIDs[abi.DealID(1)] - if deal1.From.LastUpdatedEpoch != 2 || deal1.To.LastUpdatedEpoch != 3 { + if deal1.From.LastUpdatedEpoch() != 2 || deal1.To.LastUpdatedEpoch() != 3 { t.Fatal("Unexpected change to LastUpdatedEpoch") } deal2 := changedDealIDs[abi.DealID(2)] - if deal2.From.LastUpdatedEpoch != 5 || deal2.To != nil { + if deal2.From.LastUpdatedEpoch() != 5 || deal2.To != nil { t.Fatal("Expected To to be nil") } @@ -243,8 +243,8 @@ func TestMarketPredicates(t *testing.T) { require.Len(t, changedDeals.Modified, 1) require.Equal(t, abi.DealID(1), changedDeals.Modified[0].ID) - require.True(t, dealEquality(*newDeal1, *changedDeals.Modified[0].To)) - require.True(t, dealEquality(*oldDeal1, *changedDeals.Modified[0].From)) + require.True(t, dealEquality(*newDeal1, changedDeals.Modified[0].To)) + require.True(t, dealEquality(*oldDeal1, changedDeals.Modified[0].From)) require.Equal(t, abi.DealID(2), changedDeals.Removed[0].ID) }) @@ -579,7 +579,7 @@ func newSectorPreCommitInfo(sectorNo abi.SectorNumber, sealed cid.Cid, expiratio } func dealEquality(expected market2.DealState, actual market.DealState) bool { - return expected.LastUpdatedEpoch == actual.LastUpdatedEpoch && - expected.SectorStartEpoch == actual.SectorStartEpoch && - expected.SlashEpoch == actual.SlashEpoch + return expected.LastUpdatedEpoch == actual.LastUpdatedEpoch() && + expected.SectorStartEpoch == actual.SectorStartEpoch() && + expected.SlashEpoch == actual.SlashEpoch() } diff --git a/chain/exchange/cbor_gen.go b/chain/exchange/cbor_gen.go index 71c75869d..4aa74f0c0 100644 --- a/chain/exchange/cbor_gen.go +++ b/chain/exchange/cbor_gen.go @@ -35,7 +35,7 @@ func (t *Request) MarshalCBOR(w io.Writer) error { } // t.Head ([]cid.Cid) (slice) - if len(t.Head) > cbg.MaxLength { + if len(t.Head) > 8192 { return xerrors.Errorf("Slice value in field t.Head was too long") } @@ -95,7 +95,7 @@ func (t *Request) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > 
cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Head: array too large (%d)", extra) } @@ -126,9 +126,9 @@ func (t *Request) UnmarshalCBOR(r io.Reader) (err error) { t.Head[i] = c } + } } - // t.Length (uint64) (uint64) { @@ -181,7 +181,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error { } // t.ErrorMessage (string) (string) - if len(t.ErrorMessage) > cbg.MaxLength { + if len(t.ErrorMessage) > 8192 { return xerrors.Errorf("Value in field t.ErrorMessage was too long") } @@ -193,7 +193,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error { } // t.Chain ([]*exchange.BSTipSet) (slice) - if len(t.Chain) > cbg.MaxLength { + if len(t.Chain) > 8192 { return xerrors.Errorf("Slice value in field t.Chain was too long") } @@ -204,6 +204,7 @@ func (t *Response) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -248,7 +249,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { // t.ErrorMessage (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -262,7 +263,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Chain: array too large (%d)", extra) } @@ -300,9 +301,9 @@ func (t *Response) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -332,10 +333,11 @@ func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.BlsIncludes ([]exchange.messageIndices) (slice) - if len(t.BlsIncludes) > cbg.MaxLength { + if len(t.BlsIncludes) > 8192 { return xerrors.Errorf("Slice value in field t.BlsIncludes was too long") } @@ -346,6 +348,7 @@ func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Secpk ([]*types.SignedMessage) (slice) @@ -360,10 +363,11 @@ func (t 
*CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.SecpkIncludes ([]exchange.messageIndices) (slice) - if len(t.SecpkIncludes) > cbg.MaxLength { + if len(t.SecpkIncludes) > 8192 { return xerrors.Errorf("Slice value in field t.SecpkIncludes was too long") } @@ -374,6 +378,7 @@ func (t *CompactedMessagesCBOR) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -446,9 +451,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.BlsIncludes ([]exchange.messageIndices) (slice) maj, extra, err = cr.ReadHeader() @@ -456,7 +461,7 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.BlsIncludes: array too large (%d)", extra) } @@ -484,9 +489,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Secpk ([]*types.SignedMessage) (slice) maj, extra, err = cr.ReadHeader() @@ -532,9 +537,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.SecpkIncludes ([]exchange.messageIndices) (slice) maj, extra, err = cr.ReadHeader() @@ -542,7 +547,7 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.SecpkIncludes: array too large (%d)", extra) } @@ -570,9 +575,9 @@ func (t *CompactedMessagesCBOR) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -591,7 +596,7 @@ func (t *BSTipSet) MarshalCBOR(w io.Writer) error { } // t.Blocks ([]*types.BlockHeader) (slice) - if len(t.Blocks) > cbg.MaxLength { + if len(t.Blocks) > 8192 { return xerrors.Errorf("Slice value in field t.Blocks was too long") } @@ -602,6 +607,7 @@ func (t *BSTipSet) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } 
// t.Messages (exchange.CompactedMessages) (struct) @@ -641,7 +647,7 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Blocks: array too large (%d)", extra) } @@ -679,9 +685,9 @@ func (t *BSTipSet) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Messages (exchange.CompactedMessages) (struct) { diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index df8900cab..9ae39cf35 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -374,13 +374,33 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal // Commit sectors { for pi, preseal := range m.Sectors { - params := &minertypes.SectorPreCommitInfo{ - SealProof: preseal.ProofType, - SectorNumber: preseal.SectorID, - SealedCID: preseal.CommR, - SealRandEpoch: -1, - DealIDs: []abi.DealID{minerInfos[i].dealIDs[pi]}, - Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! + var paramEnc []byte + var preCommitMethodNum abi.MethodNum + if nv >= network.Version22 { + paramEnc = mustEnc(&miner.PreCommitSectorBatchParams2{ + Sectors: []miner.SectorPreCommitInfo{ + { + SealProof: preseal.ProofType, + SectorNumber: preseal.SectorID, + SealedCID: preseal.CommR, + SealRandEpoch: -1, + DealIDs: []abi.DealID{minerInfos[i].dealIDs[pi]}, + Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! + UnsealedCid: &preseal.CommD, + }, + }, + }) + preCommitMethodNum = builtintypes.MethodsMiner.PreCommitSectorBatch2 + } else { + paramEnc = mustEnc(&minertypes.SectorPreCommitInfo{ + SealProof: preseal.ProofType, + SectorNumber: preseal.SectorID, + SealedCID: preseal.CommR, + SealRandEpoch: -1, + DealIDs: []abi.DealID{minerInfos[i].dealIDs[pi]}, + Expiration: minerInfos[i].presealExp, // TODO: Allow setting externally! 
+ }) + preCommitMethodNum = builtintypes.MethodsMiner.PreCommitSector } sectorWeight := minerInfos[i].sectorWeight[pi] @@ -463,7 +483,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal pledge = big.Add(pcd, pledge) - _, err = doExecValue(ctx, genesisVm, minerInfos[i].maddr, m.Worker, pledge, builtintypes.MethodsMiner.PreCommitSector, mustEnc(params)) + _, err = doExecValue(ctx, genesisVm, minerInfos[i].maddr, m.Worker, pledge, preCommitMethodNum, paramEnc) if err != nil { return cid.Undef, xerrors.Errorf("failed to confirm presealed sectors: %w", err) } diff --git a/chain/messagesigner/messagesigner_consensus.go b/chain/messagesigner/messagesigner_consensus.go new file mode 100644 index 000000000..905bb7199 --- /dev/null +++ b/chain/messagesigner/messagesigner_consensus.go @@ -0,0 +1,98 @@ +package messagesigner + +import ( + "context" + + "github.com/google/uuid" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/libp2p/go-libp2p/core/peer" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/types" + consensus "github.com/filecoin-project/lotus/lib/consensus/raft" + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +type MessageSignerConsensus struct { + MsgSigner + Consensus *consensus.Consensus +} + +func NewMessageSignerConsensus( + wallet api.Wallet, + mpool messagepool.MpoolNonceAPI, + ds dtypes.MetadataDS, + consensus *consensus.Consensus) *MessageSignerConsensus { + + ds = namespace.Wrap(ds, datastore.NewKey("/message-signer-consensus/")) + return &MessageSignerConsensus{ + MsgSigner: &MessageSigner{ + wallet: wallet, + mpool: mpool, + ds: ds, + }, + Consensus: consensus, + } +} + +func (ms *MessageSignerConsensus) IsLeader(ctx context.Context) bool { + return ms.Consensus.IsLeader(ctx) +} + +func (ms *MessageSignerConsensus) RedirectToLeader(ctx 
context.Context, method string, arg interface{}, ret interface{}) (bool, error) { + ok, err := ms.Consensus.RedirectToLeader(method, arg, ret.(*types.SignedMessage)) + if err != nil { + return ok, err + } + return ok, nil +} + +func (ms *MessageSignerConsensus) SignMessage( + ctx context.Context, + msg *types.Message, + spec *api.MessageSendSpec, + cb func(*types.SignedMessage) error) (*types.SignedMessage, error) { + + signedMsg, err := ms.MsgSigner.SignMessage(ctx, msg, spec, cb) + if err != nil { + return nil, err + } + + op := &consensus.ConsensusOp{ + Nonce: signedMsg.Message.Nonce, + Uuid: spec.MsgUuid, + Addr: signedMsg.Message.From, + SignedMsg: signedMsg, + } + err = ms.Consensus.Commit(ctx, op) + if err != nil { + return nil, err + } + + return signedMsg, nil +} + +func (ms *MessageSignerConsensus) GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error) { + cstate, err := ms.Consensus.State(ctx) + if err != nil { + return nil, err + } + + //cstate := state.(Consensus.RaftState) + msg, ok := cstate.MsgUuids[uuid] + if !ok { + return nil, xerrors.Errorf("Msg with Uuid %s not available", uuid) + } + return msg, nil +} + +func (ms *MessageSignerConsensus) GetRaftState(ctx context.Context) (*consensus.RaftState, error) { + return ms.Consensus.State(ctx) +} + +func (ms *MessageSignerConsensus) Leader(ctx context.Context) (peer.ID, error) { + return ms.Consensus.Leader(ctx) +} diff --git a/chain/state/statetree.go b/chain/state/statetree.go index 61d7d500a..1a6497d04 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -156,7 +156,7 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) { case network.Version13, network.Version14, network.Version15, network.Version16, network.Version17: return types.StateTreeVersion4, nil - case network.Version18, network.Version19, network.Version20, network.Version21: + case network.Version18, network.Version19, network.Version20, network.Version21, 
network.Version22: return types.StateTreeVersion5, nil default: diff --git a/chain/stmgr/actors.go b/chain/stmgr/actors.go index 56744fa74..f1d615e8d 100644 --- a/chain/stmgr/actors.go +++ b/chain/stmgr/actors.go @@ -284,7 +284,7 @@ func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts return &api.MarketDeal{ Proposal: *proposal, - State: *st, + State: api.MakeDealState(st), }, nil } diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index 9dd66ee8b..93f53c63f 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -203,10 +203,6 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig log.Errorw("FAILED migration", "height", height, "from", root, "error", err) return cid.Undef, err } - // Yes, we update the cache, even for the final upgrade epoch. Why? Reverts. This - // can save us a _lot_ of time because very few actors will have changed if we - // do a small revert then need to re-run the migration. - u.cache.Update(tmpCache) log.Warnw("COMPLETED migration", "height", height, "from", root, diff --git a/chain/sync.go b/chain/sync.go index 4dccc2036..ded6ab1b5 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ -704,25 +704,25 @@ func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet } { - // ensure consistency of beacon entires + // ensure consistency of beacon entries targetBE := incoming.Blocks()[0].BeaconEntries sorted := sort.SliceIsSorted(targetBE, func(i, j int) bool { return targetBE[i].Round < targetBE[j].Round }) if !sorted { - syncer.bad.Add(incoming.Cids()[0], NewBadBlockReason(incoming.Cids(), "wrong order of beacon entires")) - return nil, xerrors.Errorf("wrong order of beacon entires") + syncer.bad.Add(incoming.Cids()[0], NewBadBlockReason(incoming.Cids(), "wrong order of beacon entries")) + return nil, xerrors.Errorf("wrong order of beacon entries") } for _, bh := range incoming.Blocks()[1:] { if len(targetBE) != len(bh.BeaconEntries) { // cannot mark bad, 
I think @Kubuxu - return nil, xerrors.Errorf("tipset contained different number for beacon entires") + return nil, xerrors.Errorf("tipset contained different number for beacon entries") } for i, be := range bh.BeaconEntries { if targetBE[i].Round != be.Round || !bytes.Equal(targetBE[i].Data, be.Data) { // cannot mark bad, I think @Kubuxu - return nil, xerrors.Errorf("tipset contained different beacon entires") + return nil, xerrors.Errorf("tipset contained different beacon entries") } } diff --git a/chain/types/actor_event.go b/chain/types/actor_event.go new file mode 100644 index 000000000..bf95189e1 --- /dev/null +++ b/chain/types/actor_event.go @@ -0,0 +1,67 @@ +package types + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" +) + +type ActorEventBlock struct { + // The value codec to match when filtering event values. + Codec uint64 `json:"codec"` + + // The value to want to match on associated with the corresponding "event key" + // when filtering events. + // Should be a byte array encoded with the specified codec. + // Assumes base64 encoding when converting to/from JSON strings. + Value []byte `json:"value"` +} + +type ActorEventFilter struct { + // Matches events from one of these actors, or any actor if empty. + // For now, this MUST be a Filecoin address. + Addresses []address.Address `json:"addresses,omitempty"` + + // Matches events with the specified key/values, or all events if empty. + // If the value is an empty slice, the filter will match on the key only, accepting any value. + Fields map[string][]ActorEventBlock `json:"fields,omitempty"` + + // The height of the earliest tipset to include in the query. If empty, the query starts at the + // last finalized tipset. + // NOTE: In a future upgrade, this will be strict when set and will result in an error if a filter + // cannot be fulfilled by the depth of history available in the node. 
Currently, the node will + // not return an error, but will return starting from the epoch it has data for. + FromHeight *abi.ChainEpoch `json:"fromHeight,omitempty"` + + // The height of the latest tipset to include in the query. If empty, the query ends at the + // latest tipset. + ToHeight *abi.ChainEpoch `json:"toHeight,omitempty"` + + // Restricts events returned to those emitted from messages contained in this tipset. + // If `TipSetKey` is left empty in the filter criteria, then neither `FromHeight` nor `ToHeight` are allowed. + TipSetKey *TipSetKey `json:"tipsetKey,omitempty"` +} + +type ActorEvent struct { + // Event entries in log form. + Entries []EventEntry `json:"entries"` + + // Filecoin address of the actor that emitted this event. + // NOTE: In a future upgrade, this will change to always be an ID address. Currently this will be + // either the f4 address, or ID address if an f4 is not available for this actor. + Emitter address.Address `json:"emitter"` + + // Reverted is set to true if the message that produced this event was reverted because of a network re-org + // in that case, the event should be considered as reverted as well. + Reverted bool `json:"reverted"` + + // Height of the tipset that contained the message that produced this event. + Height abi.ChainEpoch `json:"height"` + + // The tipset that contained the message that produced this event. + TipSetKey TipSetKey `json:"tipsetKey"` + + // CID of message that produced this event.
+ MsgCid cid.Cid `json:"msgCid"` +} diff --git a/chain/types/actor_event_test.go b/chain/types/actor_event_test.go new file mode 100644 index 000000000..8c50b1717 --- /dev/null +++ b/chain/types/actor_event_test.go @@ -0,0 +1,125 @@ +package types + +import ( + "encoding/json" + pseudo "math/rand" + "testing" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + builtintypes "github.com/filecoin-project/go-state-types/builtin" +) + +func TestJSONMarshalling(t *testing.T) { + rng := pseudo.New(pseudo.NewSource(0)) + t.Run("actor event with entries", + testJsonMarshalling( + ActorEvent{ + Entries: []EventEntry{ + { + Key: "key1", + Codec: 0x51, + Value: []byte("value1"), + }, + { + Key: "key2", + Codec: 0x52, + Value: []byte("value2"), + }, + }, + Emitter: randomF4Addr(t, rng), + Reverted: false, + Height: 1001, + TipSetKey: NewTipSetKey(randomCid(t, rng)), + MsgCid: randomCid(t, rng), + }, + `{"entries":[{"Flags":0,"Key":"key1","Codec":81,"Value":"dmFsdWUx"},{"Flags":0,"Key":"key2","Codec":82,"Value":"dmFsdWUy"}],"emitter":"f410fagkp3qx2f76maqot74jaiw3tzbxe76k76zrkl3xifk67isrnbn2sll3yua","reverted":false,"height":1001,"tipsetKey":[{"/":"bafkqacx3dag26sfht3qlcdi"}],"msgCid":{"/":"bafkqacrziziykd6uuf4islq"}}`, + ), + ) + + t.Run("actor event filter", + testJsonMarshalling( + ActorEventFilter{ + Addresses: []address.Address{ + randomF4Addr(t, pseudo.New(pseudo.NewSource(0))), + randomF4Addr(t, pseudo.New(pseudo.NewSource(0))), + }, + Fields: map[string][]ActorEventBlock{ + "key1": { + { + Codec: 0x51, + Value: []byte("value1"), + }, + }, + "key2": { + { + Codec: 0x52, + Value: []byte("value2"), + }, + }, + }, + FromHeight: heightOf(0), + ToHeight: heightOf(100), + TipSetKey: randomTipSetKey(t, rng), + }, + 
`{"addresses":["f410fagkp3qx2f76maqot74jaiw3tzbxe76k76zrkl3xifk67isrnbn2sll3yua","f410fagkp3qx2f76maqot74jaiw3tzbxe76k76zrkl3xifk67isrnbn2sll3yua"],"fields":{"key1":[{"codec":81,"value":"dmFsdWUx"}],"key2":[{"codec":82,"value":"dmFsdWUy"}]},"fromHeight":0,"toHeight":100,"tipsetKey":[{"/":"bafkqacxcqxwocuiukv4aq5i"}]}`, + ), + ) + t.Run("actor event block", + testJsonMarshalling( + ActorEventBlock{ + Codec: 1, + Value: []byte("test"), + }, + `{"codec":1,"value":"dGVzdA=="}`, + ), + ) +} + +func testJsonMarshalling[V ActorEvent | ActorEventBlock | ActorEventFilter](subject V, expect string) func(t *testing.T) { + return func(t *testing.T) { + gotMarshalled, err := json.Marshal(subject) + require.NoError(t, err) + require.JSONEqf(t, expect, string(gotMarshalled), "serialization mismatch") + var gotUnmarshalled V + require.NoError(t, json.Unmarshal([]byte(expect), &gotUnmarshalled)) + require.Equal(t, subject, gotUnmarshalled) + } +} + +func heightOf(h int64) *abi.ChainEpoch { + hp := abi.ChainEpoch(h) + return &hp +} + +func randomTipSetKey(tb testing.TB, rng *pseudo.Rand) *TipSetKey { + tb.Helper() + tk := NewTipSetKey(randomCid(tb, rng)) + return &tk +} + +func randomF4Addr(tb testing.TB, rng *pseudo.Rand) address.Address { + tb.Helper() + addr, err := address.NewDelegatedAddress(builtintypes.EthereumAddressManagerActorID, randomBytes(32, rng)) + require.NoError(tb, err) + + return addr +} + +func randomCid(tb testing.TB, rng *pseudo.Rand) cid.Cid { + tb.Helper() + cb := cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY} + c, err := cb.Sum(randomBytes(10, rng)) + require.NoError(tb, err) + return c +} + +func randomBytes(n int, rng *pseudo.Rand) []byte { + buf := make([]byte, n) + rng.Read(buf) + return buf +} diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go index fe8e7e3fe..dde703cee 100644 --- a/chain/types/cbor_gen.go +++ b/chain/types/cbor_gen.go @@ -55,7 +55,7 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { } // t.BeaconEntries 
([]types.BeaconEntry) (slice) - if len(t.BeaconEntries) > cbg.MaxLength { + if len(t.BeaconEntries) > 8192 { return xerrors.Errorf("Slice value in field t.BeaconEntries was too long") } @@ -66,10 +66,11 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.WinPoStProof ([]proof.PoStProof) (slice) - if len(t.WinPoStProof) > cbg.MaxLength { + if len(t.WinPoStProof) > 8192 { return xerrors.Errorf("Slice value in field t.WinPoStProof was too long") } @@ -80,10 +81,11 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Parents ([]cid.Cid) (slice) - if len(t.Parents) > cbg.MaxLength { + if len(t.Parents) > 8192 { return xerrors.Errorf("Slice value in field t.Parents was too long") } @@ -238,7 +240,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.BeaconEntries: array too large (%d)", extra) } @@ -266,9 +268,9 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.WinPoStProof ([]proof.PoStProof) (slice) maj, extra, err = cr.ReadHeader() @@ -276,7 +278,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.WinPoStProof: array too large (%d)", extra) } @@ -304,9 +306,9 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Parents ([]cid.Cid) (slice) maj, extra, err = cr.ReadHeader() @@ -314,7 +316,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Parents: array too large (%d)", extra) } @@ -345,9 +347,9 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) { t.Parents[i] = c } + } } - // t.ParentWeight (big.Int) (struct) { @@ -360,10 +362,10 @@ func (t *BlockHeader) 
UnmarshalCBOR(r io.Reader) (err error) { // t.Height (abi.ChainEpoch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -511,7 +513,7 @@ func (t *Ticket) MarshalCBOR(w io.Writer) error { } // t.VRFProof ([]uint8) (slice) - if len(t.VRFProof) > cbg.ByteArrayMaxLen { + if len(t.VRFProof) > 2097152 { return xerrors.Errorf("Byte array in field t.VRFProof was too long") } @@ -519,9 +521,10 @@ func (t *Ticket) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.VRFProof[:]); err != nil { + if _, err := cw.Write(t.VRFProof); err != nil { return err } + return nil } @@ -555,7 +558,7 @@ func (t *Ticket) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -566,9 +569,10 @@ func (t *Ticket) UnmarshalCBOR(r io.Reader) (err error) { t.VRFProof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.VRFProof[:]); err != nil { + if _, err := io.ReadFull(cr, t.VRFProof); err != nil { return err } + return nil } @@ -598,7 +602,7 @@ func (t *ElectionProof) MarshalCBOR(w io.Writer) error { } // t.VRFProof ([]uint8) (slice) - if len(t.VRFProof) > cbg.ByteArrayMaxLen { + if len(t.VRFProof) > 2097152 { return xerrors.Errorf("Byte array in field t.VRFProof was too long") } @@ -606,9 +610,10 @@ func (t *ElectionProof) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.VRFProof[:]); err != nil { + if _, err := cw.Write(t.VRFProof); err != nil { return err } + return nil } @@ -638,10 +643,10 @@ func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) { // t.WinCount (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -667,7 +672,7 @@ 
func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -678,9 +683,10 @@ func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) { t.VRFProof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.VRFProof[:]); err != nil { + if _, err := io.ReadFull(cr, t.VRFProof); err != nil { return err } + return nil } @@ -753,7 +759,7 @@ func (t *Message) MarshalCBOR(w io.Writer) error { } // t.Params ([]uint8) (slice) - if len(t.Params) > cbg.ByteArrayMaxLen { + if len(t.Params) > 2097152 { return xerrors.Errorf("Byte array in field t.Params was too long") } @@ -761,9 +767,10 @@ func (t *Message) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Params[:]); err != nil { + if _, err := cw.Write(t.Params); err != nil { return err } + return nil } @@ -848,10 +855,10 @@ func (t *Message) UnmarshalCBOR(r io.Reader) (err error) { // t.GasLimit (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -909,7 +916,7 @@ func (t *Message) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Params: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -920,9 +927,10 @@ func (t *Message) UnmarshalCBOR(r io.Reader) (err error) { t.Params = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + if _, err := io.ReadFull(cr, t.Params); err != nil { return err } + return nil } @@ -1343,7 +1351,7 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { } // t.BlsMessages ([]cid.Cid) (slice) - if len(t.BlsMessages) > cbg.MaxLength { + if len(t.BlsMessages) > 8192 { return xerrors.Errorf("Slice value in field t.BlsMessages was too long") } 
@@ -1359,7 +1367,7 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { } // t.SecpkMessages ([]cid.Cid) (slice) - if len(t.SecpkMessages) > cbg.MaxLength { + if len(t.SecpkMessages) > 8192 { return xerrors.Errorf("Slice value in field t.SecpkMessages was too long") } @@ -1425,7 +1433,7 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.BlsMessages: array too large (%d)", extra) } @@ -1456,9 +1464,9 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { t.BlsMessages[i] = c } + } } - // t.SecpkMessages ([]cid.Cid) (slice) maj, extra, err = cr.ReadHeader() @@ -1466,7 +1474,7 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.SecpkMessages: array too large (%d)", extra) } @@ -1497,9 +1505,9 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) { t.SecpkMessages[i] = c } + } } - return nil } @@ -1518,7 +1526,7 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { } // t.Cids ([]cid.Cid) (slice) - if len(t.Cids) > cbg.MaxLength { + if len(t.Cids) > 8192 { return xerrors.Errorf("Slice value in field t.Cids was too long") } @@ -1534,7 +1542,7 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { } // t.Blocks ([]*types.BlockHeader) (slice) - if len(t.Blocks) > cbg.MaxLength { + if len(t.Blocks) > 8192 { return xerrors.Errorf("Slice value in field t.Blocks was too long") } @@ -1545,6 +1553,7 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Height (abi.ChainEpoch) (int64) @@ -1557,6 +1566,7 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { return err } } + return nil } @@ -1590,7 +1600,7 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Cids: array too large (%d)", extra) } @@ -1621,9 +1631,9 @@ 
func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { t.Cids[i] = c } + } } - // t.Blocks ([]*types.BlockHeader) (slice) maj, extra, err = cr.ReadHeader() @@ -1631,7 +1641,7 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Blocks: array too large (%d)", extra) } @@ -1669,16 +1679,16 @@ func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Height (abi.ChainEpoch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1721,7 +1731,7 @@ func (t *BeaconEntry) MarshalCBOR(w io.Writer) error { } // t.Data ([]uint8) (slice) - if len(t.Data) > cbg.ByteArrayMaxLen { + if len(t.Data) > 2097152 { return xerrors.Errorf("Byte array in field t.Data was too long") } @@ -1729,9 +1739,10 @@ func (t *BeaconEntry) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Data[:]); err != nil { + if _, err := cw.Write(t.Data); err != nil { return err } + return nil } @@ -1779,7 +1790,7 @@ func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Data: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -1790,9 +1801,10 @@ func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) (err error) { t.Data = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Data[:]); err != nil { + if _, err := io.ReadFull(cr, t.Data); err != nil { return err } + return nil } @@ -1908,6 +1920,7 @@ func (t *StateInfo0) MarshalCBOR(w io.Writer) error { if _, err := cw.Write(lengthBufStateInfo0); err != nil { return err } + return nil } @@ -1958,7 +1971,7 @@ func (t *Event) MarshalCBOR(w io.Writer) error { } // t.Entries ([]types.EventEntry) (slice) - if len(t.Entries) > cbg.MaxLength { + if len(t.Entries) > 8192 { return 
xerrors.Errorf("Slice value in field t.Entries was too long") } @@ -1969,6 +1982,7 @@ func (t *Event) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -2017,7 +2031,7 @@ func (t *Event) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Entries: array too large (%d)", extra) } @@ -2045,9 +2059,9 @@ func (t *Event) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -2071,7 +2085,7 @@ func (t *EventEntry) MarshalCBOR(w io.Writer) error { } // t.Key (string) (string) - if len(t.Key) > cbg.MaxLength { + if len(t.Key) > 8192 { return xerrors.Errorf("Value in field t.Key was too long") } @@ -2089,7 +2103,7 @@ func (t *EventEntry) MarshalCBOR(w io.Writer) error { } // t.Value ([]uint8) (slice) - if len(t.Value) > cbg.ByteArrayMaxLen { + if len(t.Value) > 2097152 { return xerrors.Errorf("Byte array in field t.Value was too long") } @@ -2097,9 +2111,10 @@ func (t *EventEntry) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Value[:]); err != nil { + if _, err := cw.Write(t.Value); err != nil { return err } + return nil } @@ -2142,7 +2157,7 @@ func (t *EventEntry) UnmarshalCBOR(r io.Reader) (err error) { // t.Key (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -2170,7 +2185,7 @@ func (t *EventEntry) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Value: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -2181,9 +2196,10 @@ func (t *EventEntry) UnmarshalCBOR(r io.Reader) (err error) { t.Value = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Value[:]); err != nil { + if _, err := io.ReadFull(cr, t.Value); err != nil { return err } + return nil } @@ -2202,7 +2218,7 @@ func (t *GasTrace) MarshalCBOR(w io.Writer) 
error { } // t.Name (string) (string) - if len(t.Name) > cbg.MaxLength { + if len(t.Name) > 8192 { return xerrors.Errorf("Value in field t.Name was too long") } @@ -2256,6 +2272,7 @@ func (t *GasTrace) MarshalCBOR(w io.Writer) error { return err } } + return nil } @@ -2285,7 +2302,7 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.Name (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -2295,10 +2312,10 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.TotalGas (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -2320,10 +2337,10 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.ComputeGas (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -2345,10 +2362,10 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.StorageGas (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -2370,10 +2387,10 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.TimeTaken (time.Duration) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -2395,7 +2412,83 @@ func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) { return nil } -var lengthBufMessageTrace = []byte{137} +var lengthBufActorTrace = []byte{130} + +func (t *ActorTrace) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write(lengthBufActorTrace); err != 
nil { + return err + } + + // t.Id (abi.ActorID) (uint64) + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Id)); err != nil { + return err + } + + // t.State (types.ActorV5) (struct) + if err := t.State.MarshalCBOR(cw); err != nil { + return err + } + return nil +} + +func (t *ActorTrace) UnmarshalCBOR(r io.Reader) (err error) { + *t = ActorTrace{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Id (abi.ActorID) (uint64) + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Id = abi.ActorID(extra) + + } + // t.State (types.ActorV5) (struct) + + { + + if err := t.State.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.State: %w", err) + } + + } + return nil +} + +var lengthBufMessageTrace = []byte{136} func (t *MessageTrace) MarshalCBOR(w io.Writer) error { if t == nil { @@ -2431,7 +2524,7 @@ func (t *MessageTrace) MarshalCBOR(w io.Writer) error { } // t.Params ([]uint8) (slice) - if len(t.Params) > cbg.ByteArrayMaxLen { + if len(t.Params) > 2097152 { return xerrors.Errorf("Byte array in field t.Params was too long") } @@ -2439,7 +2532,7 @@ func (t *MessageTrace) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Params[:]); err != nil { + if _, err := cw.Write(t.Params); err != nil { return err } @@ -2459,13 +2552,6 @@ func (t *MessageTrace) MarshalCBOR(w io.Writer) error { if err := cbg.WriteBool(w, t.ReadOnly); err != nil { return err } - - // t.CodeCid (cid.Cid) (struct) - - if err := cbg.WriteCid(cw, t.CodeCid); err != nil { - return xerrors.Errorf("failed to write cid field 
t.CodeCid: %w", err) - } - return nil } @@ -2488,7 +2574,7 @@ func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) { return fmt.Errorf("cbor input should be of type array") } - if extra != 9 { + if extra != 8 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -2540,7 +2626,7 @@ func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Params: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -2551,9 +2637,10 @@ func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) { t.Params = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + if _, err := io.ReadFull(cr, t.Params); err != nil { return err } + // t.ParamsCodec (uint64) (uint64) { @@ -2599,18 +2686,6 @@ func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) { default: return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) } - // t.CodeCid (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("failed to read cid field t.CodeCid: %w", err) - } - - t.CodeCid = c - - } return nil } @@ -2640,7 +2715,7 @@ func (t *ReturnTrace) MarshalCBOR(w io.Writer) error { } // t.Return ([]uint8) (slice) - if len(t.Return) > cbg.ByteArrayMaxLen { + if len(t.Return) > 2097152 { return xerrors.Errorf("Byte array in field t.Return was too long") } @@ -2648,7 +2723,7 @@ func (t *ReturnTrace) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Return[:]); err != nil { + if _, err := cw.Write(t.Return); err != nil { return err } @@ -2687,10 +2762,10 @@ func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) { // t.ExitCode (exitcode.ExitCode) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -2716,7 +2791,7 @@ func (t *ReturnTrace) 
UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Return: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -2727,9 +2802,10 @@ func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) { t.Return = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Return[:]); err != nil { + if _, err := io.ReadFull(cr, t.Return); err != nil { return err } + // t.ReturnCodec (uint64) (uint64) { @@ -2747,7 +2823,7 @@ func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) { return nil } -var lengthBufExecutionTrace = []byte{132} +var lengthBufExecutionTrace = []byte{133} func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error { if t == nil { @@ -2771,6 +2847,11 @@ func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error { return err } + // t.InvokedActor (types.ActorTrace) (struct) + if err := t.InvokedActor.MarshalCBOR(cw); err != nil { + return err + } + // t.GasCharges ([]*types.GasTrace) (slice) if len(t.GasCharges) > 1000000000 { return xerrors.Errorf("Slice value in field t.GasCharges was too long") @@ -2783,6 +2864,7 @@ func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Subcalls ([]types.ExecutionTrace) (slice) @@ -2797,6 +2879,7 @@ func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -2820,7 +2903,7 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { return fmt.Errorf("cbor input should be of type array") } - if extra != 4 { + if extra != 5 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -2841,6 +2924,25 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { return xerrors.Errorf("unmarshaling t.MsgRct: %w", err) } + } + // t.InvokedActor (types.ActorTrace) (struct) + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + 
if err := cr.UnreadByte(); err != nil { + return err + } + t.InvokedActor = new(ActorTrace) + if err := t.InvokedActor.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.InvokedActor pointer: %w", err) + } + } + } // t.GasCharges ([]*types.GasTrace) (slice) @@ -2887,9 +2989,9 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Subcalls ([]types.ExecutionTrace) (slice) maj, extra, err = cr.ReadHeader() @@ -2925,8 +3027,8 @@ func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go index bff15ed24..35fed87d8 100644 --- a/chain/types/ethtypes/eth_types.go +++ b/chain/types/ethtypes/eth_types.go @@ -349,6 +349,13 @@ func IsEthAddress(addr address.Address) bool { return namespace == builtintypes.EthereumAddressManagerActorID && len(payload) == 20 && !bytes.HasPrefix(payload, maskedIDPrefix[:]) } +func EthAddressFromActorID(id abi.ActorID) EthAddress { + var ethaddr EthAddress + ethaddr[0] = 0xff + binary.BigEndian.PutUint64(ethaddr[12:], uint64(id)) + return ethaddr +} + func EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) { switch addr.Protocol() { case address.ID: @@ -356,10 +363,7 @@ func EthAddressFromFilecoinAddress(addr address.Address) (EthAddress, error) { if err != nil { return EthAddress{}, err } - var ethaddr EthAddress - ethaddr[0] = 0xff - binary.BigEndian.PutUint64(ethaddr[12:], id) - return ethaddr, nil + return EthAddressFromActorID(abi.ActorID(id)), nil case address.Delegated: payload := addr.Payload() namespace, n, err := varint.FromUvarint(payload) @@ -606,7 +610,7 @@ type EthFilterSpec struct { Topics EthTopicSpec `json:"topics"` // Restricts event logs returned to those emitted from messages contained in this tipset. - // If BlockHash is present in in the filter criteria, then neither FromBlock nor ToBlock are allowed. 
+ // If BlockHash is present in the filter criteria, then neither FromBlock nor ToBlock are allowed. // Added in EIP-234 BlockHash *EthHash `json:"blockHash,omitempty"` } @@ -983,22 +987,12 @@ func (e *EthBlockNumberOrHash) UnmarshalJSON(b []byte) error { } type EthTrace struct { - Action EthTraceAction `json:"action"` - Result EthTraceResult `json:"result"` - Subtraces int `json:"subtraces"` - TraceAddress []int `json:"traceAddress"` - Type string `json:"Type"` - - Parent *EthTrace `json:"-"` - - // if a subtrace makes a call to GetBytecode, we store a pointer to that subtrace here - // which we then lookup when checking for delegatecall (InvokeContractDelegate) - LastByteCode *EthTrace `json:"-"` -} - -func (t *EthTrace) SetCallType(callType string) { - t.Action.CallType = callType - t.Type = callType + Type string `json:"type"` + Error string `json:"error,omitempty"` + Subtraces int `json:"subtraces"` + TraceAddress []int `json:"traceAddress"` + Action any `json:"action"` + Result any `json:"result"` } type EthTraceBlock struct { @@ -1017,21 +1011,29 @@ type EthTraceReplayBlockTransaction struct { VmTrace *string `json:"vmTrace"` } -type EthTraceAction struct { +type EthCallTraceAction struct { CallType string `json:"callType"` From EthAddress `json:"from"` To EthAddress `json:"to"` Gas EthUint64 `json:"gas"` - Input EthBytes `json:"input"` Value EthBigInt `json:"value"` - - FilecoinMethod abi.MethodNum `json:"-"` - FilecoinCodeCid cid.Cid `json:"-"` - FilecoinFrom address.Address `json:"-"` - FilecoinTo address.Address `json:"-"` + Input EthBytes `json:"input"` } -type EthTraceResult struct { +type EthCallTraceResult struct { GasUsed EthUint64 `json:"gasUsed"` Output EthBytes `json:"output"` } + +type EthCreateTraceAction struct { + From EthAddress `json:"from"` + Gas EthUint64 `json:"gas"` + Value EthBigInt `json:"value"` + Init EthBytes `json:"init"` +} + +type EthCreateTraceResult struct { + Address *EthAddress `json:"address,omitempty"` + GasUsed EthUint64 
`json:"gasUsed"` + Code EthBytes `json:"code"` +} diff --git a/chain/types/event.go b/chain/types/event.go index 106a120e2..5f6415d49 100644 --- a/chain/types/event.go +++ b/chain/types/event.go @@ -28,7 +28,7 @@ type EventEntry struct { // The event value's codec Codec uint64 - // The event value + // The event value. It is encoded using the codec specified above Value []byte } diff --git a/chain/types/execresult.go b/chain/types/execresult.go index 4556f7b88..99bbb6ece 100644 --- a/chain/types/execresult.go +++ b/chain/types/execresult.go @@ -4,8 +4,6 @@ import ( "encoding/json" "time" - "github.com/ipfs/go-cid" - "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" @@ -28,7 +26,11 @@ type MessageTrace struct { ParamsCodec uint64 GasLimit uint64 ReadOnly bool - CodeCid cid.Cid +} + +type ActorTrace struct { + Id abi.ActorID + State Actor } type ReturnTrace struct { @@ -38,10 +40,11 @@ type ReturnTrace struct { } type ExecutionTrace struct { - Msg MessageTrace - MsgRct ReturnTrace - GasCharges []*GasTrace `cborgen:"maxlen=1000000000"` - Subcalls []ExecutionTrace `cborgen:"maxlen=1000000000"` + Msg MessageTrace + MsgRct ReturnTrace + InvokedActor *ActorTrace `json:",omitempty"` + GasCharges []*GasTrace `cborgen:"maxlen=1000000000"` + Subcalls []ExecutionTrace `cborgen:"maxlen=1000000000"` } func (et ExecutionTrace) SumGas() GasTrace { diff --git a/cli/client.go b/cli/client.go index 88f7ed208..81299b8fb 100644 --- a/cli/client.go +++ b/cli/client.go @@ -1770,7 +1770,7 @@ func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipS if v.DealID == 0 { return deal{ LocalDeal: v, - OnChainDealState: *market.EmptyDealState(), + OnChainDealState: market.EmptyDealState(), } } @@ -1781,7 +1781,7 @@ func dealFromDealInfo(ctx context.Context, full v0api.FullNode, head *types.TipS return deal{ LocalDeal: v, - OnChainDealState: onChain.State, + OnChainDealState: 
onChain.State.Iface(), } } @@ -1807,13 +1807,13 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, fmt.Fprintf(w, "Created\tDealCid\tDealId\tProvider\tState\tOn Chain?\tSlashed?\tPieceCID\tSize\tPrice\tDuration\tTransferChannelID\tTransferStatus\tVerified\tMessage\n") for _, d := range deals { onChain := "N" - if d.OnChainDealState.SectorStartEpoch != -1 { - onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch) + if d.OnChainDealState.SectorStartEpoch() != -1 { + onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch()) } slashed := "N" - if d.OnChainDealState.SlashEpoch != -1 { - slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch) + if d.OnChainDealState.SlashEpoch() != -1 { + slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch()) } price := types.FIL(types.BigMul(d.LocalDeal.PricePerEpoch, types.NewInt(d.LocalDeal.Duration))) @@ -1869,13 +1869,13 @@ func outputStorageDeals(ctx context.Context, out io.Writer, full v0api.FullNode, propcid := ellipsis(d.LocalDeal.ProposalCid.String(), 8) onChain := "N" - if d.OnChainDealState.SectorStartEpoch != -1 { - onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch) + if d.OnChainDealState.SectorStartEpoch() != -1 { + onChain = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SectorStartEpoch()) } slashed := "N" - if d.OnChainDealState.SlashEpoch != -1 { - slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch) + if d.OnChainDealState.SlashEpoch() != -1 { + slashed = fmt.Sprintf("Y (epoch %d)", d.OnChainDealState.SlashEpoch()) } piece := ellipsis(d.LocalDeal.PieceCID.String(), 8) diff --git a/cli/filplus.go b/cli/filplus.go index 9fbd2a489..e86fe0372 100644 --- a/cli/filplus.go +++ b/cli/filplus.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "strconv" + "strings" cbor "github.com/ipfs/go-ipld-cbor" "github.com/urfave/cli/v2" @@ -233,16 +234,21 @@ var filplusListClientsCmd = &cli.Command{ var 
filplusListAllocationsCmd = &cli.Command{ Name: "list-allocations", - Usage: "List allocations made by client", + Usage: "List allocations available in verified registry actor or made by a client if specified", ArgsUsage: "clientAddress", Flags: []cli.Flag{ &cli.BoolFlag{ Name: "expired", Usage: "list only expired allocations", }, + &cli.BoolFlag{ + Name: "json", + Usage: "output results in json format", + Value: false, + }, }, Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { + if cctx.NArg() > 1 { return IncorrectNumArgs(cctx) } @@ -253,14 +259,76 @@ var filplusListAllocationsCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) - clientAddr, err := address.NewFromString(cctx.Args().Get(0)) - if err != nil { - return err - } + writeOut := func(tsHeight abi.ChainEpoch, allocations map[verifreg.AllocationId]verifreg.Allocation, json, expired bool) error { + // Map Keys. Corresponds to the standard tablewriter output + allocationID := "AllocationID" + client := "Client" + provider := "Miner" + pieceCid := "PieceCid" + pieceSize := "PieceSize" + tMin := "TermMin" + tMax := "TermMax" + expr := "Expiration" - clientIdAddr, err := api.StateLookupID(ctx, clientAddr, types.EmptyTSK) - if err != nil { - return err + // One-to-one mapping between tablewriter keys and JSON keys + tableKeysToJsonKeys := map[string]string{ + allocationID: strings.ToLower(allocationID), + client: strings.ToLower(client), + provider: strings.ToLower(provider), + pieceCid: strings.ToLower(pieceCid), + pieceSize: strings.ToLower(pieceSize), + tMin: strings.ToLower(tMin), + tMax: strings.ToLower(tMax), + expr: strings.ToLower(expr), + } + + var allocs []map[string]interface{} + + for key, val := range allocations { + if tsHeight > val.Expiration || !expired { + alloc := map[string]interface{}{ + allocationID: key, + client: val.Client, + provider: val.Provider, + pieceCid: val.Data, + pieceSize: val.Size, + tMin: val.TermMin, + tMax: val.TermMax, + expr: val.Expiration, + } + 
allocs = append(allocs, alloc) + } + } + + if json { + // get a new list of allocations with json keys instead of tablewriter keys + var jsonAllocs []map[string]interface{} + for _, alloc := range allocs { + jsonAlloc := make(map[string]interface{}) + for k, v := range alloc { + jsonAlloc[tableKeysToJsonKeys[k]] = v + } + jsonAllocs = append(jsonAllocs, jsonAlloc) + } + // then return this! + return PrintJson(jsonAllocs) + } + // Init the tablewriter's columns + tw := tablewriter.New( + tablewriter.Col(allocationID), + tablewriter.Col(client), + tablewriter.Col(provider), + tablewriter.Col(pieceCid), + tablewriter.Col(pieceSize), + tablewriter.Col(tMin), + tablewriter.Col(tMax), + tablewriter.NewLineCol(expr)) + // populate it with content + for _, alloc := range allocs { + tw.Write(alloc) + } + // return the corresponding string + return tw.Flush(os.Stdout) } store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) @@ -280,41 +348,38 @@ var filplusListAllocationsCmd = &cli.Command{ return err } - allocationsMap, err := verifregState.GetAllocations(clientIdAddr) + if cctx.NArg() == 1 { + clientAddr, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + + clientIdAddr, err := api.StateLookupID(ctx, clientAddr, types.EmptyTSK) + if err != nil { + return err + } + + allocationsMap, err := verifregState.GetAllocations(clientIdAddr) + if err != nil { + return err + } + + return writeOut(ts.Height(), allocationsMap, cctx.Bool("json"), cctx.Bool("expired")) + } + + allocationsMap, err := verifregState.GetAllAllocations() if err != nil { return err } - tw := tablewriter.New( - tablewriter.Col("ID"), - tablewriter.Col("Provider"), - tablewriter.Col("Data"), - tablewriter.Col("Size"), - tablewriter.Col("TermMin"), - tablewriter.Col("TermMax"), - tablewriter.Col("Expiration"), - ) + return writeOut(ts.Height(), allocationsMap, cctx.Bool("json"), cctx.Bool("expired")) - for allocationId, allocation := range allocationsMap 
{ - if ts.Height() > allocation.Expiration || !cctx.IsSet("expired") { - tw.Write(map[string]interface{}{ - "ID": allocationId, - "Provider": allocation.Provider, - "Data": allocation.Data, - "Size": allocation.Size, - "TermMin": allocation.TermMin, - "TermMax": allocation.TermMax, - "Expiration": allocation.Expiration, - }) - } - } - return tw.Flush(os.Stdout) }, } var filplusListClaimsCmd = &cli.Command{ Name: "list-claims", - Usage: "List claims made by provider", + Usage: "List claims available in verified registry actor or made by provider if specified", ArgsUsage: "providerAddress", Flags: []cli.Flag{ &cli.BoolFlag{ @@ -323,7 +388,7 @@ var filplusListClaimsCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - if cctx.NArg() != 1 { + if cctx.NArg() > 1 { return IncorrectNumArgs(cctx) } @@ -334,14 +399,81 @@ var filplusListClaimsCmd = &cli.Command{ defer closer() ctx := ReqContext(cctx) - providerAddr, err := address.NewFromString(cctx.Args().Get(0)) - if err != nil { - return err - } + writeOut := func(tsHeight abi.ChainEpoch, claims map[verifreg.ClaimId]verifreg.Claim, json, expired bool) error { + // Map Keys. 
Corresponds to the standard tablewriter output + claimID := "ClaimID" + provider := "Provider" + client := "Client" + data := "Data" + size := "Size" + tMin := "TermMin" + tMax := "TermMax" + tStart := "TermStart" + sector := "Sector" - providerIdAddr, err := api.StateLookupID(ctx, providerAddr, types.EmptyTSK) - if err != nil { - return err + // One-to-one mapping between tablewriter keys and JSON keys + tableKeysToJsonKeys := map[string]string{ + claimID: strings.ToLower(claimID), + provider: strings.ToLower(provider), + client: strings.ToLower(client), + data: strings.ToLower(data), + size: strings.ToLower(size), + tMin: strings.ToLower(tMin), + tMax: strings.ToLower(tMax), + tStart: strings.ToLower(tStart), + sector: strings.ToLower(sector), + } + + var claimList []map[string]interface{} + + for key, val := range claims { + if tsHeight > val.TermMax || !expired { + claim := map[string]interface{}{ + claimID: key, + provider: val.Provider, + client: val.Client, + data: val.Data, + size: val.Size, + tMin: val.TermMin, + tMax: val.TermMax, + tStart: val.TermStart, + sector: val.Sector, + } + claimList = append(claimList, claim) + } + } + + if json { + // get a new list of claims with json keys instead of tablewriter keys + var jsonClaims []map[string]interface{} + for _, claim := range claimList { + jsonClaim := make(map[string]interface{}) + for k, v := range claim { + jsonClaim[tableKeysToJsonKeys[k]] = v + } + jsonClaims = append(jsonClaims, jsonClaim) + } + // then return this! 
+ return PrintJson(jsonClaims) + } + // Init the tablewriter's columns + tw := tablewriter.New( + tablewriter.Col(claimID), + tablewriter.Col(client), + tablewriter.Col(provider), + tablewriter.Col(data), + tablewriter.Col(size), + tablewriter.Col(tMin), + tablewriter.Col(tMax), + tablewriter.Col(tStart), + tablewriter.NewLineCol(sector)) + // populate it with content + for _, alloc := range claimList { + + tw.Write(alloc) + } + // return the corresponding string + return tw.Flush(os.Stdout) } store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api))) @@ -361,39 +493,32 @@ var filplusListClaimsCmd = &cli.Command{ return err } - claimsMap, err := verifregState.GetClaims(providerIdAddr) + if cctx.NArg() == 1 { + providerAddr, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + + providerIdAddr, err := api.StateLookupID(ctx, providerAddr, types.EmptyTSK) + if err != nil { + return err + } + + claimsMap, err := verifregState.GetClaims(providerIdAddr) + if err != nil { + return err + } + + return writeOut(ts.Height(), claimsMap, cctx.Bool("json"), cctx.Bool("expired")) + } + + claimsMap, err := verifregState.GetAllClaims() if err != nil { return err } - tw := tablewriter.New( - tablewriter.Col("ID"), - tablewriter.Col("Provider"), - tablewriter.Col("Client"), - tablewriter.Col("Data"), - tablewriter.Col("Size"), - tablewriter.Col("TermMin"), - tablewriter.Col("TermMax"), - tablewriter.Col("TermStart"), - tablewriter.Col("Sector"), - ) + return writeOut(ts.Height(), claimsMap, cctx.Bool("json"), cctx.Bool("expired")) - for claimId, claim := range claimsMap { - if ts.Height() > claim.TermMax || !cctx.IsSet("expired") { - tw.Write(map[string]interface{}{ - "ID": claimId, - "Provider": claim.Provider, - "Client": claim.Client, - "Data": claim.Data, - "Size": claim.Size, - "TermMin": claim.TermMin, - "TermMax": claim.TermMax, - "TermStart": claim.TermStart, - "Sector": claim.Sector, - }) - } - } - return 
tw.Flush(os.Stdout) }, } diff --git a/cli/util.go b/cli/util.go index 03de817f9..de161f590 100644 --- a/cli/util.go +++ b/cli/util.go @@ -2,6 +2,8 @@ package cli import ( "context" + "encoding/json" + "fmt" "os" "github.com/fatih/color" @@ -37,3 +39,13 @@ func parseTipSet(ctx context.Context, api v0api.FullNode, vals []string) (*types return types.NewTipSet(headers) } + +func PrintJson(obj interface{}) error { + resJson, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return fmt.Errorf("marshalling json: %w", err) + } + + fmt.Println(string(resJson)) + return nil +} diff --git a/cli/util/api.go b/cli/util/api.go index fe1ac1536..3602b752d 100644 --- a/cli/util/api.go +++ b/cli/util/api.go @@ -445,7 +445,7 @@ func GetFullNodeAPIV1LotusProvider(ctx *cli.Context, ainfoCfg []string, opts ... for _, head := range heads { v1api, closer, err := client.NewFullNodeRPCV1(ctx.Context, head.addr, head.header, rpcOpts...) if err != nil { - log.Warnf("Not able to establish connection to node with addr: %s, Reason: %s", head.addr, err.Error()) + log.Warnf("Not able to establish connection to node with addr: %s", head.addr) continue } fullNodes = append(fullNodes, v1api) diff --git a/cmd/lotus-bench/amt_internal.go b/cmd/lotus-bench/amt_internal.go deleted file mode 100644 index f0e3035b7..000000000 --- a/cmd/lotus-bench/amt_internal.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copied from go-amt-ipld https://github.com/filecoin-project/go-amt-ipld/tree/master/internal -// which for some reason is a go internal package and therefore cannot be imported - -package main - -import ( - "fmt" - "io" - "math" - "sort" - - cid "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -type AMTRoot struct { - BitWidth uint64 - Height uint64 - Count uint64 - AMTNode AMTNode -} - -type AMTNode struct { - Bmap []byte - Links []cid.Cid - Values []*cbg.Deferred -} - -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -var _ = xerrors.Errorf -var _ = cid.Undef -var _ = math.E -var _ = sort.Sort - -var lengthBufAMTRoot = []byte{132} - -func (t *AMTRoot) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write(lengthBufAMTRoot); err != nil { - return err - } - - // t.BitWidth (uint64) (uint64) - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.BitWidth); err != nil { - return err - } - - // t.Height (uint64) (uint64) - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Height); err != nil { - return err - } - - // t.Count (uint64) (uint64) - - if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, t.Count); err != nil { - return err - } - - // t.AMTNode (internal.AMTNode) (struct) - if err := t.AMTNode.MarshalCBOR(cw); err != nil { - return err - } - return nil -} - -func (t *AMTRoot) UnmarshalCBOR(r io.Reader) (err error) { - *t = AMTRoot{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.BitWidth (uint64) (uint64) - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.BitWidth = extra - - } - // t.Height (uint64) (uint64) - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Height = extra - - } - // t.Count (uint64) (uint64) - - { - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Count = extra - - } - // 
t.AMTNode (internal.AMTNode) (struct) - - { - - if err := t.AMTNode.UnmarshalCBOR(cr); err != nil { - return xerrors.Errorf("unmarshaling t.AMTNode: %w", err) - } - - } - return nil -} - -var lengthBufAMTNode = []byte{131} - -func (t *AMTNode) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - - cw := cbg.NewCborWriter(w) - - if _, err := cw.Write(lengthBufAMTNode); err != nil { - return err - } - - // t.Bmap ([]uint8) (slice) - if len(t.Bmap) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Bmap was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Bmap))); err != nil { - return err - } - - if _, err := cw.Write(t.Bmap[:]); err != nil { - return err - } - - // t.Links ([]cid.Cid) (slice) - if len(t.Links) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Links was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Links))); err != nil { - return err - } - for _, v := range t.Links { - if err := cbg.WriteCid(w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Links: %w", err) - } - } - - // t.Values ([]*typegen.Deferred) (slice) - if len(t.Values) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Values was too long") - } - - if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Values))); err != nil { - return err - } - for _, v := range t.Values { - if err := v.MarshalCBOR(cw); err != nil { - return err - } - } - return nil -} - -func (t *AMTNode) UnmarshalCBOR(r io.Reader) (err error) { - *t = AMTNode{} - - cr := cbg.NewCborReader(r) - - maj, extra, err := cr.ReadHeader() - if err != nil { - return err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Bmap 
([]uint8) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Bmap: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - - if extra > 0 { - t.Bmap = make([]uint8, extra) - } - - if _, err := io.ReadFull(cr, t.Bmap[:]); err != nil { - return err - } - // t.Links ([]cid.Cid) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Links: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Links = make([]cid.Cid, extra) - } - - for i := 0; i < int(extra); i++ { - - c, err := cbg.ReadCid(cr) - if err != nil { - return xerrors.Errorf("reading cid field t.Links failed: %w", err) - } - t.Links[i] = c - } - - // t.Values ([]*typegen.Deferred) (slice) - - maj, extra, err = cr.ReadHeader() - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Values: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Values = make([]*cbg.Deferred, extra) - } - - for i := 0; i < int(extra); i++ { - - var v cbg.Deferred - if err := v.UnmarshalCBOR(cr); err != nil { - return err - } - - t.Values[i] = &v - } - - return nil -} diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 7d3c0cde0..545ed1eb9 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -1,7 +1,6 @@ package main import ( - "bytes" "context" "crypto/rand" "encoding/json" @@ -9,16 +8,9 @@ import ( "math/big" "os" "path/filepath" - "sync" "time" "github.com/docker/go-units" - "github.com/ipfs/boxo/blockservice" - "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/go-cid" - offline "github.com/ipfs/go-ipfs-exchange-offline" - cbor "github.com/ipfs/go-ipld-cbor" - format 
"github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" "github.com/minio/blake2b-simd" "github.com/mitchellh/go-homedir" @@ -28,14 +20,10 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" prooftypes "github.com/filecoin-project/go-state-types/proof" - adt "github.com/filecoin-project/specs-actors/v6/actors/util/adt" lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" @@ -116,7 +104,6 @@ func main() { DisableSliceFlagSeparator: true, Commands: []*cli.Command{ proveCmd, - amtBenchCmd, sealBenchCmd, simpleCmd, importBenchCmd, @@ -131,211 +118,6 @@ func main() { } } -type amtStatCollector struct { - ds format.NodeGetter - walk func(format.Node) ([]*format.Link, error) - - statsLk sync.Mutex - totalAMTLinks int - totalAMTValues int - totalAMTLinkNodes int - totalAMTValueNodes int - totalAMTLinkNodeSize int - totalAMTValueNodeSize int -} - -func (asc *amtStatCollector) String() string { - asc.statsLk.Lock() - defer asc.statsLk.Unlock() - - str := "\n------------\n" - str += fmt.Sprintf("Link Count: %d\n", asc.totalAMTLinks) - str += fmt.Sprintf("Value Count: %d\n", asc.totalAMTValues) - str += fmt.Sprintf("%d link nodes %d bytes\n", asc.totalAMTLinkNodes, asc.totalAMTLinkNodeSize) - str += fmt.Sprintf("%d value nodes %d bytes\n", asc.totalAMTValueNodes, asc.totalAMTValueNodeSize) - str += fmt.Sprintf("Total bytes: %d\n------------\n", asc.totalAMTLinkNodeSize+asc.totalAMTValueNodeSize) - return str -} - -func (asc *amtStatCollector) record(ctx context.Context, nd format.Node) error { - size, err 
:= nd.Size() - if err != nil { - return err - } - - var node AMTNode - if err := node.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil { - // try to deserialize root - var root AMTRoot - if err := root.UnmarshalCBOR(bytes.NewReader(nd.RawData())); err != nil { - return err - } - node = root.AMTNode - } - - asc.statsLk.Lock() - defer asc.statsLk.Unlock() - - link := len(node.Links) > 0 - value := len(node.Values) > 0 - - if link { - asc.totalAMTLinks += len(node.Links) - asc.totalAMTLinkNodes++ - asc.totalAMTLinkNodeSize += int(size) - } else if value { - asc.totalAMTValues += len(node.Values) - asc.totalAMTValueNodes++ - asc.totalAMTValueNodeSize += int(size) - } else { - return xerrors.Errorf("unexpected AMT node %x: neither link nor value", nd.RawData()) - } - - return nil -} - -func (asc *amtStatCollector) walkLinks(ctx context.Context, c cid.Cid) ([]*format.Link, error) { - nd, err := asc.ds.Get(ctx, c) - if err != nil { - return nil, err - } - - if err := asc.record(ctx, nd); err != nil { - return nil, err - } - - return asc.walk(nd) -} - -func carWalkFunc(nd format.Node) (out []*format.Link, err error) { - for _, link := range nd.Links() { - if link.Cid.Prefix().Codec == cid.FilCommitmentSealed || link.Cid.Prefix().Codec == cid.FilCommitmentUnsealed { - continue - } - out = append(out, link) - } - return out, nil -} - -var amtBenchCmd = &cli.Command{ - Name: "amt", - Usage: "Benchmark AMT churn", - Flags: []cli.Flag{ - &cli.IntFlag{ - Name: "rounds", - Usage: "rounds of churn to measure", - Value: 1, - }, - &cli.IntFlag{ - Name: "interval", - Usage: "AMT idx interval for churning values", - Value: 2880, - }, - &cli.IntFlag{ - Name: "bitwidth", - Usage: "AMT bitwidth", - Value: 6, - }, - }, - Action: func(c *cli.Context) error { - bs := blockstore.NewMemory() - ctx := c.Context - store := adt.WrapStore(ctx, cbor.NewCborStore(bs)) - - // Setup in memory blockstore - bitwidth := c.Int("bitwidth") - array, err := adt.MakeEmptyArray(store, bitwidth) - if 
err != nil { - return err - } - - // Using motivating empirical example: market actor states AMT - // Create 40,000,000 states for realistic workload - fmt.Printf("Populating AMT\n") - for i := 0; i < 40000000; i++ { - if err := array.Set(uint64(i), &market.DealState{ - SectorStartEpoch: abi.ChainEpoch(2000000 + i), - LastUpdatedEpoch: abi.ChainEpoch(-1), - SlashEpoch: -1, - VerifiedClaim: verifreg.AllocationId(i), - }); err != nil { - return err - } - } - - r, err := array.Root() - if err != nil { - return err - } - - // Measure ratio of internal / leaf nodes / sizes - dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - asc := &amtStatCollector{ - ds: dag, - walk: carWalkFunc, - } - - fmt.Printf("Measuring AMT\n") - seen := cid.NewSet() - if err := merkledag.Walk(ctx, asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil { - return err - } - - fmt.Printf("%s\n", asc) - - // Overwrite ids with idx % interval: one epoch of market cron - rounds := c.Int("rounds") - interval := c.Int("interval") - - fmt.Printf("Overwrite 1 out of %d values for %d rounds\n", interval, rounds) - array, err = adt.AsArray(store, r, bitwidth) - if err != nil { - return err - } - roots := make([]cid.Cid, rounds) - for j := 0; j < rounds; j++ { - if j%10 == 0 { - fmt.Printf("round: %d\n", j) - } - for i := j; i < 40000000; i += interval { - if i%interval == j { - if err := array.Set(uint64(i), &market.DealState{ - SectorStartEpoch: abi.ChainEpoch(2000000 + i), - LastUpdatedEpoch: abi.ChainEpoch(1), - SlashEpoch: -1, - VerifiedClaim: verifreg.AllocationId(i), - }); err != nil { - return err - } - } - } - roots[j], err = array.Root() - if err != nil { - return err - } - - } - - // Measure churn - dag = merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) - asc = &amtStatCollector{ - ds: dag, - walk: carWalkFunc, - } - - fmt.Printf("Measuring %d rounds of churn\n", rounds) - - for _, r := range roots { - if err := merkledag.Walk(ctx, 
asc.walkLinks, r, seen.Visit, merkledag.Concurrent()); err != nil { - return err - } - } - - fmt.Printf("%s\n", asc) - return nil - }, -} - var sealBenchCmd = &cli.Command{ Name: "sealing", Usage: "Benchmark seal and winning post and window post", diff --git a/cmd/lotus-miner/init.go b/cmd/lotus-miner/init.go index 1a4a98fc4..e27b2d716 100644 --- a/cmd/lotus-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -55,6 +55,7 @@ import ( "github.com/filecoin-project/lotus/storage" "github.com/filecoin-project/lotus/storage/paths" pipeline "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" "github.com/filecoin-project/lotus/storage/sealer/storiface" @@ -120,11 +121,6 @@ var initCmd = &cli.Command{ Name: "from", Usage: "select which address to send actor creation message from", }, - &cli.Uint64Flag{ - Name: "confidence", - Usage: "number of block confirmations to wait for", - Value: build.MessageConfidence, - }, }, Subcommands: []*cli.Command{ restoreCmd, @@ -151,8 +147,6 @@ var initCmd = &cli.Command{ return xerrors.Errorf("failed to parse gas-price flag: %s", err) } - confidence := cctx.Uint64("confidence") - symlink := cctx.Bool("symlink-imported-sectors") if symlink { log.Info("will attempt to symlink to imported sectors") @@ -272,7 +266,7 @@ var initCmd = &cli.Command{ } } - if err := storageMinerInit(ctx, cctx, api, r, ssize, gasPrice, confidence); err != nil { + if err := storageMinerInit(ctx, cctx, api, r, ssize, gasPrice); err != nil { log.Errorf("Failed to initialize lotus-miner: %+v", err) path, err := homedir.Expand(repoPath) if err != nil { @@ -327,21 +321,21 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string info := &pipeline.SectorInfo{ State: pipeline.Proving, SectorNumber: sector.SectorID, - Pieces: []lapi.SectorPiece{ - { + Pieces: 
[]pipeline.SafeSectorPiece{ + pipeline.SafePiece(lapi.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(meta.SectorSize), PieceCID: commD, }, - DealInfo: &lapi.PieceDealInfo{ + DealInfo: &piece.PieceDealInfo{ DealID: dealID, DealProposal: §or.Deal, - DealSchedule: lapi.DealSchedule{ + DealSchedule: piece.DealSchedule{ StartEpoch: sector.Deal.StartEpoch, EndEpoch: sector.Deal.EndEpoch, }, }, - }, + }), }, CommD: &commD, CommR: &commR, @@ -421,7 +415,7 @@ func findMarketDealID(ctx context.Context, api v1api.FullNode, deal markettypes. return 0, xerrors.New("deal not found") } -func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode, r repo.Repo, ssize abi.SectorSize, gasPrice types.BigInt, confidence uint64) error { +func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode, r repo.Repo, ssize abi.SectorSize, gasPrice types.BigInt) error { lr, err := r.Lock(repo.StorageMiner) if err != nil { return err @@ -508,7 +502,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode return xerrors.Errorf("failed to start up genesis miner: %w", err) } - cerr := configureStorageMiner(ctx, api, a, peerid, gasPrice, confidence) + cerr := configureStorageMiner(ctx, api, a, peerid, gasPrice) if err := m.Stop(ctx); err != nil { log.Error("failed to shut down miner: ", err) @@ -548,13 +542,13 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode } } - if err := configureStorageMiner(ctx, api, a, peerid, gasPrice, confidence); err != nil { + if err := configureStorageMiner(ctx, api, a, peerid, gasPrice); err != nil { return xerrors.Errorf("failed to configure miner: %w", err) } addr = a } else { - a, err := createStorageMiner(ctx, api, ssize, peerid, gasPrice, confidence, cctx) + a, err := createStorageMiner(ctx, api, ssize, peerid, gasPrice, cctx) if err != nil { return xerrors.Errorf("creating miner failed: %w", err) } @@ -596,7 +590,7 @@ func makeHostKey(lr 
repo.LockedRepo) (crypto.PrivKey, error) { return pk, nil } -func configureStorageMiner(ctx context.Context, api v1api.FullNode, addr address.Address, peerid peer.ID, gasPrice types.BigInt, confidence uint64) error { +func configureStorageMiner(ctx context.Context, api v1api.FullNode, addr address.Address, peerid peer.ID, gasPrice types.BigInt) error { mi, err := api.StateMinerInfo(ctx, addr, types.EmptyTSK) if err != nil { return xerrors.Errorf("getWorkerAddr returned bad address: %w", err) @@ -622,7 +616,7 @@ func configureStorageMiner(ctx context.Context, api v1api.FullNode, addr address } log.Info("Waiting for message: ", smsg.Cid()) - ret, err := api.StateWaitMsg(ctx, smsg.Cid(), confidence, lapi.LookbackNoLimit, true) + ret, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true) if err != nil { return err } @@ -634,7 +628,7 @@ func configureStorageMiner(ctx context.Context, api v1api.FullNode, addr address return nil } -func createStorageMiner(ctx context.Context, api v1api.FullNode, ssize abi.SectorSize, peerid peer.ID, gasPrice types.BigInt, confidence uint64, cctx *cli.Context) (address.Address, error) { +func createStorageMiner(ctx context.Context, api v1api.FullNode, ssize abi.SectorSize, peerid peer.ID, gasPrice types.BigInt, cctx *cli.Context) (address.Address, error) { var err error var owner address.Address if cctx.String("owner") != "" { @@ -686,7 +680,7 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, ssize abi.Secto log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid()) log.Infof("Waiting for confirmation") - mw, err := api.StateWaitMsg(ctx, signed.Cid(), confidence, lapi.LookbackNoLimit, true) + mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true) if err != nil { return address.Undef, xerrors.Errorf("waiting for worker init: %w", err) } @@ -710,7 +704,7 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, 
ssize abi.Secto log.Infof("Initializing owner account %s, message: %s", worker, signed.Cid()) log.Infof("Waiting for confirmation") - mw, err := api.StateWaitMsg(ctx, signed.Cid(), confidence, lapi.LookbackNoLimit, true) + mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true) if err != nil { return address.Undef, xerrors.Errorf("waiting for owner init: %w", err) } @@ -759,7 +753,7 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, ssize abi.Secto log.Infof("Pushed CreateMiner message: %s", signed.Cid()) log.Infof("Waiting for confirmation") - mw, err := api.StateWaitMsg(ctx, signed.Cid(), confidence, lapi.LookbackNoLimit, true) + mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true) if err != nil { return address.Undef, xerrors.Errorf("waiting for createMiner message: %w", err) } diff --git a/cmd/lotus-miner/init_restore.go b/cmd/lotus-miner/init_restore.go index 272754c23..7e28729bb 100644 --- a/cmd/lotus-miner/init_restore.go +++ b/cmd/lotus-miner/init_restore.go @@ -80,7 +80,8 @@ var restoreCmd = &cli.Command{ } log.Info("Configuring miner actor") - if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero(), cctx.Uint64("confidence")); err != nil { + + if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { return err } diff --git a/cmd/lotus-miner/init_service.go b/cmd/lotus-miner/init_service.go index 876313941..235e4e4c8 100644 --- a/cmd/lotus-miner/init_service.go +++ b/cmd/lotus-miner/init_service.go @@ -105,7 +105,7 @@ var serviceCmd = &cli.Command{ if es.Contains(MarketsService) { log.Info("Configuring miner actor") - if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero(), cctx.Uint64("confidence")); err != nil { + if err := configureStorageMiner(ctx, api, maddr, peerid, big.Zero()); err != nil { return err } } diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index 
3e4439eb8..8b8fc65cb 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -487,6 +487,13 @@ var sectorsListCmd = &cli.Command{ } } + var pams int + for _, p := range st.Pieces { + if p.DealInfo != nil && p.DealInfo.PieceActivationManifest != nil { + pams++ + } + } + exp := st.Expiration if st.OnTime > 0 && st.OnTime < exp { exp = st.OnTime // Can be different when the sector was CC upgraded @@ -501,6 +508,8 @@ var sectorsListCmd = &cli.Command{ if deals > 0 { m["Deals"] = color.GreenString("%d", deals) + } else if pams > 0 { + m["Deals"] = color.MagentaString("DDO:%d", pams) } else { m["Deals"] = color.BlueString("CC") if st.ToUpgrade { @@ -2290,7 +2299,7 @@ var sectorsCompactPartitionsCmd = &cli.Command{ if len(parts) <= 0 { return fmt.Errorf("must include at least one partition to compact") } - fmt.Printf("compacting %d partitions\n", len(parts)) + fmt.Printf("compacting %d paritions\n", len(parts)) var makeMsgForPartitions func(partitionsBf bitfield.BitField) ([]*types.Message, error) makeMsgForPartitions = func(partitionsBf bitfield.BitField) ([]*types.Message, error) { diff --git a/cmd/lotus-provider/config.go b/cmd/lotus-provider/config.go index 44ba49beb..5bd681429 100644 --- a/cmd/lotus-provider/config.go +++ b/cmd/lotus-provider/config.go @@ -2,6 +2,7 @@ package main import ( "context" + "database/sql" "errors" "fmt" "io" @@ -13,7 +14,6 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" "github.com/filecoin-project/lotus/lib/harmony/harmonydb" "github.com/filecoin-project/lotus/node/config" ) @@ -77,7 +77,7 @@ var configSetCmd = &cli.Command{ Action: func(cctx *cli.Context) error { args := cctx.Args() - db, err := deps.MakeDB(cctx) + db, err := makeDB(cctx) if err != nil { return err } @@ -109,8 +109,9 @@ var configSetCmd = &cli.Command{ } _ = lp - err = setConfig(db, name, string(bytes)) - + _, err = db.Exec(context.Background(), + `INSERT INTO harmony_config 
(title, config) VALUES ($1, $2) + ON CONFLICT (title) DO UPDATE SET config = excluded.config`, name, string(bytes)) if err != nil { return fmt.Errorf("unable to save config layer: %w", err) } @@ -120,13 +121,6 @@ var configSetCmd = &cli.Command{ }, } -func setConfig(db *harmonydb.DB, name, config string) error { - _, err := db.Exec(context.Background(), - `INSERT INTO harmony_config (title, config) VALUES ($1, $2) - ON CONFLICT (title) DO UPDATE SET config = excluded.config`, name, config) - return err -} - var configGetCmd = &cli.Command{ Name: "get", Aliases: []string{"cat", "show"}, @@ -137,12 +131,13 @@ var configGetCmd = &cli.Command{ if args.Len() != 1 { return fmt.Errorf("want 1 layer arg, got %d", args.Len()) } - db, err := deps.MakeDB(cctx) + db, err := makeDB(cctx) if err != nil { return err } - cfg, err := getConfig(db, args.First()) + var cfg string + err = db.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title=$1`, args.First()).Scan(&cfg) if err != nil { return err } @@ -152,22 +147,13 @@ var configGetCmd = &cli.Command{ }, } -func getConfig(db *harmonydb.DB, layer string) (string, error) { - var cfg string - err := db.QueryRow(context.Background(), `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&cfg) - if err != nil { - return "", err - } - return cfg, nil -} - var configListCmd = &cli.Command{ Name: "list", Aliases: []string{"ls"}, Usage: "List config layers you can get.", Flags: []cli.Flag{}, Action: func(cctx *cli.Context) error { - db, err := deps.MakeDB(cctx) + db, err := makeDB(cctx) if err != nil { return err } @@ -194,7 +180,7 @@ var configRmCmd = &cli.Command{ if args.Len() != 1 { return errors.New("must have exactly 1 arg for the layer name") } - db, err := deps.MakeDB(cctx) + db, err := makeDB(cctx) if err != nil { return err } @@ -223,11 +209,11 @@ var configViewCmd = &cli.Command{ }, }, Action: func(cctx *cli.Context) error { - db, err := deps.MakeDB(cctx) + db, err := makeDB(cctx) if err != nil 
{ return err } - lp, err := deps.GetConfig(cctx, db) + lp, err := getConfig(cctx, db) if err != nil { return err } @@ -239,3 +225,35 @@ var configViewCmd = &cli.Command{ return nil }, } + +func getConfig(cctx *cli.Context, db *harmonydb.DB) (*config.LotusProviderConfig, error) { + lp := config.DefaultLotusProvider() + have := []string{} + layers := cctx.StringSlice("layers") + for _, layer := range layers { + text := "" + err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) + if err != nil { + if strings.Contains(err.Error(), sql.ErrNoRows.Error()) { + return nil, fmt.Errorf("missing layer '%s' ", layer) + } + if layer == "base" { + return nil, errors.New(`lotus-provider defaults to a layer named 'base'. + Either use 'migrate' command or edit a base.toml and upload it with: lotus-provider config set base.toml`) + } + return nil, fmt.Errorf("could not read layer '%s': %w", layer, err) + } + meta, err := toml.Decode(text, &lp) + if err != nil { + return nil, fmt.Errorf("could not read layer, bad toml %s: %w", layer, err) + } + for _, k := range meta.Keys() { + have = append(have, strings.Join(k, " ")) + } + } + _ = have // FUTURE: verify that required fields are here. + // If config includes 3rd-party config, consider JSONSchema as a way that + // 3rd-parties can dynamically include config requirements and we can + // validate the config. Because of layering, we must validate @ startup. + return lp, nil +} diff --git a/cmd/lotus-provider/deps/deps.go b/cmd/lotus-provider/deps/deps.go deleted file mode 100644 index 7a8db855f..000000000 --- a/cmd/lotus-provider/deps/deps.go +++ /dev/null @@ -1,282 +0,0 @@ -// Package deps provides the dependencies for the lotus provider node. 
-package deps - -import ( - "context" - "database/sql" - "encoding/base64" - "errors" - "fmt" - "net" - "net/http" - "os" - "strings" - - "github.com/BurntSushi/toml" - "github.com/gbrlsnchs/jwt/v3" - ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" - logging "github.com/ipfs/go-log/v2" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/go-statestore" - - "github.com/filecoin-project/lotus/api" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/journal" - "github.com/filecoin-project/lotus/journal/alerting" - "github.com/filecoin-project/lotus/journal/fsjournal" - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/lotus/provider" - "github.com/filecoin-project/lotus/storage/ctladdr" - "github.com/filecoin-project/lotus/storage/paths" - "github.com/filecoin-project/lotus/storage/sealer" - "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -var log = logging.Logger("lotus-provider/deps") - -func MakeDB(cctx *cli.Context) (*harmonydb.DB, error) { - dbConfig := config.HarmonyDB{ - Username: cctx.String("db-user"), - Password: cctx.String("db-password"), - Hosts: strings.Split(cctx.String("db-host"), ","), - Database: cctx.String("db-name"), - Port: cctx.String("db-port"), - } - return harmonydb.NewFromConfig(dbConfig) -} - -type JwtPayload struct { - Allow []auth.Permission -} - -func StorageAuth(apiKey string) (sealer.StorageAuth, error) { - if apiKey == "" { - return nil, xerrors.Errorf("no api key provided") - } - - rawKey, err := base64.StdEncoding.DecodeString(apiKey) - if err != nil { - return nil, 
xerrors.Errorf("decoding api key: %w", err) - } - - key := jwt.NewHS256(rawKey) - - p := JwtPayload{ - Allow: []auth.Permission{"admin"}, - } - - token, err := jwt.Sign(&p, key) - if err != nil { - return nil, err - } - - headers := http.Header{} - headers.Add("Authorization", "Bearer "+string(token)) - return sealer.StorageAuth(headers), nil -} - -func GetDeps(ctx context.Context, cctx *cli.Context) (*Deps, error) { - var deps Deps - return &deps, deps.PopulateRemainingDeps(ctx, cctx, true) -} - -type Deps struct { - Cfg *config.LotusProviderConfig - DB *harmonydb.DB - Full api.FullNode - Verif storiface.Verifier - LW *sealer.LocalWorker - As *ctladdr.AddressSelector - Maddrs []dtypes.MinerAddress - Stor *paths.Remote - Si *paths.DBIndex - LocalStore *paths.Local - ListenAddr string -} - -const ( - FlagRepoPath = "repo-path" -) - -func (deps *Deps) PopulateRemainingDeps(ctx context.Context, cctx *cli.Context, makeRepo bool) error { - var err error - if makeRepo { - // Open repo - repoPath := cctx.String(FlagRepoPath) - fmt.Println("repopath", repoPath) - r, err := repo.NewFS(repoPath) - if err != nil { - return err - } - - ok, err := r.Exists() - if err != nil { - return err - } - if !ok { - if err := r.Init(repo.Provider); err != nil { - return err - } - } - } - - if deps.Cfg == nil { - deps.DB, err = MakeDB(cctx) - if err != nil { - return err - } - } - - if deps.Cfg == nil { - // The config feeds into task runners & their helpers - deps.Cfg, err = GetConfig(cctx, deps.DB) - if err != nil { - return err - } - } - - log.Debugw("config", "config", deps.Cfg) - - if deps.Verif == nil { - deps.Verif = ffiwrapper.ProofVerifier - } - - if deps.As == nil { - deps.As, err = provider.AddressSelector(&deps.Cfg.Addresses)() - if err != nil { - return err - } - } - - if deps.Si == nil { - de, err := journal.ParseDisabledEvents(deps.Cfg.Journal.DisabledEvents) - if err != nil { - return err - } - j, err := fsjournal.OpenFSJournalPath(cctx.String("journal"), de) - if err != 
nil { - return err - } - go func() { - <-ctx.Done() - _ = j.Close() - }() - - al := alerting.NewAlertingSystem(j) - deps.Si = paths.NewDBIndex(al, deps.DB) - } - - if deps.Full == nil { - var fullCloser func() - cfgApiInfo := deps.Cfg.Apis.ChainApiInfo - if v := os.Getenv("FULLNODE_API_INFO"); v != "" { - cfgApiInfo = []string{v} - } - deps.Full, fullCloser, err = cliutil.GetFullNodeAPIV1LotusProvider(cctx, cfgApiInfo) - if err != nil { - return err - } - - go func() { - <-ctx.Done() - fullCloser() - }() - } - - bls := &paths.BasicLocalStorage{ - PathToJSON: cctx.String("storage-json"), - } - - if deps.ListenAddr == "" { - listenAddr := cctx.String("listen") - const unspecifiedAddress = "0.0.0.0" - addressSlice := strings.Split(listenAddr, ":") - if ip := net.ParseIP(addressSlice[0]); ip != nil { - if ip.String() == unspecifiedAddress { - rip, err := deps.DB.GetRoutableIP() - if err != nil { - return err - } - deps.ListenAddr = rip + ":" + addressSlice[1] - } - } - } - if deps.LocalStore == nil { - deps.LocalStore, err = paths.NewLocal(ctx, bls, deps.Si, []string{"http://" + deps.ListenAddr + "/remote"}) - if err != nil { - return err - } - } - - sa, err := StorageAuth(deps.Cfg.Apis.StorageRPCSecret) - if err != nil { - return xerrors.Errorf(`'%w' while parsing the config toml's - [Apis] - StorageRPCSecret=%v -Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, err, deps.Cfg.Apis.StorageRPCSecret) - } - if deps.Stor == nil { - deps.Stor = paths.NewRemote(deps.LocalStore, deps.Si, http.Header(sa), 10, &paths.DefaultPartialFileHandler{}) - } - if deps.LW == nil { - wstates := statestore.New(dssync.MutexWrap(ds.NewMapDatastore())) - - // todo localWorker isn't the abstraction layer we want to use here, we probably want to go straight to ffiwrapper - // maybe with a lotus-provider specific abstraction. LocalWorker does persistent call tracking which we probably - // don't need (ehh.. 
maybe we do, the async callback system may actually work decently well with harmonytask) - deps.LW = sealer.NewLocalWorker(sealer.WorkerConfig{}, deps.Stor, deps.LocalStore, deps.Si, nil, wstates) - } - if len(deps.Maddrs) == 0 { - for _, s := range deps.Cfg.Addresses.MinerAddresses { - addr, err := address.NewFromString(s) - if err != nil { - return err - } - deps.Maddrs = append(deps.Maddrs, dtypes.MinerAddress(addr)) - } - } - fmt.Println("last line of populate") - return nil -} - -func GetConfig(cctx *cli.Context, db *harmonydb.DB) (*config.LotusProviderConfig, error) { - lp := config.DefaultLotusProvider() - have := []string{} - layers := cctx.StringSlice("layers") - for _, layer := range layers { - text := "" - err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) - if err != nil { - if strings.Contains(err.Error(), sql.ErrNoRows.Error()) { - return nil, fmt.Errorf("missing layer '%s' ", layer) - } - if layer == "base" { - return nil, errors.New(`lotus-provider defaults to a layer named 'base'. - Either use 'migrate' command or edit a base.toml and upload it with: lotus-provider config set base.toml`) - } - return nil, fmt.Errorf("could not read layer '%s': %w", layer, err) - } - meta, err := toml.Decode(text, &lp) - if err != nil { - return nil, fmt.Errorf("could not read layer, bad toml %s: %w", layer, err) - } - for _, k := range meta.Keys() { - have = append(have, strings.Join(k, " ")) - } - log.Infow("Using layer", "layer", layer, "config", lp) - } - _ = have // FUTURE: verify that required fields are here. - // If config includes 3rd-party config, consider JSONSchema as a way that - // 3rd-parties can dynamically include config requirements and we can - // validate the config. Because of layering, we must validate @ startup. 
- return lp, nil -} diff --git a/cmd/lotus-provider/main.go b/cmd/lotus-provider/main.go index 1b025303c..19cc6f5f9 100644 --- a/cmd/lotus-provider/main.go +++ b/cmd/lotus-provider/main.go @@ -5,18 +5,16 @@ import ( "fmt" "os" "os/signal" - "runtime/pprof" + "runtime/debug" "syscall" "github.com/fatih/color" logging "github.com/ipfs/go-log/v2" - "github.com/mitchellh/go-homedir" "github.com/urfave/cli/v2" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/lib/tracing" "github.com/filecoin-project/lotus/node/repo" @@ -30,8 +28,8 @@ func SetupCloseHandler() { go func() { <-c fmt.Println("\r- Ctrl+C pressed in Terminal") - _ = pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - panic(1) + debug.PrintStack() + os.Exit(1) }() } @@ -46,7 +44,6 @@ func main() { stopCmd, configCmd, testCmd, - webCmd, //backupCmd, //lcli.WithCategory("chain", actorCmd), //lcli.WithCategory("storage", sectorsCmd), @@ -134,7 +131,7 @@ func main() { Value: "base", }, &cli.StringFlag{ - Name: deps.FlagRepoPath, + Name: FlagRepoPath, EnvVars: []string{"LOTUS_REPO_PATH"}, Value: "~/.lotusprovider", }, @@ -146,14 +143,8 @@ func main() { }, After: func(c *cli.Context) error { if r := recover(); r != nil { - p, err := homedir.Expand(c.String(FlagMinerRepo)) - if err != nil { - log.Errorw("could not expand repo path for panic report", "error", err) - panic(r) - } - // Generate report in LOTUS_PATH and re-raise panic - build.GeneratePanicReport(c.String("panic-reports"), p, c.App.Name) + build.GeneratePanicReport(c.String("panic-reports"), c.String(FlagRepoPath), c.App.Name) panic(r) } return nil @@ -163,3 +154,7 @@ func main() { app.Metadata["repoType"] = repo.Provider lcli.RunApp(app) } + +const ( + FlagRepoPath = "repo-path" +) diff --git a/cmd/lotus-provider/proving.go 
b/cmd/lotus-provider/proving.go index 379bfdf85..a3211b176 100644 --- a/cmd/lotus-provider/proving.go +++ b/cmd/lotus-provider/proving.go @@ -15,7 +15,6 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" "github.com/filecoin-project/lotus/lib/harmony/harmonydb" "github.com/filecoin-project/lotus/provider" ) @@ -63,18 +62,18 @@ var wdPostTaskCmd = &cli.Command{ Action: func(cctx *cli.Context) error { ctx := context.Background() - deps, err := deps.GetDeps(ctx, cctx) + deps, err := getDeps(ctx, cctx) if err != nil { return err } - ts, err := deps.Full.ChainHead(ctx) + ts, err := deps.full.ChainHead(ctx) if err != nil { return xerrors.Errorf("cannot get chainhead %w", err) } ht := ts.Height() - addr, err := address.NewFromString(deps.Cfg.Addresses.MinerAddresses[0]) + addr, err := address.NewFromString(deps.cfg.Addresses.MinerAddresses[0]) if err != nil { return xerrors.Errorf("cannot get miner address %w", err) } @@ -83,10 +82,7 @@ var wdPostTaskCmd = &cli.Command{ return xerrors.Errorf("cannot get miner id %w", err) } var id int64 - - retryDelay := time.Millisecond * 10 - retryAddTask: - _, err = deps.DB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + _, err = deps.db.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { err = tx.QueryRow(`INSERT INTO harmony_task (name, posted_time, added_by) VALUES ('WdPost', CURRENT_TIMESTAMP, 123) RETURNING id`).Scan(&id) if err != nil { log.Error("inserting harmony_task: ", err) @@ -106,18 +102,13 @@ var wdPostTaskCmd = &cli.Command{ return true, nil }) if err != nil { - if harmonydb.IsErrSerialization(err) { - time.Sleep(retryDelay) - retryDelay *= 2 - goto retryAddTask - } return xerrors.Errorf("writing SQL transaction: %w", err) } fmt.Printf("Inserted task %v. 
Waiting for success ", id) var result sql.NullString for { time.Sleep(time.Second) - err = deps.DB.QueryRow(ctx, `SELECT result FROM harmony_test WHERE task_id=$1`, id).Scan(&result) + err = deps.db.QueryRow(ctx, `SELECT result FROM harmony_test WHERE task_id=$1`, id).Scan(&result) if err != nil { return xerrors.Errorf("reading result from harmony_test: %w", err) } @@ -166,29 +157,29 @@ It will not send any messages to the chain. Since it can compute any deadline, o Action: func(cctx *cli.Context) error { ctx := context.Background() - deps, err := deps.GetDeps(ctx, cctx) + deps, err := getDeps(ctx, cctx) if err != nil { return err } - wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := provider.WindowPostScheduler(ctx, deps.Cfg.Fees, deps.Cfg.Proving, deps.Full, deps.Verif, deps.LW, nil, - deps.As, deps.Maddrs, deps.DB, deps.Stor, deps.Si, deps.Cfg.Subsystems.WindowPostMaxTasks) + wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := provider.WindowPostScheduler(ctx, deps.cfg.Fees, deps.cfg.Proving, deps.full, deps.verif, deps.lw, nil, + deps.as, deps.maddrs, deps.db, deps.stor, deps.si, deps.cfg.Subsystems.WindowPostMaxTasks) if err != nil { return err } _, _ = wdPoStSubmitTask, derlareRecoverTask - if len(deps.Maddrs) == 0 { + if len(deps.maddrs) == 0 { return errors.New("no miners to compute WindowPoSt for") } - head, err := deps.Full.ChainHead(ctx) + head, err := deps.full.ChainHead(ctx) if err != nil { return xerrors.Errorf("failed to get chain head: %w", err) } di := dline.NewInfo(head.Height(), cctx.Uint64("deadline"), 0, 0, 0, 10 /*challenge window*/, 0, 0) - for _, maddr := range deps.Maddrs { + for _, maddr := range deps.maddrs { out, err := wdPostTask.DoPartition(ctx, head, address.Address(maddr), di, cctx.Uint64("partition")) if err != nil { fmt.Println("Error computing WindowPoSt for miner", maddr, err) diff --git a/cmd/lotus-provider/rpc/rpc.go b/cmd/lotus-provider/rpc/rpc.go index e2897030f..3ae3e2a1f 100644 --- 
a/cmd/lotus-provider/rpc/rpc.go +++ b/cmd/lotus-provider/rpc/rpc.go @@ -1,34 +1,21 @@ -// Package rpc provides all direct access to this node. package rpc import ( "context" - "encoding/base64" - "encoding/json" - "net" "net/http" - "time" - "github.com/gbrlsnchs/jwt/v3" "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - "go.opencensus.io/tag" - "golang.org/x/sync/errgroup" - "golang.org/x/xerrors" + // logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" "github.com/filecoin-project/lotus/lib/rpcenc" - "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/metrics/proxy" - "github.com/filecoin-project/lotus/provider/lpweb" - "github.com/filecoin-project/lotus/storage/paths" ) -var log = logging.Logger("lp/rpc") +//var log = logging.Logger("lp/rpc") func LotusProviderHandler( authv func(ctx context.Context, token string) ([]auth.Permission, error), @@ -62,94 +49,3 @@ func LotusProviderHandler( } return ah } - -type ProviderAPI struct { - *deps.Deps - ShutdownChan chan struct{} -} - -func (p *ProviderAPI) Version(context.Context) (api.Version, error) { - return api.ProviderAPIVersion0, nil -} - -// Trigger shutdown -func (p *ProviderAPI) Shutdown(context.Context) error { - close(p.ShutdownChan) - return nil -} - -func ListenAndServe(ctx context.Context, dependencies *deps.Deps, shutdownChan chan struct{}) error { - fh := &paths.FetchHandler{Local: dependencies.LocalStore, PfHandler: &paths.DefaultPartialFileHandler{}} - remoteHandler := func(w http.ResponseWriter, r *http.Request) { - if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { - w.WriteHeader(401) - _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing admin permission"}) - return - } - - fh.ServeHTTP(w, r) - } - // local APIs - { - // debugging - mux := mux.NewRouter() - 
mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof - mux.PathPrefix("/remote").HandlerFunc(remoteHandler) - } - - var authVerify func(context.Context, string) ([]auth.Permission, error) - { - privateKey, err := base64.StdEncoding.DecodeString(dependencies.Cfg.Apis.StorageRPCSecret) - if err != nil { - return xerrors.Errorf("decoding storage rpc secret: %w", err) - } - authVerify = func(ctx context.Context, token string) ([]auth.Permission, error) { - var payload deps.JwtPayload - if _, err := jwt.Verify([]byte(token), jwt.NewHS256(privateKey), &payload); err != nil { - return nil, xerrors.Errorf("JWT Verification failed: %w", err) - } - - return payload.Allow, nil - } - } - // Serve the RPC. - srv := &http.Server{ - Handler: LotusProviderHandler( - authVerify, - remoteHandler, - &ProviderAPI{dependencies, shutdownChan}, - true), - ReadHeaderTimeout: time.Minute * 3, - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-worker")) - return ctx - }, - Addr: dependencies.ListenAddr, - } - - log.Infof("Setting up RPC server at %s", dependencies.ListenAddr) - eg := errgroup.Group{} - eg.Go(srv.ListenAndServe) - - if dependencies.Cfg.Subsystems.EnableWebGui { - web, err := lpweb.GetSrv(ctx, dependencies) - if err != nil { - return err - } - - go func() { - <-ctx.Done() - log.Warn("Shutting down...") - if err := srv.Shutdown(context.TODO()); err != nil { - log.Errorf("shutting down RPC server failed: %s", err) - } - if err := web.Shutdown(context.Background()); err != nil { - log.Errorf("shutting down web server failed: %s", err) - } - log.Warn("Graceful shutdown successful") - }() - log.Infof("Setting up web server at %s", dependencies.Cfg.Subsystems.GuiAddress) - eg.Go(web.ListenAndServe) - } - return eg.Wait() -} diff --git a/cmd/lotus-provider/run.go b/cmd/lotus-provider/run.go index b1a4ff828..de97aa766 100644 --- a/cmd/lotus-provider/run.go +++ b/cmd/lotus-provider/run.go 
@@ -2,24 +2,54 @@ package main import ( "context" + "encoding/base64" + "encoding/json" "fmt" + "net" + "net/http" "os" "strings" "time" + "github.com/gbrlsnchs/jwt/v3" + "github.com/gorilla/mux" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" "github.com/pkg/errors" + "github.com/samber/lo" "github.com/urfave/cli/v2" "go.opencensus.io/stats" "go.opencensus.io/tag" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-jsonrpc/auth" + "github.com/filecoin-project/go-statestore" + + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" + cliutil "github.com/filecoin-project/lotus/cli/util" "github.com/filecoin-project/lotus/cmd/lotus-provider/rpc" - "github.com/filecoin-project/lotus/cmd/lotus-provider/tasks" + "github.com/filecoin-project/lotus/journal" + "github.com/filecoin-project/lotus/journal/alerting" + "github.com/filecoin-project/lotus/journal/fsjournal" + "github.com/filecoin-project/lotus/lib/harmony/harmonydb" + "github.com/filecoin-project/lotus/lib/harmony/harmonytask" "github.com/filecoin-project/lotus/lib/ulimit" "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/lotus/provider" + "github.com/filecoin-project/lotus/provider/lpmessage" + "github.com/filecoin-project/lotus/provider/lpwinning" + "github.com/filecoin-project/lotus/storage/ctladdr" + "github.com/filecoin-project/lotus/storage/paths" + "github.com/filecoin-project/lotus/storage/sealer" + "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" + "github.com/filecoin-project/lotus/storage/sealer/storiface" ) type stackTracer interface { @@ -114,26 +144,112 
@@ var runCmd = &cli.Command{ } } - dependencies := &deps.Deps{} - err = dependencies.PopulateRemainingDeps(ctx, cctx, true) + deps, err := getDeps(ctx, cctx) + if err != nil { - fmt.Println("err", err) return err } - fmt.Println("ef") + cfg, db, full, verif, lw, as, maddrs, stor, si, localStore := deps.cfg, deps.db, deps.full, deps.verif, deps.lw, deps.as, deps.maddrs, deps.stor, deps.si, deps.localStore - taskEngine, err := tasks.StartTasks(ctx, dependencies) - fmt.Println("gh") + var activeTasks []harmonytask.TaskInterface - if err != nil { - return nil + sender, sendTask := lpmessage.NewSender(full, full, db) + activeTasks = append(activeTasks, sendTask) + + /////////////////////////////////////////////////////////////////////// + ///// Task Selection + /////////////////////////////////////////////////////////////////////// + { + + if cfg.Subsystems.EnableWindowPost { + wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := provider.WindowPostScheduler(ctx, cfg.Fees, cfg.Proving, full, verif, lw, sender, + as, maddrs, db, stor, si, cfg.Subsystems.WindowPostMaxTasks) + if err != nil { + return err + } + activeTasks = append(activeTasks, wdPostTask, wdPoStSubmitTask, derlareRecoverTask) + } + + if cfg.Subsystems.EnableWinningPost { + winPoStTask := lpwinning.NewWinPostTask(cfg.Subsystems.WinningPostMaxTasks, db, lw, verif, full, maddrs) + activeTasks = append(activeTasks, winPoStTask) + } } + log.Infow("This lotus_provider instance handles", + "miner_addresses", minerAddressesToStrings(maddrs), + "tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name })) + + taskEngine, err := harmonytask.New(db, activeTasks, deps.listenAddr) + if err != nil { + return err + } + defer taskEngine.GracefullyTerminate(time.Hour) - err = rpc.ListenAndServe(ctx, dependencies, shutdownChan) // Monitor for shutdown. 
- if err != nil { - return err + fh := &paths.FetchHandler{Local: localStore, PfHandler: &paths.DefaultPartialFileHandler{}} + remoteHandler := func(w http.ResponseWriter, r *http.Request) { + if !auth.HasPerm(r.Context(), nil, api.PermAdmin) { + w.WriteHeader(401) + _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing admin permission"}) + return + } + + fh.ServeHTTP(w, r) } + // local APIs + { + // debugging + mux := mux.NewRouter() + mux.PathPrefix("/").Handler(http.DefaultServeMux) // pprof + mux.PathPrefix("/remote").HandlerFunc(remoteHandler) + + /*ah := &auth.Handler{ + Verify: authv, + Next: mux.ServeHTTP, + }*/ // todo + + } + + var authVerify func(context.Context, string) ([]auth.Permission, error) + { + privateKey, err := base64.StdEncoding.DecodeString(deps.cfg.Apis.StorageRPCSecret) + if err != nil { + return xerrors.Errorf("decoding storage rpc secret: %w", err) + } + authVerify = func(ctx context.Context, token string) ([]auth.Permission, error) { + var payload jwtPayload + if _, err := jwt.Verify([]byte(token), jwt.NewHS256(privateKey), &payload); err != nil { + return nil, xerrors.Errorf("JWT Verification failed: %w", err) + } + + return payload.Allow, nil + } + } + // Serve the RPC. + srv := &http.Server{ + Handler: rpc.LotusProviderHandler( + authVerify, + remoteHandler, + &ProviderAPI{deps, shutdownChan}, + true), + ReadHeaderTimeout: time.Minute * 3, + BaseContext: func(listener net.Listener) context.Context { + ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-worker")) + return ctx + }, + } + + go func() { + <-ctx.Done() + log.Warn("Shutting down...") + if err := srv.Shutdown(context.TODO()); err != nil { + log.Errorf("shutting down RPC server failed: %s", err) + } + log.Warn("Graceful shutdown successful") + }() + + // Monitor for shutdown. 
+ // TODO provide a graceful shutdown API on shutdownChan finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, //node.ShutdownHandler{Component: "provider", StopFunc: stop}, @@ -142,48 +258,210 @@ var runCmd = &cli.Command{ }, } -var webCmd = &cli.Command{ - Name: "web", - Usage: "Start lotus provider web interface", - Description: `Start an instance of lotus provider web interface. - This creates the 'web' layer if it does not exist, then calls run with that layer.`, - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "listen", - Usage: "Address to listen on", - Value: "127.0.0.1:4701", - }, - &cli.StringSliceFlag{ - Name: "layers", - Usage: "list of layers to be interpreted (atop defaults). Default: base. Web will be added", - Value: cli.NewStringSlice("base"), - }, - &cli.BoolFlag{ - Name: "nosync", - Usage: "don't check full-node sync status", - }, - }, - Action: func(cctx *cli.Context) error { - db, err := deps.MakeDB(cctx) - if err != nil { - return err - } - - webtxt, err := getConfig(db, "web") - if err != nil || webtxt == "" { - - s := `[Susbystems] - EnableWebGui = true - ` - if err = setConfig(db, "web", s); err != nil { - return err - } - } - layers := append([]string{"web"}, cctx.StringSlice("layers")...) 
- err = cctx.Set("layers", strings.Join(layers, ",")) - if err != nil { - return err - } - return runCmd.Action(cctx) - }, +func makeDB(cctx *cli.Context) (*harmonydb.DB, error) { + dbConfig := config.HarmonyDB{ + Username: cctx.String("db-user"), + Password: cctx.String("db-password"), + Hosts: strings.Split(cctx.String("db-host"), ","), + Database: cctx.String("db-name"), + Port: cctx.String("db-port"), + } + return harmonydb.NewFromConfig(dbConfig) +} + +type jwtPayload struct { + Allow []auth.Permission +} + +func StorageAuth(apiKey string) (sealer.StorageAuth, error) { + if apiKey == "" { + return nil, xerrors.Errorf("no api key provided") + } + + rawKey, err := base64.StdEncoding.DecodeString(apiKey) + if err != nil { + return nil, xerrors.Errorf("decoding api key: %w", err) + } + + key := jwt.NewHS256(rawKey) + + p := jwtPayload{ + Allow: []auth.Permission{"admin"}, + } + + token, err := jwt.Sign(&p, key) + if err != nil { + return nil, err + } + + headers := http.Header{} + headers.Add("Authorization", "Bearer "+string(token)) + return sealer.StorageAuth(headers), nil +} + +type Deps struct { + cfg *config.LotusProviderConfig + db *harmonydb.DB + full api.FullNode + verif storiface.Verifier + lw *sealer.LocalWorker + as *ctladdr.AddressSelector + maddrs []dtypes.MinerAddress + stor *paths.Remote + si *paths.DBIndex + localStore *paths.Local + listenAddr string +} + +func getDeps(ctx context.Context, cctx *cli.Context) (*Deps, error) { + // Open repo + + repoPath := cctx.String(FlagRepoPath) + fmt.Println("repopath", repoPath) + r, err := repo.NewFS(repoPath) + if err != nil { + return nil, err + } + + ok, err := r.Exists() + if err != nil { + return nil, err + } + if !ok { + if err := r.Init(repo.Provider); err != nil { + return nil, err + } + } + + db, err := makeDB(cctx) + if err != nil { + return nil, err + } + + /////////////////////////////////////////////////////////////////////// + ///// Dependency Setup + 
/////////////////////////////////////////////////////////////////////// + + // The config feeds into task runners & their helpers + cfg, err := getConfig(cctx, db) + if err != nil { + return nil, err + } + + log.Debugw("config", "config", cfg) + + var verif storiface.Verifier = ffiwrapper.ProofVerifier + + as, err := provider.AddressSelector(&cfg.Addresses)() + if err != nil { + return nil, err + } + + de, err := journal.ParseDisabledEvents(cfg.Journal.DisabledEvents) + if err != nil { + return nil, err + } + j, err := fsjournal.OpenFSJournalPath(cctx.String("journal"), de) + if err != nil { + return nil, err + } + + full, fullCloser, err := cliutil.GetFullNodeAPIV1LotusProvider(cctx, cfg.Apis.ChainApiInfo) + if err != nil { + return nil, err + } + + go func() { + select { + case <-ctx.Done(): + fullCloser() + _ = j.Close() + } + }() + sa, err := StorageAuth(cfg.Apis.StorageRPCSecret) + if err != nil { + return nil, xerrors.Errorf(`'%w' while parsing the config toml's + [Apis] + StorageRPCSecret=%v +Get it with: jq .PrivateKey ~/.lotus-miner/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU`, err, cfg.Apis.StorageRPCSecret) + } + + al := alerting.NewAlertingSystem(j) + si := paths.NewDBIndex(al, db) + bls := &paths.BasicLocalStorage{ + PathToJSON: cctx.String("storage-json"), + } + + listenAddr := cctx.String("listen") + const unspecifiedAddress = "0.0.0.0" + addressSlice := strings.Split(listenAddr, ":") + if ip := net.ParseIP(addressSlice[0]); ip != nil { + if ip.String() == unspecifiedAddress { + rip, err := db.GetRoutableIP() + if err != nil { + return nil, err + } + listenAddr = rip + ":" + addressSlice[1] + } + } + localStore, err := paths.NewLocal(ctx, bls, si, []string{"http://" + listenAddr + "/remote"}) + if err != nil { + return nil, err + } + + stor := paths.NewRemote(localStore, si, http.Header(sa), 10, &paths.DefaultPartialFileHandler{}) + + wstates := statestore.New(dssync.MutexWrap(ds.NewMapDatastore())) + + // todo localWorker isn't the abstraction layer we want 
to use here, we probably want to go straight to ffiwrapper + // maybe with a lotus-provider specific abstraction. LocalWorker does persistent call tracking which we probably + // don't need (ehh.. maybe we do, the async callback system may actually work decently well with harmonytask) + lw := sealer.NewLocalWorker(sealer.WorkerConfig{}, stor, localStore, si, nil, wstates) + + var maddrs []dtypes.MinerAddress + for _, s := range cfg.Addresses.MinerAddresses { + addr, err := address.NewFromString(s) + if err != nil { + return nil, err + } + maddrs = append(maddrs, dtypes.MinerAddress(addr)) + } + + return &Deps{ // lint: intentionally not-named so it will fail if one is forgotten + cfg, + db, + full, + verif, + lw, + as, + maddrs, + stor, + si, + localStore, + listenAddr, + }, nil + +} + +type ProviderAPI struct { + *Deps + ShutdownChan chan struct{} +} + +func (p *ProviderAPI) Version(context.Context) (api.Version, error) { + return api.ProviderAPIVersion0, nil +} + +// Trigger shutdown +func (p *ProviderAPI) Shutdown(context.Context) error { + close(p.ShutdownChan) + return nil +} + +func minerAddressesToStrings(maddrs []dtypes.MinerAddress) []string { + strs := make([]string, len(maddrs)) + for i, addr := range maddrs { + strs[i] = address.Address(addr).String() + } + return strs } diff --git a/cmd/lotus-provider/tasks/tasks.go b/cmd/lotus-provider/tasks/tasks.go deleted file mode 100644 index 2c4cd58bf..000000000 --- a/cmd/lotus-provider/tasks/tasks.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package tasks contains tasks that can be run by the lotus-provider command. 
-package tasks - -import ( - "context" - - logging "github.com/ipfs/go-log/v2" - "github.com/samber/lo" - - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" - "github.com/filecoin-project/lotus/lib/harmony/harmonytask" - "github.com/filecoin-project/lotus/provider" - "github.com/filecoin-project/lotus/provider/lpmessage" - "github.com/filecoin-project/lotus/provider/lpwinning" -) - -var log = logging.Logger("lotus-provider/deps") - -func StartTasks(ctx context.Context, dependencies *deps.Deps) (*harmonytask.TaskEngine, error) { - cfg := dependencies.Cfg - db := dependencies.DB - full := dependencies.Full - verif := dependencies.Verif - lw := dependencies.LW - as := dependencies.As - maddrs := dependencies.Maddrs - stor := dependencies.Stor - si := dependencies.Si - var activeTasks []harmonytask.TaskInterface - - sender, sendTask := lpmessage.NewSender(full, full, db) - activeTasks = append(activeTasks, sendTask) - - /////////////////////////////////////////////////////////////////////// - ///// Task Selection - /////////////////////////////////////////////////////////////////////// - { - - if cfg.Subsystems.EnableWindowPost { - wdPostTask, wdPoStSubmitTask, derlareRecoverTask, err := provider.WindowPostScheduler(ctx, cfg.Fees, cfg.Proving, full, verif, lw, sender, - as, maddrs, db, stor, si, cfg.Subsystems.WindowPostMaxTasks) - if err != nil { - return nil, err - } - activeTasks = append(activeTasks, wdPostTask, wdPoStSubmitTask, derlareRecoverTask) - } - - if cfg.Subsystems.EnableWinningPost { - winPoStTask := lpwinning.NewWinPostTask(cfg.Subsystems.WinningPostMaxTasks, db, lw, verif, full, maddrs) - activeTasks = append(activeTasks, winPoStTask) - } - } - log.Infow("This lotus_provider instance handles", - "miner_addresses", maddrs, - "tasks", lo.Map(activeTasks, func(t harmonytask.TaskInterface, _ int) string { return t.TypeDetails().Name })) - - return harmonytask.New(db, activeTasks, dependencies.ListenAddr) -} diff --git a/cmd/lotus-shed/adl.go 
b/cmd/lotus-shed/adl.go new file mode 100644 index 000000000..762f78b6c --- /dev/null +++ b/cmd/lotus-shed/adl.go @@ -0,0 +1,124 @@ +package main + +import ( + "encoding/json" + "fmt" + "io" + "os" + + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/ipld/go-car" + "github.com/urfave/cli/v2" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors/adt" +) + +var adlCmd = &cli.Command{ + Name: "adl", + Usage: "adl manipulation commands", + Subcommands: []*cli.Command{ + adlAmtCmd, + }, +} + +var adlAmtCmd = &cli.Command{ + Name: "amt", + Usage: "AMT manipulation commands", + Subcommands: []*cli.Command{ + adlAmtGetCmd, + }, +} + +var adlAmtGetCmd = &cli.Command{ + Name: "get", + Usage: "Get an element from an AMT", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "car-file", + Usage: "write a car file with two hamts (use lotus-shed export-car)", + }, + &cli.IntFlag{ + Name: "bitwidth", + Usage: "bitwidth of the HAMT", + Value: 5, + }, + &cli.StringFlag{ + Name: "root", + Usage: "root cid of the HAMT", + }, + &cli.Int64Flag{ + Name: "key", + Usage: "key to get", + }, + }, + Action: func(cctx *cli.Context) error { + bs := blockstore.NewMemorySync() + + f, err := os.Open(cctx.String("car-file")) + if err != nil { + return err + } + defer func(f *os.File) { + _ = f.Close() + }(f) + + cr, err := car.NewCarReader(f) + if err != nil { + return err + } + + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + break + } + return err + } + + if err := bs.Put(cctx.Context, blk); err != nil { + return err + } + } + + root, err := cid.Parse(cctx.String("root")) + if err != nil { + return err + } + + m, err := adt13.AsArray(adt.WrapStore(cctx.Context, cbor.NewCborStore(bs)), root, cctx.Int("bitwidth")) + if err != nil { + return err + } + + var out 
cbg.Deferred + ok, err := m.Get(cctx.Uint64("key"), &out) + if err != nil { + return err + } + if !ok { + return xerrors.Errorf("no such element") + } + + fmt.Printf("RAW: %x\n", out.Raw) + fmt.Println("----") + + var i interface{} + if err := cbor.DecodeInto(out.Raw, &i); err == nil { + ij, err := json.MarshalIndent(i, "", " ") + if err != nil { + return err + } + + fmt.Println(string(ij)) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/diff.go b/cmd/lotus-shed/diff.go index 981dc850c..a8eac6575 100644 --- a/cmd/lotus-shed/diff.go +++ b/cmd/lotus-shed/diff.go @@ -1,20 +1,31 @@ package main import ( + "bytes" "context" + "encoding/json" "fmt" "io" + "os" + "github.com/fatih/color" "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/ipld/go-car" "github.com/urfave/cli/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-amt-ipld/v4" + "github.com/filecoin-project/go-hamt-ipld/v3" "github.com/filecoin-project/go-state-types/abi" miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/must" "github.com/filecoin-project/lotus/node/repo" ) @@ -24,6 +35,8 @@ var diffCmd = &cli.Command{ Subcommands: []*cli.Command{ diffStateTrees, diffMinerStates, + diffHAMTs, + diffAMTs, }, } @@ -64,7 +77,9 @@ var diffMinerStates = &cli.Command{ return err } - defer lkrepo.Close() //nolint:errcheck + defer func(lkrepo repo.LockedRepo) { + _ = lkrepo.Close() + }(lkrepo) bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) if err != nil { @@ -258,3 +273,247 @@ var diffStateTrees = &cli.Command{ return nil }, } + +var diffHAMTs = &cli.Command{ + Name: "hamts", + Usage: "diff two HAMTs", + ArgsUsage: " ", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "car-file", 
+ Usage: "write a car file with two hamts (use lotus-shed export-car)", + }, + &cli.IntFlag{ + Name: "bitwidth", + Usage: "bitwidth of the HAMT", + Value: 5, + }, + &cli.StringFlag{ + Name: "key-type", + Usage: "type of the key", + Value: "uint", + }, + }, + Action: func(cctx *cli.Context) error { + var bs blockstore.Blockstore = blockstore.NewMemorySync() + + if cctx.IsSet("car-file") { + f, err := os.Open(cctx.String("car-file")) + if err != nil { + return err + } + defer func(f *os.File) { + _ = f.Close() + }(f) + + cr, err := car.NewCarReader(f) + if err != nil { + return err + } + + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + break + } + return err + } + + if err := bs.Put(cctx.Context, blk); err != nil { + return err + } + } + } else { + // use running node + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("connect to full node: %w", err) + } + defer closer() + + bs = blockstore.NewAPIBlockstore(api) + } + + cidA, err := cid.Parse(cctx.Args().Get(0)) + if err != nil { + return err + } + + cidB, err := cid.Parse(cctx.Args().Get(1)) + if err != nil { + return err + } + + cst := cbor.NewCborStore(bs) + + var keyParser func(k string) (interface{}, error) + switch cctx.String("key-type") { + case "uint": + keyParser = func(k string) (interface{}, error) { + return abi.ParseUIntKey(k) + } + case "actor": + keyParser = func(k string) (interface{}, error) { + return address.NewFromBytes([]byte(k)) + } + default: + return fmt.Errorf("unknown key type: %s", cctx.String("key-type")) + } + + diffs, err := hamt.Diff(cctx.Context, cst, cst, cidA, cidB, hamt.UseTreeBitWidth(cctx.Int("bitwidth"))) + if err != nil { + return err + } + + for _, d := range diffs { + switch d.Type { + case hamt.Add: + color.Green("+ Add %v", must.One(keyParser(d.Key))) + case hamt.Remove: + color.Red("- Remove %v", must.One(keyParser(d.Key))) + case hamt.Modify: + color.Yellow("~ Modify %v", must.One(keyParser(d.Key))) + } + } + 
+ return nil + }, +} + +var diffAMTs = &cli.Command{ + Name: "amts", + Usage: "diff two AMTs", + ArgsUsage: " ", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "car-file", + Usage: "write a car file with two amts (use lotus-shed export-car)", + }, + &cli.UintFlag{ + Name: "bitwidth", + Usage: "bitwidth of the AMT", + Value: 5, + }, + }, + Action: func(cctx *cli.Context) error { + var bs blockstore.Blockstore = blockstore.NewMemorySync() + + if cctx.IsSet("car-file") { + f, err := os.Open(cctx.String("car-file")) + if err != nil { + return err + } + defer func(f *os.File) { + _ = f.Close() + }(f) + + cr, err := car.NewCarReader(f) + if err != nil { + return err + } + + for { + blk, err := cr.Next() + if err != nil { + if err == io.EOF { + break + } + return err + } + + if err := bs.Put(cctx.Context, blk); err != nil { + return err + } + } + } else { + // use running node + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return xerrors.Errorf("connect to full node: %w", err) + } + defer closer() + + bs = blockstore.NewAPIBlockstore(api) + } + + cidA, err := cid.Parse(cctx.Args().Get(0)) + if err != nil { + return err + } + + cidB, err := cid.Parse(cctx.Args().Get(1)) + if err != nil { + return err + } + + cst := cbor.NewCborStore(bs) + + diffs, err := amt.Diff(cctx.Context, cst, cst, cidA, cidB, amt.UseTreeBitWidth(cctx.Uint("bitwidth"))) + if err != nil { + return err + } + + for _, d := range diffs { + switch d.Type { + case amt.Add: + color.Green("+ Add %v", d.Key) + case amt.Remove: + color.Red("- Remove %v", d.Key) + case amt.Modify: + color.Yellow("~ Modify %v", d.Key) + + var vb, va interface{} + err := cbor.DecodeInto(d.Before.Raw, &vb) + if err != nil { + return err + } + err = cbor.DecodeInto(d.After.Raw, &va) + if err != nil { + return err + } + + vjsonb, err := json.MarshalIndent(vb, " ", " ") + if err != nil { + return err + } + vjsona, err := json.MarshalIndent(va, " ", " ") + if err != nil { + return err + } + + linesb := 
bytes.Split(vjsonb, []byte("\n")) // - + linesa := bytes.Split(vjsona, []byte("\n")) // + + + maxLen := len(linesb) + if len(linesa) > maxLen { + maxLen = len(linesa) + } + + for i := 0; i < maxLen; i++ { + // Check if 'linesb' has run out of lines but 'linesa' hasn't + if i >= len(linesb) && i < len(linesa) { + color.Green("+ %s\n", linesa[i]) + continue + } + // Check if 'linesa' has run out of lines but 'linesb' hasn't + if i >= len(linesa) && i < len(linesb) { + color.Red("- %s\n", linesb[i]) + continue + } + // Compare lines if both slices have lines at index i + if !bytes.Equal(linesb[i], linesa[i]) { + color.Red("- %s\n", linesb[i]) + color.Green("+ %s\n", linesa[i]) + } else { + // Print the line if it is the same in both slices + fmt.Printf(" %s\n", linesb[i]) + } + } + + } + } + + return nil + }, +} diff --git a/cmd/lotus-shed/fip-0036.go b/cmd/lotus-shed/fip-0036.go deleted file mode 100644 index 4c8456c04..000000000 --- a/cmd/lotus-shed/fip-0036.go +++ /dev/null @@ -1,554 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "sort" - "strconv" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/chain/actors/builtin" - "github.com/filecoin-project/lotus/chain/actors/builtin/market" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/builtin/multisig" - "github.com/filecoin-project/lotus/chain/actors/builtin/power" - "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/store" - 
"github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/repo" -) - -type Option uint64 - -const ( - Approve Option = 49 - Reject Option = 50 -) - -type Vote struct { - ID uint64 - OptionID Option - SignerAddress address.Address -} - -type msigVote struct { - Multisig msigBriefInfo - ApproveCount uint64 - RejectCount uint64 -} - -// https://filpoll.io/poll/16 -// snapshot height: 2162760 -// state root: bafy2bzacebdnzh43hw66bmvguk65wiwr5ssaejlq44fpdei2ysfh3eefpdlqs -var fip36PollCmd = &cli.Command{ - Name: "fip36poll", - Usage: "Process the FIP0036 FilPoll result", - ArgsUsage: "[state root, votes]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "repo", - Value: "~/.lotus", - }, - }, - Subcommands: []*cli.Command{ - finalResultCmd, - }, -} - -var finalResultCmd = &cli.Command{ - Name: "results", - Usage: "get poll results", - ArgsUsage: "[state root] [height] [votes json]", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "repo", - Value: "~/.lotus", - }, - }, - - Action: func(cctx *cli.Context) error { - if cctx.NArg() != 3 { - return xerrors.New("filpoll0036 results [state root] [height] [votes.json]") - } - - ctx := context.TODO() - if !cctx.Args().Present() { - return fmt.Errorf("must pass state root") - } - - sroot, err := cid.Decode(cctx.Args().First()) - if err != nil { - return fmt.Errorf("failed to parse input: %w", err) - } - - fsrepo, err := repo.NewFS(cctx.String("repo")) - if err != nil { - return err - } - - lkrepo, err := fsrepo.Lock(repo.FullNode) - if err != nil { - return err - } - - defer lkrepo.Close() //nolint:errcheck - - bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) - if err != nil { - return fmt.Errorf("failed to open blockstore: %w", err) - } - - defer func() { - if c, ok := bs.(io.Closer); ok { - if err := c.Close(); err != nil { - log.Warnf("failed to close blockstore: %s", err) - } - } - }() - - mds, err := lkrepo.Datastore(context.Background(), "/metadata") - if err != nil { - return err - 
} - - cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) - defer cs.Close() //nolint:errcheck - - cst := cbor.NewCborStore(bs) - store := adt.WrapStore(ctx, cst) - - st, err := state.LoadStateTree(cst, sroot) - if err != nil { - return err - } - - height, err := strconv.Atoi(cctx.Args().Get(1)) - if err != nil { - return err - } - - //get all the votes' signer ID address && their vote - vj, err := homedir.Expand(cctx.Args().Get(2)) - if err != nil { - return xerrors.Errorf("fail to get votes json") - } - votes, err := getVotesMap(vj) - if err != nil { - return xerrors.Errorf("failed to get voters: %w\n", err) - } - - type minerBriefInfo struct { - rawBytePower abi.StoragePower - dealPower abi.StoragePower - balance abi.TokenAmount - } - - // power actor - pa, err := st.GetActor(power.Address) - if err != nil { - return xerrors.Errorf("failed to get power actor: %w\n", err) - } - - powerState, err := power.Load(store, pa) - if err != nil { - return xerrors.Errorf("failed to get power state: %w\n", err) - } - - //market actor - ma, err := st.GetActor(market.Address) - if err != nil { - return xerrors.Errorf("fail to get market actor: %w\n", err) - } - - marketState, err := market.Load(store, ma) - if err != nil { - return xerrors.Errorf("fail to load market state: %w\n", err) - } - - lookupId := func(addr address.Address) address.Address { - ret, err := st.LookupID(addr) - if err != nil { - panic(err) - } - - return ret - } - - // we need to build several pieces of information, as we traverse the state tree: - // a map of accounts to every msig that they are a signer of - accountsToMultisigs := make(map[address.Address][]address.Address) - // a map of multisigs to some info about them for quick lookup - msigActorsInfo := make(map[address.Address]msigBriefInfo) - // a map of actors (accounts+multisigs) to every miner that they are an owner of - ownerMap := make(map[address.Address][]address.Address) - // a map of accounts to every miner that they are a worker 
of - workerMap := make(map[address.Address][]address.Address) - // a map of miners to some info about them for quick lookup - minerActorsInfo := make(map[address.Address]minerBriefInfo) - // a map of client addresses to deal data stored in proposals - clientToDealStorage := make(map[address.Address]abi.StoragePower) - - fmt.Println("iterating over all actors") - count := 0 - err = st.ForEach(func(addr address.Address, act *types.Actor) error { - if count%200000 == 0 { - fmt.Println("processed ", count, " actors building maps") - } - count++ - if builtin.IsMultisigActor(act.Code) { - ms, err := multisig.Load(store, act) - if err != nil { - return fmt.Errorf("load msig failed %v", err) - - } - - // TODO: Confirm that these are always ID addresses - signers, err := ms.Signers() - if err != nil { - return xerrors.Errorf("fail to get msig signers: %w", err) - } - for _, s := range signers { - signerId := lookupId(s) - accountsToMultisigs[signerId] = append(accountsToMultisigs[signerId], addr) - } - - locked, err := ms.LockedBalance(abi.ChainEpoch(height)) - if err != nil { - return xerrors.Errorf("failed to compute locked multisig balance: %w", err) - } - - threshold, _ := ms.Threshold() - info := msigBriefInfo{ - ID: addr, - Signer: signers, - Balance: big.Max(big.Zero(), types.BigSub(act.Balance, locked)), - Threshold: threshold, - } - msigActorsInfo[addr] = info - } - - if builtin.IsStorageMinerActor(act.Code) { - m, err := miner.Load(store, act) - if err != nil { - return xerrors.Errorf("fail to load miner actor: %w", err) - } - - info, err := m.Info() - if err != nil { - return xerrors.Errorf("fail to get miner info: %w\n", err) - } - - ownerId := lookupId(info.Owner) - ownerMap[ownerId] = append(ownerMap[ownerId], addr) - - workerId := lookupId(info.Worker) - workerMap[workerId] = append(workerMap[workerId], addr) - - lockedFunds, err := m.LockedFunds() - if err != nil { - return err - } - - bal := big.Sub(act.Balance, lockedFunds.TotalLockedFunds()) - bal = 
big.Max(big.Zero(), bal) - - pow, ok, err := powerState.MinerPower(addr) - if err != nil { - return err - } - - if !ok { - pow.RawBytePower = big.Zero() - } - - minerActorsInfo[addr] = minerBriefInfo{ - rawBytePower: pow.RawBytePower, - // gets added up outside this loop - dealPower: big.Zero(), - balance: bal, - } - } - - return nil - }) - - if err != nil { - return err - } - - fmt.Println("iterating over proposals") - dealProposals, err := marketState.Proposals() - if err != nil { - return err - } - - dealStates, err := marketState.States() - if err != nil { - return err - } - - if err := dealProposals.ForEach(func(dealID abi.DealID, d market.DealProposal) error { - - dealState, ok, err := dealStates.Get(dealID) - if err != nil { - return err - } - if !ok || dealState.SectorStartEpoch == -1 { - // effectively a continue - return nil - } - - clientId := lookupId(d.Client) - if cd, found := clientToDealStorage[clientId]; found { - clientToDealStorage[clientId] = big.Add(cd, big.NewInt(int64(d.PieceSize))) - } else { - clientToDealStorage[clientId] = big.NewInt(int64(d.PieceSize)) - } - - providerId := lookupId(d.Provider) - mai, found := minerActorsInfo[providerId] - - if !found { - return xerrors.Errorf("didn't find miner %s", providerId) - } - - mai.dealPower = big.Add(mai.dealPower, big.NewInt(int64(d.PieceSize))) - minerActorsInfo[providerId] = mai - return nil - }); err != nil { - return xerrors.Errorf("fail to get deals") - } - - // now tabulate votes - - approveBalance := abi.NewTokenAmount(0) - rejectionBalance := abi.NewTokenAmount(0) - clientApproveBytes := big.Zero() - clientRejectBytes := big.Zero() - msigPendingVotes := make(map[address.Address]msigVote) //map[msig ID]msigVote - msigVotes := make(map[address.Address]Option) - minerVotes := make(map[address.Address]Option) - fmt.Println("counting account and multisig votes") - for _, vote := range votes { - signerId, err := st.LookupID(vote.SignerAddress) - if err != nil { - fmt.Println("voter ", 
vote.SignerAddress, " not found in state tree, skipping") - continue - } - - //process votes for regular accounts - accountActor, err := st.GetActor(signerId) - if err != nil { - return xerrors.Errorf("fail to get account account for signer: %w\n", err) - } - - clientBytes, ok := clientToDealStorage[signerId] - if !ok { - clientBytes = big.Zero() - } - - if vote.OptionID == Approve { - approveBalance = types.BigAdd(approveBalance, accountActor.Balance) - clientApproveBytes = big.Add(clientApproveBytes, clientBytes) - } else { - rejectionBalance = types.BigAdd(rejectionBalance, accountActor.Balance) - clientRejectBytes = big.Add(clientRejectBytes, clientBytes) - } - - if minerInfos, found := ownerMap[signerId]; found { - for _, minerInfo := range minerInfos { - minerVotes[minerInfo] = vote.OptionID - } - } - if minerInfos, found := workerMap[signerId]; found { - for _, minerInfo := range minerInfos { - if _, ok := minerVotes[minerInfo]; !ok { - minerVotes[minerInfo] = vote.OptionID - } - } - } - - //process msigs - // There is a possibility that enough signers have voted for BOTH options in the poll to be above the threshold - // Because we are iterating over votes in order they arrived, the first option to go over the threshold will win - // This is in line with onchain behaviour (consider a case where signers are competing to withdraw all the funds - // in an msig into 2 different accounts) - if mss, found := accountsToMultisigs[signerId]; found { - for _, ms := range mss { //get all the msig signer has - if _, ok := msigVotes[ms]; ok { - // msig has already voted, skip - continue - } - if mpv, found := msigPendingVotes[ms]; found { //other signers of the multisig have voted, yet the threshold has not met - if vote.OptionID == Approve { - if mpv.ApproveCount+1 == mpv.Multisig.Threshold { //met threshold - approveBalance = types.BigAdd(approveBalance, mpv.Multisig.Balance) - delete(msigPendingVotes, ms) //threshold, can skip later signer votes - msigVotes[ms] = 
vote.OptionID - - } else { - mpv.ApproveCount++ - msigPendingVotes[ms] = mpv - } - } else { - if mpv.RejectCount+1 == mpv.Multisig.Threshold { //met threshold - rejectionBalance = types.BigAdd(rejectionBalance, mpv.Multisig.Balance) - delete(msigPendingVotes, ms) //threshold, can skip later signer votes - msigVotes[ms] = vote.OptionID - - } else { - mpv.RejectCount++ - msigPendingVotes[ms] = mpv - } - } - } else { //first vote received from one of the signers of the msig - msi, ok := msigActorsInfo[ms] - if !ok { - return xerrors.Errorf("didn't find msig %s in msig map", ms) - } - - if msi.Threshold == 1 { //met threshold with this signer's single vote - if vote.OptionID == Approve { - approveBalance = types.BigAdd(approveBalance, msi.Balance) - msigVotes[ms] = Approve - - } else { - rejectionBalance = types.BigAdd(rejectionBalance, msi.Balance) - msigVotes[ms] = Reject - } - } else { //threshold not met, add to pending vote - if vote.OptionID == Approve { - msigPendingVotes[ms] = msigVote{ - Multisig: msi, - ApproveCount: 1, - } - } else { - msigPendingVotes[ms] = msigVote{ - Multisig: msi, - RejectCount: 1, - } - } - } - } - } - } - } - - for s, v := range msigVotes { - if minerInfos, found := ownerMap[s]; found { - for _, minerInfo := range minerInfos { - minerVotes[minerInfo] = v - } - } - if minerInfos, found := workerMap[s]; found { - for _, minerInfo := range minerInfos { - if _, ok := minerVotes[minerInfo]; !ok { - minerVotes[minerInfo] = v - } - } - } - } - - approveRBP := big.Zero() - approveDealPower := big.Zero() - rejectionRBP := big.Zero() - rejectionDealPower := big.Zero() - fmt.Println("adding up miner votes") - for minerAddr, vote := range minerVotes { - mbi, ok := minerActorsInfo[minerAddr] - if !ok { - return xerrors.Errorf("failed to find miner info for %s", minerAddr) - } - - if vote == Approve { - approveBalance = big.Add(approveBalance, mbi.balance) - approveRBP = big.Add(approveRBP, mbi.rawBytePower) - approveDealPower = 
big.Add(approveDealPower, mbi.dealPower) - } else { - rejectionBalance = big.Add(rejectionBalance, mbi.balance) - rejectionRBP = big.Add(rejectionRBP, mbi.rawBytePower) - rejectionDealPower = big.Add(rejectionDealPower, mbi.dealPower) - } - } - - fmt.Println("Total acceptance token: ", approveBalance) - fmt.Println("Total rejection token: ", rejectionBalance) - - fmt.Println("Total acceptance SP deal power: ", approveDealPower) - fmt.Println("Total rejection SP deal power: ", rejectionDealPower) - - fmt.Println("Total acceptance SP rb power: ", approveRBP) - fmt.Println("Total rejection SP rb power: ", rejectionRBP) - - fmt.Println("Total acceptance Client rb power: ", clientApproveBytes) - fmt.Println("Total rejection Client rb power: ", clientRejectBytes) - - fmt.Println("\n\nFinal results **drumroll**") - if rejectionBalance.GreaterThanEqual(big.Mul(approveBalance, big.NewInt(3))) { - fmt.Println("token holders VETO FIP-0036!") - } else if approveBalance.LessThanEqual(rejectionBalance) { - fmt.Println("token holders REJECT FIP-0036") - } else { - fmt.Println("token holders ACCEPT FIP-0036") - } - - if rejectionDealPower.GreaterThanEqual(big.Mul(approveDealPower, big.NewInt(3))) { - fmt.Println("SPs by deal data stored VETO FIP-0036!") - } else if approveDealPower.LessThanEqual(rejectionDealPower) { - fmt.Println("SPs by deal data stored REJECT FIP-0036") - } else { - fmt.Println("SPs by deal data stored ACCEPT FIP-0036") - } - - if rejectionRBP.GreaterThanEqual(big.Mul(approveRBP, big.NewInt(3))) { - fmt.Println("SPs by total raw byte power VETO FIP-0036!") - } else if approveRBP.LessThanEqual(rejectionRBP) { - fmt.Println("SPs by total raw byte power REJECT FIP-0036") - } else { - fmt.Println("SPs by total raw byte power ACCEPT FIP-0036") - } - - if clientRejectBytes.GreaterThanEqual(big.Mul(clientApproveBytes, big.NewInt(3))) { - fmt.Println("Storage Clients VETO FIP-0036!") - } else if clientApproveBytes.LessThanEqual(clientRejectBytes) { - 
fmt.Println("Storage Clients REJECT FIP-0036") - } else { - fmt.Println("Storage Clients ACCEPT FIP-0036") - } - - return nil - }, -} - -// Returns voted sorted by votes from earliest to latest -func getVotesMap(file string) ([]Vote, error) { - var votes []Vote - vb, err := os.ReadFile(file) - if err != nil { - return nil, xerrors.Errorf("read vote: %w", err) - } - - if err := json.Unmarshal(vb, &votes); err != nil { - return nil, xerrors.Errorf("unmarshal vote: %w", err) - } - - sort.SliceStable(votes, func(i, j int) bool { - return votes[i].ID < votes[j].ID - }) - - return votes, nil -} diff --git a/cmd/lotus-shed/indexes.go b/cmd/lotus-shed/indexes.go index be7d43e05..620933e25 100644 --- a/cmd/lotus-shed/indexes.go +++ b/cmd/lotus-shed/indexes.go @@ -9,13 +9,11 @@ import ( "strings" "github.com/mitchellh/go-homedir" - "github.com/multiformats/go-varint" "github.com/urfave/cli/v2" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - builtintypes "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" @@ -109,6 +107,7 @@ var backfillEventsCmd = &cli.Command{ addressLookups := make(map[abi.ActorID]address.Address) + // TODO: We don't need this address resolution anymore once https://github.com/filecoin-project/lotus/issues/11594 lands resolveFn := func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { // we only want to match using f4 addresses idAddr, err := address.NewIDAddress(uint64(emitter)) @@ -118,18 +117,9 @@ var backfillEventsCmd = &cli.Command{ actor, err := api.StateGetActor(ctx, idAddr, ts.Key()) if err != nil || actor.Address == nil { - return address.Undef, false + return idAddr, true } - // if robust address is not f4 then we won't match against it so bail early - if actor.Address.Protocol() != address.Delegated { - return address.Undef, false - } - - 
// we have an f4 address, make sure it's assigned by the EAM - if namespace, _, err := varint.FromUvarint(actor.Address.Payload()); err != nil || namespace != builtintypes.EthereumAddressManagerActorID { - return address.Undef, false - } return *actor.Address, true } diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index a5b66a096..2b3b18670 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -84,7 +84,6 @@ func main() { diffCmd, itestdCmd, msigCmd, - fip36PollCmd, invariantsCmd, gasTraceCmd, replayOfflineCmd, @@ -92,6 +91,7 @@ func main() { FevmAnalyticsCmd, mismatchesCmd, blockCmd, + adlCmd, } app := &cli.App{ diff --git a/cmd/lotus-shed/market.go b/cmd/lotus-shed/market.go index 4436e3c40..6fb1566b6 100644 --- a/cmd/lotus-shed/market.go +++ b/cmd/lotus-shed/market.go @@ -387,7 +387,7 @@ var marketDealsTotalStorageCmd = &cli.Command{ count := 0 for _, deal := range deals { - if market.IsDealActive(deal.State) { + if market.IsDealActive(deal.State.Iface()) { dealStorage := big.NewIntUnsigned(uint64(deal.Proposal.PieceSize)) total = big.Add(total, dealStorage) count++ diff --git a/cmd/lotus-shed/migrations.go b/cmd/lotus-shed/migrations.go index 96e4747b7..febe833d7 100644 --- a/cmd/lotus-shed/migrations.go +++ b/cmd/lotus-shed/migrations.go @@ -1,27 +1,41 @@ package main import ( + "bytes" "context" + "encoding/json" "fmt" "os" "path/filepath" "strconv" "time" + "github.com/fatih/color" + "github.com/ipfs/boxo/blockservice" + "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/boxo/ipld/merkledag" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" + cbornode "github.com/ipfs/go-ipld-cbor" + "github.com/ipld/go-car" "github.com/urfave/cli/v2" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-amt-ipld/v4" + "github.com/filecoin-project/go-hamt-ipld/v3" 
"github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" v10 "github.com/filecoin-project/go-state-types/builtin/v10" v11 "github.com/filecoin-project/go-state-types/builtin/v11" v12 "github.com/filecoin-project/go-state-types/builtin/v12" + v13 "github.com/filecoin-project/go-state-types/builtin/v13" + market13 "github.com/filecoin-project/go-state-types/builtin/v13/market" + adt13 "github.com/filecoin-project/go-state-types/builtin/v13/util/adt" market8 "github.com/filecoin-project/go-state-types/builtin/v8/market" adt8 "github.com/filecoin-project/go-state-types/builtin/v8/util/adt" v9 "github.com/filecoin-project/go-state-types/builtin/v9" @@ -53,6 +67,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/must" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" ) @@ -72,6 +87,9 @@ var migrationsCmd = &cli.Command{ &cli.BoolFlag{ Name: "check-invariants", }, + &cli.StringFlag{ + Name: "export-bad-migration", + }, }, Action: func(cctx *cli.Context) error { fmt.Println("REMINDER: If you are running this, you likely want to ALSO run the continuity testing tool!") @@ -215,6 +233,31 @@ var migrationsCmd = &cli.Command{ cachedMigrationTime := time.Since(startTime) if newCid1 != newCid2 { + { + if err := printStateDiff(ctx, network.Version(nv), newCid2, newCid1, bs); err != nil { + fmt.Println("failed to print state diff: ", err) + } + } + + if cctx.IsSet("export-bad-migration") { + fi, err := os.Create(cctx.String("export-bad-migration")) + if err != nil { + return xerrors.Errorf("opening the output file: %w", err) + } + + defer fi.Close() //nolint:errcheck + + roots := []cid.Cid{newCid1, newCid2} + + dag := 
merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + err = car.WriteCarWithWalker(ctx, dag, roots, fi, carWalkFunc) + if err != nil { + return err + } + + fmt.Println("exported bad migration to ", cctx.String("export-bad-migration")) + } + return xerrors.Errorf("got different results with and without the cache: %s, %s", newCid1, newCid2) } @@ -246,6 +289,8 @@ func getMigrationFuncsForNetwork(nv network.Version) (UpgradeActorsFunc, PreUpgr return filcns.UpgradeActorsV11, filcns.PreUpgradeActorsV11, checkNv19Invariants, nil case network.Version21: return filcns.UpgradeActorsV12, filcns.PreUpgradeActorsV12, checkNv21Invariants, nil + case network.Version22: + return filcns.UpgradeActorsV13, filcns.PreUpgradeActorsV13, checkNv22Invariants, nil default: return nil, nil, nil, xerrors.Errorf("migration not implemented for nv%d", nv) } @@ -255,6 +300,357 @@ type UpgradeActorsFunc = func(context.Context, *stmgr.StateManager, stmgr.Migrat type PreUpgradeActorsFunc = func(context.Context, *stmgr.StateManager, stmgr.MigrationCache, cid.Cid, abi.ChainEpoch, *types.TipSet) error type CheckInvariantsFunc = func(context.Context, cid.Cid, cid.Cid, blockstore.Blockstore, abi.ChainEpoch) error +func printStateDiff(ctx context.Context, nv network.Version, newCid1, newCid2 cid.Cid, bs blockstore.Blockstore) error { + // migration diff + var sra, srb types.StateRoot + cst := cbornode.NewCborStore(bs) + + if err := cst.Get(ctx, newCid1, &sra); err != nil { + return err + } + if err := cst.Get(ctx, newCid2, &srb); err != nil { + return err + } + + if sra.Version != srb.Version { + fmt.Println("state root versions do not match: ", sra.Version, srb.Version) + } + if sra.Info != srb.Info { + fmt.Println("state root infos do not match: ", sra.Info, srb.Info) + } + if sra.Actors != srb.Actors { + fmt.Println("state root actors do not match: ", sra.Actors, srb.Actors) + if err := printActorsDiff(ctx, cst, nv, sra.Actors, srb.Actors); err != nil { + return err + } + } + + return 
nil +} + +func printActorsDiff(ctx context.Context, cst *cbornode.BasicIpldStore, nv network.Version, a, b cid.Cid) error { + // actor diff, a b are a hamt + + diffs, err := hamt.Diff(ctx, cst, cst, a, b, hamt.UseTreeBitWidth(builtin.DefaultHamtBitwidth)) + if err != nil { + return err + } + + keyParser := func(k string) (interface{}, error) { + return address.NewFromBytes([]byte(k)) + } + + for _, d := range diffs { + switch d.Type { + case hamt.Add: + color.Green("+ Add %v", must.One(keyParser(d.Key))) + case hamt.Remove: + color.Red("- Remove %v", must.One(keyParser(d.Key))) + case hamt.Modify: + addr := must.One(keyParser(d.Key)).(address.Address) + color.Yellow("~ Modify %v", addr) + var aa, bb types.ActorV5 + + if err := aa.UnmarshalCBOR(bytes.NewReader(d.Before.Raw)); err != nil { + return err + } + if err := bb.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + if err := printActorDiff(ctx, cst, nv, addr, aa, bb); err != nil { + return err + } + } + } + + return nil +} + +func printActorDiff(ctx context.Context, cst *cbornode.BasicIpldStore, nv network.Version, addr address.Address, a, b types.ActorV5) error { + if a.Code != b.Code { + fmt.Println(" Code: ", a.Code, b.Code) + } + if a.Head != b.Head { + fmt.Println(" Head: ", a.Head, b.Head) + } + if a.Nonce != b.Nonce { + fmt.Println(" Nonce: ", a.Nonce, b.Nonce) + } + if big.Cmp(a.Balance, b.Balance) == 0 { + fmt.Println(" Balance: ", a.Balance, b.Balance) + } + + switch addr.String() { + case "f05": + if err := printMarketActorDiff(ctx, cst, nv, a.Head, b.Head); err != nil { + return err + } + default: + fmt.Println("no logic to diff actor state for ", addr) + } + + return nil +} + +func printMarketActorDiff(ctx context.Context, cst *cbornode.BasicIpldStore, nv network.Version, a, b cid.Cid) error { + if nv != network.Version22 { + return xerrors.Errorf("market actor diff not implemented for nv%d", nv) + } + + var ma, mb market13.State + if err := cst.Get(ctx, a, &ma); err != 
nil { + return err + } + if err := cst.Get(ctx, b, &mb); err != nil { + return err + } + + if ma.Proposals != mb.Proposals { + fmt.Println(" Proposals: ", ma.Proposals, mb.Proposals) + } + if ma.States != mb.States { + fmt.Println(" States: ", ma.States, mb.States) + + // diff the AMTs + amtDiff, err := amt.Diff(ctx, cst, cst, ma.States, mb.States, amt.UseTreeBitWidth(market13.StatesAmtBitwidth)) + if err != nil { + return err + } + + proposalsArrA, err := adt13.AsArray(adt8.WrapStore(ctx, cst), ma.Proposals, market13.ProposalsAmtBitwidth) + if err != nil { + return err + } + proposalsArrB, err := adt13.AsArray(adt8.WrapStore(ctx, cst), mb.Proposals, market13.ProposalsAmtBitwidth) + if err != nil { + return err + } + + for _, d := range amtDiff { + switch d.Type { + case amt.Add: + color.Green(" state + Add %v", d.Key) + case amt.Remove: + color.Red(" state - Remove %v", d.Key) + case amt.Modify: + color.Yellow(" state ~ Modify %v", d.Key) + + var a, b market13.DealState + if err := a.UnmarshalCBOR(bytes.NewReader(d.Before.Raw)); err != nil { + return err + } + if err := b.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + ja, err := json.Marshal(a) + if err != nil { + return err + } + jb, err := json.Marshal(b) + if err != nil { + return err + } + + fmt.Println(" A: ", string(ja)) + fmt.Println(" B: ", string(jb)) + + var propA, propB market13.DealProposal + + if _, err := proposalsArrA.Get(d.Key, &propA); err != nil { + return err + } + if _, err := proposalsArrB.Get(d.Key, &propB); err != nil { + return err + } + + pab, err := json.Marshal(propA) + if err != nil { + return err + } + pbb, err := json.Marshal(propB) + if err != nil { + return err + } + if string(pab) != string(pbb) { + fmt.Println(" PropA: ", string(pab)) + fmt.Println(" PropB: ", string(pbb)) + } else { + fmt.Println(" Prop: ", string(pab)) + } + + } + } + } + if ma.PendingProposals != mb.PendingProposals { + fmt.Println(" PendingProposals: ", ma.PendingProposals, 
mb.PendingProposals) + } + if ma.EscrowTable != mb.EscrowTable { + fmt.Println(" EscrowTable: ", ma.EscrowTable, mb.EscrowTable) + } + if ma.LockedTable != mb.LockedTable { + fmt.Println(" LockedTable: ", ma.LockedTable, mb.LockedTable) + } + if ma.NextID != mb.NextID { + fmt.Println(" NextID: ", ma.NextID, mb.NextID) + } + if ma.DealOpsByEpoch != mb.DealOpsByEpoch { + fmt.Println(" DealOpsByEpoch: ", ma.DealOpsByEpoch, mb.DealOpsByEpoch) + } + if ma.LastCron != mb.LastCron { + fmt.Println(" LastCron: ", ma.LastCron, mb.LastCron) + } + if ma.TotalClientLockedCollateral != mb.TotalClientLockedCollateral { + fmt.Println(" TotalClientLockedCollateral: ", ma.TotalClientLockedCollateral, mb.TotalClientLockedCollateral) + } + if ma.TotalProviderLockedCollateral != mb.TotalProviderLockedCollateral { + fmt.Println(" TotalProviderLockedCollateral: ", ma.TotalProviderLockedCollateral, mb.TotalProviderLockedCollateral) + } + if ma.TotalClientStorageFee != mb.TotalClientStorageFee { + fmt.Println(" TotalClientStorageFee: ", ma.TotalClientStorageFee, mb.TotalClientStorageFee) + } + if ma.PendingDealAllocationIds != mb.PendingDealAllocationIds { + fmt.Println(" PendingDealAllocationIds: ", ma.PendingDealAllocationIds, mb.PendingDealAllocationIds) + } + if ma.ProviderSectors != mb.ProviderSectors { + fmt.Println(" ProviderSectors: ", ma.ProviderSectors, mb.ProviderSectors) + + // diff the HAMTs + hamtDiff, err := hamt.Diff(ctx, cst, cst, ma.ProviderSectors, mb.ProviderSectors, hamt.UseTreeBitWidth(market13.ProviderSectorsHamtBitwidth)) + if err != nil { + return err + } + + for _, d := range hamtDiff { + spIDk := must.One(abi.ParseUIntKey(d.Key)) + + switch d.Type { + case hamt.Add: + color.Green(" ProviderSectors + Add f0%v", spIDk) + + var b cbg.CborCid + if err := b.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + fmt.Println(" |-B: ", cid.Cid(b).String()) + + inner, err := adt13.AsMap(adt8.WrapStore(ctx, cst), cid.Cid(b), 
market13.ProviderSectorsHamtBitwidth) + if err != nil { + return err + } + + var ids market13.SectorDealIDs + err = inner.ForEach(&ids, func(k string) error { + sectorNumber := must.One(abi.ParseUIntKey(k)) + + color.Green(" |-- ProviderSectors + Add %v", sectorNumber) + fmt.Printf(" |+: %v\n", ids) + + return nil + }) + if err != nil { + return err + } + case hamt.Remove: + color.Red(" ProviderSectors - Remove f0%v", spIDk) + case hamt.Modify: + color.Yellow(" ProviderSectors ~ Modify f0%v", spIDk) + + var a, b cbg.CborCid + if err := a.UnmarshalCBOR(bytes.NewReader(d.Before.Raw)); err != nil { + return err + } + if err := b.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + fmt.Println(" |-A: ", cid.Cid(b).String()) + fmt.Println(" |-B: ", cid.Cid(a).String()) + + // diff the inner HAMTs + innerHamtDiff, err := hamt.Diff(ctx, cst, cst, cid.Cid(a), cid.Cid(b), hamt.UseTreeBitWidth(market13.ProviderSectorsHamtBitwidth)) + if err != nil { + return err + } + + for _, d := range innerHamtDiff { + sectorNumber := must.One(abi.ParseUIntKey(d.Key)) + + switch d.Type { + case hamt.Add: + var b market13.SectorDealIDs + + if err := b.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + color.Green(" |-- ProviderSectors + Add %v", sectorNumber) + fmt.Printf(" |B: %v\n", b) + case hamt.Remove: + var a market13.SectorDealIDs + + if err := a.UnmarshalCBOR(bytes.NewReader(d.Before.Raw)); err != nil { + return err + } + + color.Red(" |-- ProviderSectors - Remove %v", sectorNumber) + fmt.Printf(" |A: %v\n", a) + case hamt.Modify: + var a, b market13.SectorDealIDs + + if err := a.UnmarshalCBOR(bytes.NewReader(d.Before.Raw)); err != nil { + return err + } + if err := b.UnmarshalCBOR(bytes.NewReader(d.After.Raw)); err != nil { + return err + } + + color.Yellow(" |-- ProviderSectors ~ Modify %v", sectorNumber) + fmt.Printf(" |A: %v\n", a) + fmt.Printf(" |B: %v\n", b) + } + } + } + } + } + + return nil +} + +func 
checkNv22Invariants(ctx context.Context, oldStateRootCid cid.Cid, newStateRootCid cid.Cid, bs blockstore.Blockstore, epoch abi.ChainEpoch) error { + + actorStore := store.ActorStore(ctx, bs) + startTime := time.Now() + + // Load the new state root. + var newStateRoot types.StateRoot + if err := actorStore.Get(ctx, newStateRootCid, &newStateRoot); err != nil { + return xerrors.Errorf("failed to decode state root: %w", err) + } + + actorCodeCids, err := actors.GetActorCodeIDs(actorstypes.Version13) + if err != nil { + return err + } + newActorTree, err := builtin.LoadTree(actorStore, newStateRoot.Actors) + if err != nil { + return err + } + messages, err := v13.CheckStateInvariants(newActorTree, epoch, actorCodeCids) + if err != nil { + return xerrors.Errorf("checking state invariants: %w", err) + } + + for _, message := range messages.Messages() { + fmt.Println("got the following error: ", message) + } + + fmt.Println("completed invariant checks, took ", time.Since(startTime)) + + return nil +} func checkNv21Invariants(ctx context.Context, oldStateRootCid cid.Cid, newStateRootCid cid.Cid, bs blockstore.Blockstore, epoch abi.ChainEpoch) error { actorStore := store.ActorStore(ctx, bs) diff --git a/cmd/lotus-shed/miner.go b/cmd/lotus-shed/miner.go index 2f9b4ecf1..a8bb93744 100644 --- a/cmd/lotus-shed/miner.go +++ b/cmd/lotus-shed/miner.go @@ -553,7 +553,7 @@ var sendInvalidWindowPoStCmd = &cli.Command{ return xerrors.Errorf("serializing params: %w", err) } - fmt.Printf("submitting bad PoST for %d partitions\n", len(partitionIndices)) + fmt.Printf("submitting bad PoST for %d paritions\n", len(partitionIndices)) smsg, err := api.MpoolPushMessage(ctx, &types.Message{ From: minfo.Worker, To: maddr, diff --git a/cmd/lotus-shed/shedgen/cbor_gen.go b/cmd/lotus-shed/shedgen/cbor_gen.go index f2a79fe7d..10b41827f 100644 --- a/cmd/lotus-shed/shedgen/cbor_gen.go +++ b/cmd/lotus-shed/shedgen/cbor_gen.go @@ -31,7 +31,7 @@ func (t *CarbNode) MarshalCBOR(w io.Writer) error { } // 
t.Sub ([]cid.Cid) (slice) - if len("Sub") > cbg.MaxLength { + if len("Sub") > 8192 { return xerrors.Errorf("Value in field \"Sub\" was too long") } @@ -42,7 +42,7 @@ func (t *CarbNode) MarshalCBOR(w io.Writer) error { return err } - if len(t.Sub) > cbg.MaxLength { + if len(t.Sub) > 8192 { return xerrors.Errorf("Slice value in field t.Sub was too long") } @@ -88,7 +88,7 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -105,7 +105,7 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Sub: array too large (%d)", extra) } @@ -136,6 +136,7 @@ func (t *CarbNode) UnmarshalCBOR(r io.Reader) (err error) { t.Sub[i] = c } + } } diff --git a/conformance/chaos/cbor_gen.go b/conformance/chaos/cbor_gen.go index d74ae0946..09d48ad10 100644 --- a/conformance/chaos/cbor_gen.go +++ b/conformance/chaos/cbor_gen.go @@ -37,7 +37,7 @@ func (t *State) MarshalCBOR(w io.Writer) error { } // t.Value (string) (string) - if len(t.Value) > cbg.MaxLength { + if len(t.Value) > 8192 { return xerrors.Errorf("Value in field t.Value was too long") } @@ -49,7 +49,7 @@ func (t *State) MarshalCBOR(w io.Writer) error { } // t.Unmarshallable ([]*chaos.UnmarshallableCBOR) (slice) - if len(t.Unmarshallable) > cbg.MaxLength { + if len(t.Unmarshallable) > 8192 { return xerrors.Errorf("Slice value in field t.Unmarshallable was too long") } @@ -60,6 +60,7 @@ func (t *State) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -90,7 +91,7 @@ func (t *State) UnmarshalCBOR(r io.Reader) (err error) { // t.Value (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -104,7 +105,7 @@ func (t *State) UnmarshalCBOR(r io.Reader) (err 
error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Unmarshallable: array too large (%d)", extra) } @@ -142,9 +143,9 @@ func (t *State) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - return nil } @@ -174,7 +175,7 @@ func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error { } // t.Addrs ([]address.Address) (slice) - if len(t.Addrs) > cbg.MaxLength { + if len(t.Addrs) > 8192 { return xerrors.Errorf("Slice value in field t.Addrs was too long") } @@ -185,10 +186,11 @@ func (t *CallerValidationArgs) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Types ([]cid.Cid) (slice) - if len(t.Types) > cbg.MaxLength { + if len(t.Types) > 8192 { return xerrors.Errorf("Slice value in field t.Types was too long") } @@ -231,10 +233,10 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { // t.Branch (chaos.CallerValidationBranch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -260,7 +262,7 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Addrs: array too large (%d)", extra) } @@ -288,9 +290,9 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Types ([]cid.Cid) (slice) maj, extra, err = cr.ReadHeader() @@ -298,7 +300,7 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Types: array too large (%d)", extra) } @@ -329,9 +331,9 @@ func (t *CallerValidationArgs) UnmarshalCBOR(r io.Reader) (err error) { t.Types[i] = c } + } } - return nil } @@ -562,7 +564,7 @@ func (t *SendArgs) MarshalCBOR(w io.Writer) error { } // t.Params ([]uint8) (slice) - if len(t.Params) > cbg.ByteArrayMaxLen { + if 
len(t.Params) > 2097152 { return xerrors.Errorf("Byte array in field t.Params was too long") } @@ -570,9 +572,10 @@ func (t *SendArgs) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Params[:]); err != nil { + if _, err := cw.Write(t.Params); err != nil { return err } + return nil } @@ -638,7 +641,7 @@ func (t *SendArgs) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Params: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -649,9 +652,10 @@ func (t *SendArgs) UnmarshalCBOR(r io.Reader) (err error) { t.Params = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Params[:]); err != nil { + if _, err := io.ReadFull(cr, t.Params); err != nil { return err } + return nil } @@ -670,7 +674,7 @@ func (t *SendReturn) MarshalCBOR(w io.Writer) error { } // t.Return (builtin.CBORBytes) (slice) - if len(t.Return) > cbg.ByteArrayMaxLen { + if len(t.Return) > 2097152 { return xerrors.Errorf("Byte array in field t.Return was too long") } @@ -678,7 +682,7 @@ func (t *SendReturn) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Return[:]); err != nil { + if _, err := cw.Write(t.Return); err != nil { return err } @@ -692,6 +696,7 @@ func (t *SendReturn) MarshalCBOR(w io.Writer) error { return err } } + return nil } @@ -725,7 +730,7 @@ func (t *SendReturn) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Return: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -736,16 +741,17 @@ func (t *SendReturn) UnmarshalCBOR(r io.Reader) (err error) { t.Return = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Return[:]); err != nil { + if _, err := io.ReadFull(cr, t.Return); err != nil { return err } + // t.Code (exitcode.ExitCode) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var 
extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -782,7 +788,7 @@ func (t *MutateStateArgs) MarshalCBOR(w io.Writer) error { } // t.Value (string) (string) - if len(t.Value) > cbg.MaxLength { + if len(t.Value) > 8192 { return xerrors.Errorf("Value in field t.Value was too long") } @@ -803,6 +809,7 @@ func (t *MutateStateArgs) MarshalCBOR(w io.Writer) error { return err } } + return nil } @@ -832,7 +839,7 @@ func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) (err error) { // t.Value (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -842,10 +849,10 @@ func (t *MutateStateArgs) UnmarshalCBOR(r io.Reader) (err error) { // t.Branch (chaos.MutateStateBranch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -893,7 +900,7 @@ func (t *AbortWithArgs) MarshalCBOR(w io.Writer) error { } // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { + if len(t.Message) > 8192 { return xerrors.Errorf("Value in field t.Message was too long") } @@ -937,10 +944,10 @@ func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) (err error) { // t.Code (exitcode.ExitCode) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -962,7 +969,7 @@ func (t *AbortWithArgs) UnmarshalCBOR(r io.Reader) (err error) { // t.Message (string) (string) { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1094,10 +1101,10 @@ func (t *InspectRuntimeReturn) UnmarshalCBOR(r io.Reader) (err error) { // t.CurrEpoch (abi.ChainEpoch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = 
int64(extra) diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index 57070caed..b133930bc 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -475,7 +475,7 @@ Inputs: ], "Bw==", 10101, - 21 + 22 ] ``` @@ -826,8 +826,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ] @@ -1426,8 +1425,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ] @@ -2910,6 +2908,14 @@ Inputs: "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } ] @@ -3212,6 +3218,14 @@ Inputs: "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } } @@ -3556,6 +3570,14 @@ Response: "StartEpoch": 10101, "EndEpoch": 10101 }, + "PieceActivationManifest": { + "CID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 2032, + "VerifiedAllocationKey": null, + "Notify": null + }, "KeepUnsealed": true } } diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index 9110c1c3b..19c3ca481 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -170,6 +170,8 @@ * [StateDealProviderCollateralBounds](#StateDealProviderCollateralBounds) * [StateDecodeParams](#StateDecodeParams) * [StateGetActor](#StateGetActor) + * [StateGetAllAllocations](#StateGetAllAllocations) + * [StateGetAllClaims](#StateGetAllClaims) * 
[StateGetAllocation](#StateGetAllocation) * [StateGetAllocationForPendingDeal](#StateGetAllocationForPendingDeal) * [StateGetAllocations](#StateGetAllocations) @@ -4732,7 +4734,7 @@ Perms: read Inputs: ```json [ - 21 + 22 ] ``` @@ -4747,7 +4749,7 @@ Perms: read Inputs: ```json [ - 21 + 22 ] ``` @@ -4878,16 +4880,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } + }, "GasCharges": [ { "Name": "string value", @@ -4907,16 +4920,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } + }, "GasCharges": [ { "Name": "string value", @@ -5118,16 +5142,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } + }, "GasCharges": [ { "Name": "string value", @@ -5147,16 +5182,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } + }, "GasCharges": [ { "Name": "string value", @@ -5269,6 +5315,50 @@ Response: } ``` +### StateGetAllAllocations +StateGetAllAllocations returns the all the allocations available in verified registry actor. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### StateGetAllClaims +StateGetAllClaims returns the all the claims available in verified registry actor. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + ### StateGetAllocation StateGetAllocation returns the allocation for a given address and allocation ID. 
@@ -5469,7 +5559,9 @@ Response: "UpgradeHyggeHeight": 10101, "UpgradeLightningHeight": 10101, "UpgradeThunderHeight": 10101, - "UpgradeWatermelonHeight": 10101 + "UpgradeWatermelonHeight": 10101, + "UpgradeDragonHeight": 10101, + "UpgradePhoenixHeight": 10101 }, "Eip155ChainID": 123 } @@ -5750,8 +5842,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } } @@ -5829,8 +5920,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ``` @@ -6395,7 +6485,7 @@ Inputs: ] ``` -Response: `21` +Response: `22` ### StateReadState StateReadState returns the indicated actor's state. @@ -6518,16 +6608,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } + }, "GasCharges": [ { "Name": "string value", @@ -6547,16 +6648,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } 
+ }, "GasCharges": [ { "Name": "string value", diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index 53b5ddbe6..33315e221 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -115,6 +115,8 @@ * [GasEstimateGasLimit](#GasEstimateGasLimit) * [GasEstimateGasPremium](#GasEstimateGasPremium) * [GasEstimateMessageGas](#GasEstimateMessageGas) +* [Get](#Get) + * [GetActorEventsRaw](#GetActorEventsRaw) * [I](#I) * [ID](#ID) * [Log](#Log) @@ -211,6 +213,9 @@ * [PaychVoucherCreate](#PaychVoucherCreate) * [PaychVoucherList](#PaychVoucherList) * [PaychVoucherSubmit](#PaychVoucherSubmit) +* [Raft](#Raft) + * [RaftLeader](#RaftLeader) + * [RaftState](#RaftState) * [Start](#Start) * [StartTime](#StartTime) * [State](#State) @@ -227,8 +232,11 @@ * [StateDecodeParams](#StateDecodeParams) * [StateEncodeParams](#StateEncodeParams) * [StateGetActor](#StateGetActor) + * [StateGetAllAllocations](#StateGetAllAllocations) + * [StateGetAllClaims](#StateGetAllClaims) * [StateGetAllocation](#StateGetAllocation) * [StateGetAllocationForPendingDeal](#StateGetAllocationForPendingDeal) + * [StateGetAllocationIdForPendingDeal](#StateGetAllocationIdForPendingDeal) * [StateGetAllocations](#StateGetAllocations) * [StateGetBeaconEntry](#StateGetBeaconEntry) * [StateGetClaim](#StateGetClaim) @@ -276,6 +284,8 @@ * [StateVerifiedRegistryRootKey](#StateVerifiedRegistryRootKey) * [StateVerifierStatus](#StateVerifierStatus) * [StateWaitMsg](#StateWaitMsg) +* [Subscribe](#Subscribe) + * [SubscribeActorEventsRaw](#SubscribeActorEventsRaw) * [Sync](#Sync) * [SyncCheckBad](#SyncCheckBad) * [SyncCheckpoint](#SyncCheckpoint) @@ -3076,9 +3086,23 @@ Inputs: `null` Response: `false` ### EthTraceBlock -TraceAPI related methods +Returns an OpenEthereum-compatible trace of the given block (implementing `trace_block`), +translating Filecoin semantics into Ethereum semantics and tracing both EVM and FVM calls. 
-Returns traces created at given block +Features: + +- FVM actor create events, calls, etc. show up as if they were EVM smart contract events. +- Native FVM call inputs are ABI-encoded (Solidity ABI) as if they were calls to a + `handle_filecoin_method(uint64 method, uint64 codec, bytes params)` function + (where `codec` is the IPLD codec of `params`). +- Native FVM call outputs (return values) are ABI-encoded as `(uint32 exit_code, uint64 + codec, bytes output)` where `codec` is the IPLD codec of `output`. + +Limitations (for now): + +1. Block rewards are not included in the trace. +2. SELFDESTRUCT operations are not included in the trace. +3. EVM smart contract "create" events always specify `0xfe` as the "code" for newly created EVM smart contracts. Perms: read @@ -3094,23 +3118,14 @@ Response: ```json [ { - "action": { - "callType": "string value", - "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031", - "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031", - "gas": "0x5", - "input": "0x07", - "value": "0x0" - }, - "result": { - "gasUsed": "0x5", - "output": "0x07" - }, + "type": "string value", + "error": "string value", "subtraces": 123, "traceAddress": [ 123 ], - "Type": "string value", + "action": {}, + "result": {}, "blockHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e", "blockNumber": 9, "transactionHash": "0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e", @@ -3143,23 +3158,14 @@ Response: "stateDiff": "string value", "trace": [ { - "action": { - "callType": "string value", - "from": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031", - "to": "0x5cbeecf99d3fdb3f25e309cc264f240bb0664031", - "gas": "0x5", - "input": "0x07", - "value": "0x0" - }, - "result": { - "gasUsed": "0x5", - "output": "0x07" - }, + "type": "string value", + "error": "string value", "subtraces": 123, "traceAddress": [ 123 ], - "Type": "string value" + "action": {}, + "result": {} } ], "transactionHash": 
"0x37690cfec6c1bf4c3b9288c7a5d783e98731e90b0a4c177c2a374c7a9427355e", @@ -3380,6 +3386,72 @@ Response: } ``` +## Get + + +### GetActorEventsRaw +GetActorEventsRaw returns all user-programmed and built-in actor events that match the given +filter. +This is a request/response API. +Results available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange +configuration options and also the amount of historical data available in the node. + +This is an EXPERIMENTAL API and may be subject to change. + + +Perms: read + +Inputs: +```json +[ + { + "addresses": [ + "f01234" + ], + "fields": { + "abc": [ + { + "codec": 81, + "value": "ZGRhdGE=" + } + ] + }, + "fromHeight": 1010, + "toHeight": 1020 + } +] +``` + +Response: +```json +[ + { + "entries": [ + { + "Flags": 7, + "Key": "string value", + "Codec": 42, + "Value": "Ynl0ZSBhcnJheQ==" + } + ], + "emitter": "f01234", + "reverted": true, + "height": 10101, + "tipsetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "msgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } +] +``` + ## I @@ -6179,6 +6251,33 @@ Response: } ``` +## Raft + + +### RaftLeader + + +Perms: read + +Inputs: `null` + +Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"` + +### RaftState + + +Perms: read + +Inputs: `null` + +Response: +```json +{ + "NonceMap": {}, + "MsgUuids": {} +} +``` + ## Start @@ -6229,7 +6328,7 @@ Perms: read Inputs: ```json [ - 21 + 22 ] ``` @@ -6244,7 +6343,7 @@ Perms: read Inputs: ```json [ - 21 + 22 ] ``` @@ -6375,16 +6474,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + 
"Id": 1000, + "State": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } + }, "GasCharges": [ { "Name": "string value", @@ -6404,16 +6514,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } + }, "GasCharges": [ { "Name": "string value", @@ -6615,16 +6736,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } + }, "GasCharges": [ { "Name": "string value", @@ -6644,16 +6776,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } + }, "GasCharges": [ { "Name": "string value", @@ -6817,6 +6960,50 @@ Response: } ``` +### StateGetAllAllocations +StateGetAllAllocations returns the all the allocations available in verified registry actor. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + +### StateGetAllClaims +StateGetAllClaims returns the all the claims available in verified registry actor. + + +Perms: read + +Inputs: +```json +[ + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `{}` + ### StateGetAllocation StateGetAllocation returns the allocation for a given address and allocation ID. @@ -6891,6 +7078,29 @@ Response: } ``` +### StateGetAllocationIdForPendingDeal +StateGetAllocationIdForPendingDeal is like StateGetAllocationForPendingDeal except it returns the allocation ID + + +Perms: read + +Inputs: +```json +[ + 5432, + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ] +] +``` + +Response: `0` + ### StateGetAllocations StateGetAllocations returns the all the allocations for a given client. 
@@ -7041,7 +7251,9 @@ Response: "UpgradeHyggeHeight": 10101, "UpgradeLightningHeight": 10101, "UpgradeThunderHeight": 10101, - "UpgradeWatermelonHeight": 10101 + "UpgradeWatermelonHeight": 10101, + "UpgradeDragonHeight": 10101, + "UpgradePhoenixHeight": 10101 }, "Eip155ChainID": 123 } @@ -7348,8 +7560,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } } @@ -7427,8 +7638,7 @@ Response: "State": { "SectorStartEpoch": 10101, "LastUpdatedEpoch": 10101, - "SlashEpoch": 10101, - "VerifiedClaim": 0 + "SlashEpoch": 10101 } } ``` @@ -8021,7 +8231,7 @@ Inputs: ] ``` -Response: `21` +Response: `22` ### StateReadState StateReadState returns the indicated actor's state. @@ -8144,16 +8354,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } + }, "GasCharges": [ { "Name": "string value", @@ -8173,16 +8394,27 @@ Response: "Params": "Ynl0ZSBhcnJheQ==", "ParamsCodec": 42, "GasLimit": 42, - "ReadOnly": true, - "CodeCid": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - } + "ReadOnly": true }, "MsgRct": { "ExitCode": 0, "Return": "Ynl0ZSBhcnJheQ==", "ReturnCodec": 42 }, + "InvokedActor": { + "Id": 1000, + "State": { + "Code": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Head": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Nonce": 42, + "Balance": "0", + "Address": "f01234" + } 
+ }, "GasCharges": [ { "Name": "string value", @@ -8596,6 +8828,77 @@ Response: } ``` +## Subscribe + + +### SubscribeActorEventsRaw +SubscribeActorEventsRaw returns a long-lived stream of all user-programmed and built-in actor +events that match the given filter. +Events that match the given filter are written to the stream in real-time as they are emitted +from the FVM. +The response stream is closed when the client disconnects, when a ToHeight is specified and is +reached, or if there is an error while writing an event to the stream. +This API also allows clients to read all historical events matching the given filter before any +real-time events are written to the response stream if the filter specifies an earlier +FromHeight. +Results available from this API may be limited by the MaxFilterResults and MaxFilterHeightRange +configuration options and also the amount of historical data available in the node. + +Note: this API is only available via websocket connections. +This is an EXPERIMENTAL API and may be subject to change. + + +Perms: read + +Inputs: +```json +[ + { + "addresses": [ + "f01234" + ], + "fields": { + "abc": [ + { + "codec": 81, + "value": "ZGRhdGE=" + } + ] + }, + "fromHeight": 1010, + "toHeight": 1020 + } +] +``` + +Response: +```json +{ + "entries": [ + { + "Flags": 7, + "Key": "string value", + "Codec": 42, + "Value": "Ynl0ZSBhcnJheQ==" + } + ], + "emitter": "f01234", + "reverted": true, + "height": 10101, + "tipsetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "msgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +} +``` + ## Sync The Sync method group contains methods for interacting with and observing the lotus sync service. 
diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 2ea89e6ce..4a5ec0578 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] VERSION: - 1.25.3-dev + 1.26.2 COMMANDS: init Initialize a lotus miner repo @@ -66,7 +66,6 @@ OPTIONS: --no-local-storage don't use storageminer repo for sector storage (default: false) --gas-premium value set gas premium for initialization messages in AttoFIL (default: "0") --from value select which address to send actor creation message from - --confidence value number of block confirmations to wait for (default: 5) --help, -h show help ``` diff --git a/documentation/en/cli-lotus-provider.md b/documentation/en/cli-lotus-provider.md index 5e5864107..aed4a5d2a 100644 --- a/documentation/en/cli-lotus-provider.md +++ b/documentation/en/cli-lotus-provider.md @@ -7,14 +7,13 @@ USAGE: lotus-provider [global options] command [command options] [arguments...] VERSION: - 1.25.3-dev + 1.26.2 COMMANDS: run Start a lotus provider process stop Stop a running lotus provider config Manage node config by layers. The layer 'base' will always be applied. test Utility functions for testing - web Start lotus provider web interface version Print version help, h Shows a list of commands or help for one command DEVELOPER: @@ -248,25 +247,6 @@ OPTIONS: --help, -h show help ``` -## lotus-provider web -``` -NAME: - lotus-provider web - Start lotus provider web interface - -USAGE: - lotus-provider web [command options] [arguments...] - -DESCRIPTION: - Start an instance of lotus provider web interface. - This creates the 'web' layer if it does not exist, then calls run with that layer. - -OPTIONS: - --listen value Address to listen on (default: "127.0.0.1:4701") - --layers value [ --layers value ] list of layers to be interpreted (atop defaults). Default: base. 
Web will be added (default: "base") - --nosync don't check full-node sync status (default: false) - --help, -h show help -``` - ## lotus-provider version ``` NAME: diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index 0e0fee157..d2912590b 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] VERSION: - 1.25.3-dev + 1.26.2 COMMANDS: run Start lotus worker diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index ff62980dc..1811fc0c4 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.25.3-dev + 1.26.2 COMMANDS: daemon Start a lotus daemon process @@ -1188,8 +1188,8 @@ COMMANDS: check-client-datacap check verified client remaining bytes check-notary-datacap check a notary's remaining bytes sign-remove-data-cap-proposal allows a notary to sign a Remove Data Cap Proposal - list-allocations List allocations made by client - list-claims List claims made by provider + list-allocations List allocations available in verified registry actor or made by a client if specified + list-claims List claims available in verified registry actor or made by provider if specified remove-expired-allocations remove expired allocations (if no allocations are specified all eligible allocations are removed) remove-expired-claims remove expired claims (if no claims are specified all eligible claims are removed) help, h Shows a list of commands or help for one command @@ -1275,20 +1275,21 @@ OPTIONS: ### lotus filplus list-allocations ``` NAME: - lotus filplus list-allocations - List allocations made by client + lotus filplus list-allocations - List allocations available in verified registry actor or made by a client if specified USAGE: lotus filplus list-allocations [command 
options] clientAddress OPTIONS: --expired list only expired allocations (default: false) + --json output results in json format (default: false) --help, -h show help ``` ### lotus filplus list-claims ``` NAME: - lotus filplus list-claims - List claims made by provider + lotus filplus list-claims - List claims available in verified registry actor or made by provider if specified USAGE: lotus filplus list-claims [command options] providerAddress diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 9f9836bc0..3f3a6b171 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -260,6 +260,68 @@ #HotstoreMaxSpaceSafetyBuffer = 50000000000 +[Cluster] + # EXPERIMENTAL. config to enabled node cluster with raft consensus + # + # type: bool + # env var: LOTUS_CLUSTER_CLUSTERMODEENABLED + #ClusterModeEnabled = false + + # A folder to store Raft's data. + # + # type: string + # env var: LOTUS_CLUSTER_DATAFOLDER + #DataFolder = "" + + # InitPeersetMultiAddr provides the list of initial cluster peers for new Raft + # peers (with no prior state). It is ignored when Raft was already + # initialized or when starting in staging mode. + # + # type: []string + # env var: LOTUS_CLUSTER_INITPEERSETMULTIADDR + #InitPeersetMultiAddr = [] + + # LeaderTimeout specifies how long to wait for a leader before + # failing an operation. + # + # type: Duration + # env var: LOTUS_CLUSTER_WAITFORLEADERTIMEOUT + #WaitForLeaderTimeout = "15s" + + # NetworkTimeout specifies how long before a Raft network + # operation is timed out + # + # type: Duration + # env var: LOTUS_CLUSTER_NETWORKTIMEOUT + #NetworkTimeout = "1m40s" + + # CommitRetries specifies how many times we retry a failed commit until + # we give up. 
+ # + # type: int + # env var: LOTUS_CLUSTER_COMMITRETRIES + #CommitRetries = 1 + + # How long to wait between retries + # + # type: Duration + # env var: LOTUS_CLUSTER_COMMITRETRYDELAY + #CommitRetryDelay = "200ms" + + # BackupsRotate specifies the maximum number of Raft's DataFolder + # copies that we keep as backups (renaming) after cleanup. + # + # type: int + # env var: LOTUS_CLUSTER_BACKUPSROTATE + #BackupsRotate = 6 + + # Tracing enables propagation of contexts across binary boundaries. + # + # type: bool + # env var: LOTUS_CLUSTER_TRACING + #Tracing = false + + [Fevm] # EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids. # This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above. @@ -275,57 +337,66 @@ # env var: LOTUS_FEVM_ETHTXHASHMAPPINGLIFETIMEDAYS #EthTxHashMappingLifetimeDays = 0 - [Fevm.Events] - # EnableEthRPC enables APIs that - # DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. - # The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag. - # - # type: bool - # env var: LOTUS_FEVM_EVENTS_DISABLEREALTIMEFILTERAPI - #DisableRealTimeFilterAPI = false - # DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events - # that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - # The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag. - # - # type: bool - # env var: LOTUS_FEVM_EVENTS_DISABLEHISTORICFILTERAPI - #DisableHistoricFilterAPI = false +[Events] + # DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. 
+ # The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. + # + # type: bool + # env var: LOTUS_EVENTS_DISABLEREALTIMEFILTERAPI + #DisableRealTimeFilterAPI = false - # FilterTTL specifies the time to live for actor event filters. Filters that haven't been accessed longer than - # this time become eligible for automatic deletion. - # - # type: Duration - # env var: LOTUS_FEVM_EVENTS_FILTERTTL - #FilterTTL = "24h0m0s" + # DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events + # that occurred in the past. HistoricFilterAPI maintains a queryable index of events. + # The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. + # + # type: bool + # env var: LOTUS_EVENTS_DISABLEHISTORICFILTERAPI + #DisableHistoricFilterAPI = false - # MaxFilters specifies the maximum number of filters that may exist at any one time. - # - # type: int - # env var: LOTUS_FEVM_EVENTS_MAXFILTERS - #MaxFilters = 100 + # EnableActorEventsAPI enables the Actor events API that enables clients to consume events + # emitted by (smart contracts + built-in Actors). + # This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be + # disabled by setting their respective Disable* options. + # + # type: bool + # env var: LOTUS_EVENTS_ENABLEACTOREVENTSAPI + #EnableActorEventsAPI = false - # MaxFilterResults specifies the maximum number of results that can be accumulated by an actor event filter. - # - # type: int - # env var: LOTUS_FEVM_EVENTS_MAXFILTERRESULTS - #MaxFilterResults = 10000 + # FilterTTL specifies the time to live for actor event filters. Filters that haven't been accessed longer than + # this time become eligible for automatic deletion. 
+ # + # type: Duration + # env var: LOTUS_EVENTS_FILTERTTL + #FilterTTL = "24h0m0s" - # MaxFilterHeightRange specifies the maximum range of heights that can be used in a filter (to avoid querying - # the entire chain) - # - # type: uint64 - # env var: LOTUS_FEVM_EVENTS_MAXFILTERHEIGHTRANGE - #MaxFilterHeightRange = 2880 + # MaxFilters specifies the maximum number of filters that may exist at any one time. + # + # type: int + # env var: LOTUS_EVENTS_MAXFILTERS + #MaxFilters = 100 - # DatabasePath is the full path to a sqlite database that will be used to index actor events to - # support the historic filter APIs. If the database does not exist it will be created. The directory containing - # the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as - # relative to the CWD (current working directory). - # - # type: string - # env var: LOTUS_FEVM_EVENTS_DATABASEPATH - #DatabasePath = "" + # MaxFilterResults specifies the maximum number of results that can be accumulated by an actor event filter. + # + # type: int + # env var: LOTUS_EVENTS_MAXFILTERRESULTS + #MaxFilterResults = 10000 + + # MaxFilterHeightRange specifies the maximum range of heights that can be used in a filter (to avoid querying + # the entire chain) + # + # type: uint64 + # env var: LOTUS_EVENTS_MAXFILTERHEIGHTRANGE + #MaxFilterHeightRange = 2880 + + # DatabasePath is the full path to a sqlite database that will be used to index actor events to + # support the historic filter APIs. If the database does not exist it will be created. The directory containing + # the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as + # relative to the CWD (current working directory). 
+ # + # type: string + # env var: LOTUS_EVENTS_DATABASEPATH + #DatabasePath = "" [Index] diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml index a65e82e95..17fd24fa3 100644 --- a/documentation/en/default-lotus-miner-config.toml +++ b/documentation/en/default-lotus-miner-config.toml @@ -702,6 +702,30 @@ # env var: LOTUS_SEALING_USESYNTHETICPOREP #UseSyntheticPoRep = false + # Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3). + # + # type: bool + # env var: LOTUS_SEALING_REQUIREACTIVATIONSUCCESS + #RequireActivationSuccess = false + + # Whether to abort if any piece activation notification returns a non-zero exit code (newly sealed sectors, only with ProveCommitSectors3). + # + # type: bool + # env var: LOTUS_SEALING_REQUIREACTIVATIONSUCCESSUPDATE + #RequireActivationSuccessUpdate = false + + # Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3). + # + # type: bool + # env var: LOTUS_SEALING_REQUIRENOTIFICATIONSUCCESS + #RequireNotificationSuccess = false + + # Whether to abort if any piece activation notification returns a non-zero exit code (updating sectors, only with ProveReplicaUpdates3). + # + # type: bool + # env var: LOTUS_SEALING_REQUIRENOTIFICATIONSUCCESSUPDATE + #RequireNotificationSuccessUpdate = false + [Storage] # type: int diff --git a/documentation/en/default-lotus-provider-config.toml b/documentation/en/default-lotus-provider-config.toml index 8573fbda1..91606e503 100644 --- a/documentation/en/default-lotus-provider-config.toml +++ b/documentation/en/default-lotus-provider-config.toml @@ -11,14 +11,6 @@ # type: int #WinningPostMaxTasks = 0 - # type: bool - #EnableWebGui = false - - # The address that should listen for Web GUI requests. 
- # - # type: string - #GuiAddress = ":4701" - [Fees] # type: types.FIL diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 4176a1e66..b715c9403 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 4176a1e662e865834bfdc5861da921dc2d272b45 +Subproject commit b715c9403faf919e95fdc702cd651e842f18d890 diff --git a/gateway/node.go b/gateway/node.go index f2464d274..e9c695c4a 100644 --- a/gateway/node.go +++ b/gateway/node.go @@ -146,6 +146,10 @@ type TargetAPI interface { Web3ClientVersion(ctx context.Context) (string, error) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error) EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) + + GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) + SubscribeActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) + ChainGetEvents(ctx context.Context, eventsRoot cid.Cid) ([]types.Event, error) } var _ TargetAPI = *new(api.FullNode) // gateway depends on latest diff --git a/gateway/proxy_fil.go b/gateway/proxy_fil.go index eb8a354ed..9daa0796d 100644 --- a/gateway/proxy_fil.go +++ b/gateway/proxy_fil.go @@ -437,6 +437,37 @@ func (gw *Node) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64 return gw.target.StateWaitMsg(ctx, msg, confidence, limit, allowReplaced) } +func (gw *Node) GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) { + if err := gw.limit(ctx, stateRateLimitTokens); err != nil { + return nil, err + } + if filter != nil && filter.FromHeight != nil { + if err := gw.checkTipSetHeight(ctx, *filter.FromHeight, types.EmptyTSK); err != nil { + return nil, err + } + } + return gw.target.GetActorEventsRaw(ctx, filter) +} + +func (gw *Node) SubscribeActorEventsRaw(ctx context.Context, filter 
*types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + if err := gw.limit(ctx, stateRateLimitTokens); err != nil { + return nil, err + } + if filter != nil && filter.FromHeight != nil { + if err := gw.checkTipSetHeight(ctx, *filter.FromHeight, types.EmptyTSK); err != nil { + return nil, err + } + } + return gw.target.SubscribeActorEventsRaw(ctx, filter) +} + +func (gw *Node) ChainGetEvents(ctx context.Context, eventsRoot cid.Cid) ([]types.Event, error) { + if err := gw.limit(ctx, chainRateLimitTokens); err != nil { + return nil, err + } + return gw.target.ChainGetEvents(ctx, eventsRoot) +} + func (gw *Node) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) { if err := gw.limit(ctx, stateRateLimitTokens); err != nil { return nil, err diff --git a/gen/inlinegen-data.json b/gen/inlinegen-data.json index cf72d24fa..70c8fff61 100644 --- a/gen/inlinegen-data.json +++ b/gen/inlinegen-data.json @@ -1,7 +1,7 @@ { - "actorVersions": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], - "latestActorsVersion": 12, + "actorVersions": [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], + "latestActorsVersion": 13, - "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], - "latestNetworkVersion": 21 + "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], + "latestNetworkVersion": 22 } diff --git a/gen/main.go b/gen/main.go index 942b3ac2c..f1fe3876a 100644 --- a/gen/main.go +++ b/gen/main.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/cmd/lotus-shed/shedgen" "github.com/filecoin-project/lotus/node/hello" "github.com/filecoin-project/lotus/paychmgr" + "github.com/filecoin-project/lotus/storage/pipeline/piece" sectorstorage "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -38,6 +39,7 @@ func main() { types.EventEntry{}, // Tracing types.GasTrace{}, + 
types.ActorTrace{}, types.MessageTrace{}, types.ReturnTrace{}, types.ExecutionTrace{}, @@ -63,9 +65,7 @@ func main() { api.SealedRefs{}, api.SealTicket{}, api.SealSeed{}, - api.PieceDealInfo{}, api.SectorPiece{}, - api.DealSchedule{}, ) if err != nil { fmt.Println(err) @@ -110,6 +110,15 @@ func main() { os.Exit(1) } + err = gen.WriteMapEncodersToFile("./storage/pipeline/piece/cbor_gen.go", "piece", + piece.PieceDealInfo{}, + piece.DealSchedule{}, + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + err = gen.WriteMapEncodersToFile("./storage/sealer/cbor_gen.go", "sealer", sectorstorage.Call{}, sectorstorage.WorkState{}, diff --git a/go.mod b/go.mod index c1b353fab..4623f942b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/filecoin-project/lotus -go 1.20 +go 1.21 retract v1.14.0 // Accidentally force-pushed tag, use v1.14.1+ instead. @@ -8,7 +8,7 @@ retract v1.20.2 // Wrongfully cherry picked PR, use v1.20.2+ instead. require ( contrib.go.opencensus.io/exporter/prometheus v0.4.2 - github.com/BurntSushi/toml v1.2.1 + github.com/BurntSushi/toml v1.3.0 github.com/DataDog/zstd v1.4.5 github.com/GeertJohan/go.rice v1.0.3 github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee @@ -22,14 +22,14 @@ require ( github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e github.com/dgraph-io/badger/v2 v2.2007.4 github.com/docker/go-units v0.5.0 - github.com/drand/drand v1.4.9 - github.com/drand/kyber v1.1.15 + github.com/drand/drand v1.5.7 + github.com/drand/kyber v1.2.0 github.com/dustin/go-humanize v1.0.1 github.com/elastic/go-elasticsearch/v7 v7.14.0 github.com/elastic/go-sysinfo v1.7.0 github.com/elastic/gosigar v0.14.2 github.com/etclabscore/go-openrpc-reflect v0.0.36 - github.com/fatih/color v1.13.0 + github.com/fatih/color v1.15.0 github.com/filecoin-project/dagstore v0.5.2 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20220519234331-bfd1f5f9fe38 github.com/filecoin-project/go-address v1.1.0 @@ -37,19 +37,21 @@ require ( 
github.com/filecoin-project/go-bitfield v0.2.4 github.com/filecoin-project/go-cbor-util v0.0.1 github.com/filecoin-project/go-commp-utils v0.1.3 + github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 github.com/filecoin-project/go-crypto v0.0.1 github.com/filecoin-project/go-data-transfer/v2 v2.0.0-rc7 github.com/filecoin-project/go-fil-commcid v0.1.0 github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 github.com/filecoin-project/go-fil-markets v1.28.3 + github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 github.com/filecoin-project/go-jsonrpc v0.3.1 github.com/filecoin-project/go-padreader v0.0.1 github.com/filecoin-project/go-paramfetch v0.0.4 - github.com/filecoin-project/go-state-types v0.12.8 + github.com/filecoin-project/go-state-types v0.13.1 github.com/filecoin-project/go-statemachine v1.0.3 github.com/filecoin-project/go-statestore v0.2.0 github.com/filecoin-project/go-storedcounter v0.1.0 - github.com/filecoin-project/kubo-api-client v0.0.1 + github.com/filecoin-project/kubo-api-client v0.27.0 github.com/filecoin-project/pubsub v1.0.0 github.com/filecoin-project/specs-actors v0.9.15 github.com/filecoin-project/specs-actors/v2 v2.3.6 @@ -65,20 +67,23 @@ require ( github.com/georgysavva/scany/v2 v2.0.0 github.com/go-openapi/spec v0.19.11 github.com/golang/mock v1.6.0 - github.com/google/uuid v1.3.0 - github.com/gorilla/mux v1.8.0 - github.com/gorilla/websocket v1.5.0 + github.com/google/uuid v1.5.0 + github.com/gorilla/mux v1.8.1 + github.com/gorilla/websocket v1.5.1 github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487 github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e + github.com/hashicorp/go-hclog v1.3.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru/arc/v2 v2.0.5 - github.com/hashicorp/golang-lru/v2 v2.0.5 + github.com/hashicorp/golang-lru/v2 v2.0.7 + github.com/hashicorp/raft v1.3.10 + 
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab github.com/ipfs/bbloom v0.0.4 - github.com/ipfs/boxo v0.10.1 - github.com/ipfs/go-block-format v0.1.2 + github.com/ipfs/boxo v0.18.0 + github.com/ipfs/go-block-format v0.2.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-cidutil v0.1.0 github.com/ipfs/go-datastore v0.6.0 @@ -86,19 +91,18 @@ require ( github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-ds-measure v0.2.0 github.com/ipfs/go-fs-lock v0.0.7 - github.com/ipfs/go-graphsync v0.14.6 + github.com/ipfs/go-graphsync v0.16.0 github.com/ipfs/go-ipfs-blocksutil v0.0.1 - github.com/ipfs/go-ipfs-exchange-offline v0.3.0 - github.com/ipfs/go-ipld-cbor v0.0.6 - github.com/ipfs/go-ipld-format v0.5.0 + github.com/ipfs/go-ipld-cbor v0.1.0 + github.com/ipfs/go-ipld-format v0.6.0 github.com/ipfs/go-log/v2 v2.5.1 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-metrics-prometheus v0.0.2 - github.com/ipfs/go-unixfsnode v1.7.1 + github.com/ipfs/go-unixfsnode v1.9.0 github.com/ipld/go-car v0.6.1 - github.com/ipld/go-car/v2 v2.10.1 + github.com/ipld/go-car/v2 v2.13.1 github.com/ipld/go-codec-dagpb v1.6.0 - github.com/ipld/go-ipld-prime v0.20.0 + github.com/ipld/go-ipld-prime v0.21.0 github.com/ipld/go-ipld-selector-text-lite v0.0.1 github.com/ipni/go-libipni v0.0.8 github.com/ipni/index-provider v0.12.0 @@ -107,19 +111,22 @@ require ( github.com/kelseyhightower/envconfig v1.4.0 github.com/koalacxr/quantile v0.0.1 github.com/libp2p/go-buffer-pool v0.1.0 - github.com/libp2p/go-libp2p v0.31.1 - github.com/libp2p/go-libp2p-kad-dht v0.24.0 - github.com/libp2p/go-libp2p-pubsub v0.9.3 + github.com/libp2p/go-libp2p v0.33.2 + github.com/libp2p/go-libp2p-consensus v0.0.1 + github.com/libp2p/go-libp2p-gorpc v0.6.0 + github.com/libp2p/go-libp2p-kad-dht v0.25.2 + github.com/libp2p/go-libp2p-pubsub v0.10.0 + 
github.com/libp2p/go-libp2p-raft v0.4.0 github.com/libp2p/go-libp2p-record v0.2.0 - github.com/libp2p/go-libp2p-routing-helpers v0.7.0 + github.com/libp2p/go-libp2p-routing-helpers v0.7.3 github.com/libp2p/go-maddr-filter v0.1.0 github.com/libp2p/go-msgio v0.3.0 - github.com/mattn/go-isatty v0.0.19 + github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.16 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.1.0 - github.com/multiformats/go-multiaddr v0.12.1 + github.com/multiformats/go-multiaddr v0.12.3 github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.0 @@ -129,82 +136,84 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pkg/errors v0.9.1 github.com/polydawn/refmt v0.89.0 - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.18.0 github.com/puzpuzpuz/xsync/v2 v2.4.0 github.com/raulk/clock v1.1.0 github.com/raulk/go-watchdog v1.3.0 - github.com/samber/lo v1.38.1 - github.com/stretchr/testify v1.8.4 + github.com/samber/lo v1.39.0 + github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed + github.com/triplewz/poseidon v0.0.0-20220525065023-a7cdb0e183e7 github.com/urfave/cli/v2 v2.25.5 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba - github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f + github.com/whyrusleeping/cbor-gen v0.1.0 github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4 github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 github.com/xeipuuv/gojsonschema v1.2.0 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 github.com/zyedidia/generic v1.2.1 go.opencensus.io v0.24.0 - go.opentelemetry.io/otel 
v1.16.0 + go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/bridge/opencensus v0.39.0 go.opentelemetry.io/otel/exporters/jaeger v1.14.0 - go.opentelemetry.io/otel/sdk v1.16.0 + go.opentelemetry.io/otel/sdk v1.21.0 go.uber.org/atomic v1.11.0 - go.uber.org/fx v1.20.0 + go.uber.org/fx v1.20.1 go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.25.0 - golang.org/x/crypto v0.17.0 - golang.org/x/net v0.14.0 - golang.org/x/sync v0.3.0 - golang.org/x/sys v0.15.0 - golang.org/x/term v0.15.0 - golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 - golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 + go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.19.0 + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a + golang.org/x/net v0.21.0 + golang.org/x/sync v0.6.0 + golang.org/x/sys v0.17.0 + golang.org/x/term v0.17.0 + golang.org/x/time v0.5.0 + golang.org/x/tools v0.18.0 + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible ) require ( github.com/GeertJohan/go.incremental v1.0.0 // indirect + github.com/Jorropo/jsync v1.0.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/StackExchange/wmi v1.2.1 // indirect github.com/akavel/rsrc v0.8.0 // indirect - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect + github.com/armon/go-metrics v0.3.9 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bep/debounce v1.2.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/boltdb/bolt v1.3.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cilium/ebpf v0.9.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // 
indirect - github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/daaku/go.zipexe v1.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect - github.com/dgraph-io/ristretto v0.1.0 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/drand/kyber-bls12381 v0.2.3 // indirect + github.com/drand/kyber-bls12381 v0.3.1 // indirect github.com/elastic/go-windows v1.0.0 // indirect github.com/etclabscore/go-jsonschema-walk v0.0.6 // indirect github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 // indirect github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 // indirect - github.com/filecoin-project/go-commp-utils/nonffi v0.0.0-20220905160352-62059082a837 // indirect github.com/filecoin-project/go-ds-versioning v0.1.2 // indirect github.com/filecoin-project/go-hamt-ipld v0.1.5 // indirect github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 // indirect - github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 // indirect - github.com/flynn/noise v1.0.0 // indirect + github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gdamore/encoding v1.0.0 // indirect github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-openapi/jsonpointer v0.19.3 // indirect @@ -213,22 +222,24 @@ require ( github.com/go-task/slim-sprig 
v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect + github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/hannahhoward/cbor-gen-for v0.0.0-20230214144701-5d17c9d5243c // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/golang-lru v0.6.0 // indirect - github.com/huin/goupnp v1.2.0 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/iancoleman/orderedmap v0.1.0 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-blockservice v0.5.1 // indirect github.com/ipfs/go-ipfs-blockstore v1.3.0 // indirect - github.com/ipfs/go-ipfs-cmds v0.9.0 // indirect + github.com/ipfs/go-ipfs-cmds v0.10.0 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect github.com/ipfs/go-ipfs-ds-help v1.1.0 // indirect github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect @@ -254,14 +265,14 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/kilic/bls12-381 v0.1.0 // indirect - github.com/klauspost/compress v1.16.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/klauspost/compress v1.17.6 // indirect + github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/koron/go-ssdp v0.0.4 // 
indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect github.com/libp2p/go-libp2p-gostream v0.6.0 // indirect - github.com/libp2p/go-libp2p-kbucket v0.6.1 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect @@ -272,40 +283,40 @@ require ( github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-runewidth v0.0.10 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.55 // indirect + github.com/miekg/dns v1.1.58 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multistream v0.4.1 // indirect + github.com/multiformats/go-multistream v0.5.0 // indirect github.com/nikkolasg/hexjson v0.1.0 // indirect github.com/nkovacs/streamquote v1.0.0 // indirect - github.com/onsi/ginkgo/v2 v2.11.0 // indirect - github.com/opencontainers/runtime-spec v1.1.0 // indirect + github.com/onsi/ginkgo/v2 v2.15.0 // indirect + github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - 
github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/common v0.47.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/statsd_exporter v0.22.7 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-20 v0.3.3 // indirect - github.com/quic-go/quic-go v0.38.2 // indirect - github.com/quic-go/webtransport-go v0.5.3 // indirect + github.com/quic-go/quic-go v0.42.0 // indirect + github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/rivo/uniseg v0.1.0 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v2.18.12+incompatible // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.2 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/tidwall/gjson v1.14.4 // indirect github.com/twmb/murmur3 v1.1.6 // indirect + github.com/ugorji/go/codec v1.2.11 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.0.1 // indirect + github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect @@ -314,18 +325,18 @@ require ( github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/zondax/hid v0.9.1 // indirect github.com/zondax/ledger-go v0.12.1 // indirect - go.opentelemetry.io/otel/metric v1.16.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/sdk/metric v0.39.0 // indirect - go.opentelemetry.io/otel/trace v1.16.0 // indirect - go.uber.org/dig v1.17.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.uber.org/dig 
v1.17.1 // indirect + go.uber.org/mock v0.4.0 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect - golang.org/x/mod v0.12.0 // indirect + golang.org/x/mod v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect - gonum.org/v1/gonum v0.13.0 // indirect - google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect - google.golang.org/grpc v1.55.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + gonum.org/v1/gonum v0.14.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect + google.golang.org/grpc v1.60.1 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect @@ -335,3 +346,5 @@ require ( replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi replace github.com/filecoin-project/test-vectors => ./extern/test-vectors + +replace github.com/triplewz/poseidon => github.com/magik6k/poseidon v0.0.0-neptune diff --git a/go.sum b/go.sum index a7983d80c..4fe32e664 100644 --- a/go.sum +++ b/go.sum @@ -44,10 +44,13 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= 
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.0 h1:Ws8e5YmnrGEHzZEzg0YvK/7COGYtTC5PbaH9oSSbgfA= +github.com/BurntSushi/toml v1.3.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= @@ -57,6 +60,8 @@ github.com/GeertJohan/go.rice v1.0.3 h1:k5viR+xGtIhF61125vCE1cmJ5957RQGXG6dmbaWZ github.com/GeertJohan/go.rice v1.0.3/go.mod h1:XVdrU4pW00M4ikZed5q56tPf1v2KwnIKeIdc9CBYNt4= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K172oDhSKU0dJ/miJramo9NITOMyZQ= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= @@ -89,14 +94,20 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/ardanlabs/darwin/v2 v2.0.0 h1:XCisQMgQ5EG+ZvSEcADEo+pyfIMKyWAGnn5o2TgriYE= +github.com/ardanlabs/darwin/v2 v2.0.0/go.mod h1:MubZ2e9DAYGaym0mClSOi183NYahrrfKxvSy1HMhoes= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib 
v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= @@ -113,6 +124,10 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY= github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -121,6 +136,7 @@ github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcug github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= +github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod 
h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -157,10 +173,13 @@ github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/cockroach-go/v2 v2.2.0 h1:/5znzg5n373N/3ESjHF5SMLxiW4RKB05Ql//KWfeTFs= +github.com/cockroachdb/cockroach-go/v2 v2.2.0/go.mod h1:u3MiKYGupPPjkn3ozknpMUpxPaNLTFWAya419/zv6eI= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= @@ -184,8 +203,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= 
-github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= +github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= @@ -200,6 +219,7 @@ github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQY github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= +github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= @@ -210,13 +230,14 @@ github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhY github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= 
github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -225,19 +246,12 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUn github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand v1.4.9 h1:WE8Jf/l+7B/rheCMCLZTp5xk0/a05t+ciwBvORq9jXM= -github.com/drand/drand v1.4.9/go.mod h1:vsmJ/kDoVLv1NC0nFihzBPmIFvMGmYtgJewzRBBRVSc= -github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= -github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber v1.1.4/go.mod 
h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= -github.com/drand/kyber v1.1.10/go.mod h1:UkHLsI4W6+jT5PvNxmc0cvQAgppjTUpX+XCsN9TXmRo= -github.com/drand/kyber v1.1.15 h1:YNL02FPOA98GmlIhh5FuEJWhz1ZCp6tOUVFN7ujBJPE= -github.com/drand/kyber v1.1.15/go.mod h1:tw0l70U6aWCkc4vDr8u/canpOOOiUNJlzsmeElhBfe0= -github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= -github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= -github.com/drand/kyber-bls12381 v0.2.3 h1:wueWtqjj71wnwm6fYR8MAQk4q8bKVK9WukrGGcaVxzk= -github.com/drand/kyber-bls12381 v0.2.3/go.mod h1:FsudUZf6Xu61u/gYrDHEHf6lKIKluJdnX7WJe4hkMh4= +github.com/drand/drand v1.5.7 h1:5f2D5aH1nEfVI9S6tl2p9bgIDMZ92oltmiY12Kh+eYU= +github.com/drand/drand v1.5.7/go.mod h1:jrJ0244yOHNL5V04vazk3mFatjAWm3i6dg6URWwgbXk= +github.com/drand/kyber v1.2.0 h1:22SbBxsKbgQnJUoyYKIfG909PhBsj0vtANeu4BX5xgE= +github.com/drand/kyber v1.2.0/go.mod h1:6TqFlCc7NGOiNVTF9pF2KcDRfllPd9XOkExuG5Xtwfo= +github.com/drand/kyber-bls12381 v0.3.1 h1:KWb8l/zYTP5yrvKTgvhOrk2eNPscbMiUOIeWBnmUxGo= +github.com/drand/kyber-bls12381 v0.3.1/go.mod h1:H4y9bLPu7KZA/1efDg+jtJ7emKx+ro3PU7/jWUVt140= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -266,9 +280,11 @@ github.com/etclabscore/go-openrpc-reflect v0.0.36 h1:kSqNB2U8RVoW4si+4fsv13NGNkR github.com/etclabscore/go-openrpc-reflect v0.0.36/go.mod h1:0404Ky3igAasAOpyj1eESjstTyneBAIk5PgJFbK4s5E= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod 
h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/filecoin-project/dagstore v0.5.2 h1:Nd6oXdnolbbVhpMpkYT5PJHOjQp4OBSntHpMV5pxj3c= github.com/filecoin-project/dagstore v0.5.2/go.mod h1:mdqKzYrRBHf1pRMthYfMv3n37oOw0Tkx7+TxPt240M0= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= @@ -323,6 +339,7 @@ github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MU github.com/filecoin-project/go-paramfetch v0.0.4 h1:H+Me8EL8T5+79z/KHYQQcT8NVOzYVqXIi7nhb48tdm8= github.com/filecoin-project/go-paramfetch v0.0.4/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-retrieval-types v1.2.0 h1:fz6DauLVP3GRg7UuW7HZ6sE+GTmaUW70DTXBF1r9cK0= +github.com/filecoin-project/go-retrieval-types v1.2.0/go.mod h1:ojW6wSw2GPyoRDBGqw1K6JxUcbfa5NOSIiyQEeh7KK0= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= @@ -330,8 +347,8 @@ github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psS github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q= github.com/filecoin-project/go-state-types v0.11.2-0.20230712101859-8f37624fa540/go.mod 
h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8= -github.com/filecoin-project/go-state-types v0.12.8 h1:W/UObdAsv+LbB9EfyLg92DSYoatzUWmlfV8FGyh30VA= -github.com/filecoin-project/go-state-types v0.12.8/go.mod h1:gR2NV0CSGSQwopxF+3In9nDh1sqvoYukLcs5vK0AHCA= +github.com/filecoin-project/go-state-types v0.13.1 h1:4CivvlcHAIoAtFFVVlZtokynaMQu5XLXGoTKhQkfG1I= +github.com/filecoin-project/go-state-types v0.13.1/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.3 h1:N07o6alys+V1tNoSTi4WuuoeNC4erS/6jE74+NsgQuk= github.com/filecoin-project/go-statemachine v1.0.3/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= @@ -340,8 +357,8 @@ github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNd github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus= github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8= -github.com/filecoin-project/kubo-api-client v0.0.1 h1:IR1b+sm+VYxSRvbgECVv9SbhIgygcXcSoN1Q7xsHDXg= -github.com/filecoin-project/kubo-api-client v0.0.1/go.mod h1:c36PPMIVOkKfHDwDG5U05gUlPRY9wNuh/BePwo0e+6Y= +github.com/filecoin-project/kubo-api-client v0.27.0 h1:rQNbReJCCQ8L107VIQR0qjAlEqdDQRYOhDKYcKGcnPI= +github.com/filecoin-project/kubo-api-client v0.27.0/go.mod h1:1+geFlaV8oJRJ4IlVTqL3QC3T1f5N0aGSptErrtcMQs= github.com/filecoin-project/pubsub v1.0.0 h1:ZTmT27U07e54qV1mMiQo4HDr0buo8I1LDHBYLXlsNXM= github.com/filecoin-project/pubsub v1.0.0/go.mod h1:GkpB33CcUtUNrLPhJgfdy4FDx4OMNR9k+46DHx/Lqrg= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= @@ -365,8 +382,9 @@ github.com/filecoin-project/specs-actors/v8 v8.0.1/go.mod 
h1:UYIPg65iPWoFw5NEftR github.com/filecoin-project/test-vectors/schema v0.0.7 h1:hhrcxLnQR2Oe6fjk63hZXG1fWQGyxgCVXOOlAlR/D9A= github.com/filecoin-project/test-vectors/schema v0.0.7/go.mod h1:WqdmeJrz0V37wp7DucRR/bvrScZffqaCyIk9G0BGw1o= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= +github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= +github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -374,7 +392,8 @@ github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -391,6 +410,7 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME 
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs= +github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -405,12 +425,13 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= @@ -445,8 +466,10 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= @@ -455,9 +478,10 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= +github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= 
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -515,8 +539,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -532,15 +557,15 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= -github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b/go.mod 
h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= +github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -549,27 +574,30 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod 
h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487 h1:NyaWOSkqFK1d9o+HLfnMIGzrHuUUPeBNIZyi5Zoe/lY= github.com/gregdhill/go-openrpc v0.0.0-20220114144539-ae6f44720487/go.mod h1:a1eRkbhd3DYpRH2lnuUsVG+QMTI+v0hGnsis8C9hMrA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod 
h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 h1:BpJ2o0OR5FV7vrkDYfXYVJQeMNWa8RhklZOpW2ITAIQ= @@ -583,38 +611,52 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.3.0 h1:G0ACM8Z2WilWgPv3Vdzwm3V0BQu/kSmrkVtpe1fy9do= +github.com/hashicorp/go-hclog v1.3.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= 
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= -github.com/hashicorp/golang-lru v0.6.0/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru 
v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= -github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= -github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/raft v1.3.10 h1:LR5QZX1VQd0DFWZfeCwWawyeKfpS/Tm1yjnJIY5X4Tw= +github.com/hashicorp/raft v1.3.10/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= -github.com/huin/goupnp v1.2.0/go.mod 
h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/iancoleman/orderedmap v0.1.0 h1:2orAxZBJsvimgEBmMWfXaFlzSG2fbQil5qzP3F6cCkg= @@ -633,19 +675,20 @@ github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.10.1 h1:q0ZhbyN6iNZLipd6txt1xotCiP/icfvdAQ4YpUi+cL4= -github.com/ipfs/boxo v0.10.1/go.mod h1:1qgKq45mPRCxf4ZPoJV2lnXxyxucigILMJOrQrVivv8= +github.com/ipfs/boxo v0.18.0 h1:MOL9/AgoV3e7jlVMInicaSdbgralfqSsbkc31dZ9tmw= +github.com/ipfs/boxo v0.18.0/go.mod h1:pIZgTWdm3k3pLF9Uq6MB8JEcW07UDwNJjlXW1HELW80= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitfield v1.1.0/go.mod h1:paqf1wjq/D2BBmzfTVFlJQ9IlFOZpg422HL0HqsGWHU= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= github.com/ipfs/go-bitswap v0.11.0 h1:j1WVvhDX1yhG32NTC9xfxnqycqYIlhzEzLXG/cU1HyQ= +github.com/ipfs/go-bitswap v0.11.0/go.mod h1:05aE8H3XOU+LXpTedeAS0OZpcO1WFsj5niYQH9a1Tmk= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2/go.mod 
h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= -github.com/ipfs/go-block-format v0.1.2 h1:GAjkfhVx1f4YTODS6Esrj1wt2HhrtwTnhEr+DyPUaJo= -github.com/ipfs/go-block-format v0.1.2/go.mod h1:mACVcrxarQKstUU3Yf/RdwbC4DzPV6++rO2a3d+a/KE= +github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= +github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-blockservice v0.5.1 h1:9pAtkyKAz/skdHTh0kH8VulzWp+qmSDD0aI17TYP/s0= @@ -686,6 +729,7 @@ github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaH github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= github.com/ipfs/go-ds-badger2 v0.1.3 h1:Zo9JicXJ1DmXTN4KOw7oPXkspZ0AWHcAFCP1tQKnegg= github.com/ipfs/go-ds-badger2 v0.1.3/go.mod h1:TPhhljfrgewjbtuL/tczP8dNrBYwwk+SdPYbms/NO9w= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= @@ -698,8 +742,8 @@ github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9 github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= github.com/ipfs/go-fs-lock v0.0.7 h1:6BR3dajORFrFTkb5EpCUFIAypsoxpGpDSVUdFwzgL9U= github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= -github.com/ipfs/go-graphsync v0.14.6 h1:NPxvuUy4Z08Mg8dwpBzwgbv/PGLIufSJ1sle6iAX8yo= -github.com/ipfs/go-graphsync 
v0.14.6/go.mod h1:yT0AfjFgicOoWdAlUJ96tQ5AkuGI4r1taIQX/aHbBQo= +github.com/ipfs/go-graphsync v0.16.0 h1:0BX7whXlV13Y9FZ/jRg+xaGHaGYbtGxGppKD6tncw6k= +github.com/ipfs/go-graphsync v0.16.0/go.mod h1:WfbMW3hhmX5GQEQ+KJxsFzVJVBKgC5szfrYK7Zc7xIM= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= @@ -711,8 +755,9 @@ github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IW github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= -github.com/ipfs/go-ipfs-cmds v0.9.0 h1:K0VcXg1l1k6aY6sHnoxYcyimyJQbcV1ueXuWgThmK9Q= -github.com/ipfs/go-ipfs-cmds v0.9.0/go.mod h1:SBFHK8WNwC416QWH9Vz1Ql42SSMAOqKpaHUMBu3jpLo= +github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= +github.com/ipfs/go-ipfs-cmds v0.10.0 h1:ZB4+RgYaH4UARfJY0uLKl5UXgApqnRjKbuCiJVcErYk= +github.com/ipfs/go-ipfs-cmds v0.10.0/go.mod h1:sX5d7jkCft9XLPnkgEfXY0z2UBOB5g6fh/obBS0enJE= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= @@ -731,6 +776,7 @@ github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.3.0 
h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ= +github.com/ipfs/go-ipfs-files v0.3.0/go.mod h1:xAUtYMwB+iu/dtf6+muHNSFQCJG2dSiStR2P6sn9tIM= github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= @@ -740,6 +786,7 @@ github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3 github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= +github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= @@ -749,15 +796,16 @@ github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= -github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.1.0 h1:dx0nS0kILVivGhfWuB6dUpMa/LAwElHPw1yOGYopoYs= +github.com/ipfs/go-ipld-cbor v0.1.0/go.mod h1:U2aYlmVrJr2wsUBU67K4KgepApSZddGRDWBYR0H4sCk= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= 
github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= github.com/ipfs/go-ipld-format v0.4.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= -github.com/ipfs/go-ipld-format v0.5.0 h1:WyEle9K96MSrvr47zZHKKcDxJ/vlpET6PSiQsAFO+Ds= -github.com/ipfs/go-ipld-format v0.5.0/go.mod h1:ImdZqJQaEouMjCvqCe0ORUS+uoBmf7Hf+EO/jh+nk3M= +github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= +github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= @@ -799,8 +847,9 @@ github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVzte github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= -github.com/ipfs/go-unixfsnode v1.7.1 h1:RRxO2b6CSr5UQ/kxnGzaChTjp5LWTdf3Y4n8ANZgB/s= -github.com/ipfs/go-unixfsnode v1.7.1/go.mod h1:PVfoyZkX1B34qzT3vJO4nsLUpRCyhnMuHBznRcXirlk= +github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= +github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= +github.com/ipfs/go-unixfsnode v1.9.0/go.mod h1:HxRu9HYHOjK6HUqFBAi++7DVoWAHn0o4v/nZ/VA+0g8= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= github.com/ipfs/go-verifcid v0.0.2 
h1:XPnUv0XmdH+ZIhLGKg6U2vaPaRDXb9urMyNVCE7uvTs= github.com/ipfs/go-verifcid v0.0.2/go.mod h1:40cD9x1y4OWnFXbLNJYRe7MpNvWlMn3LZAG5Wb4xnPU= @@ -808,8 +857,8 @@ github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBH github.com/ipld/go-car v0.6.1 h1:blWbEHf1j62JMWFIqWE//YR0m7k5ZMw0AuUOU5hjrH8= github.com/ipld/go-car v0.6.1/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8= github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= -github.com/ipld/go-car/v2 v2.10.1 h1:MRDqkONNW9WRhB79u+Z3U5b+NoN7lYA5B8n8qI3+BoI= -github.com/ipld/go-car/v2 v2.10.1/go.mod h1:sQEkXVM3csejlb1kCCb+vQ/pWBKX9QtvsrysMQjOgOg= +github.com/ipld/go-car/v2 v2.13.1 h1:KnlrKvEPEzr5IZHKTXLAEub+tPrzeAFQVRlSQvuxBO4= +github.com/ipld/go-car/v2 v2.13.1/go.mod h1:QkdjjFNGit2GIkpQ953KBwowuoukoM75nP/JI1iDJdo= github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= @@ -823,11 +872,12 @@ github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/ github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= github.com/ipld/go-ipld-prime v0.19.0/go.mod h1:Q9j3BaVXwaA3o5JUDNvptDDr/x8+F7FG6XJ8WI3ILg4= -github.com/ipld/go-ipld-prime v0.20.0 h1:Ud3VwE9ClxpO2LkCYP7vWPc0Fo+dYdYzgxUJZ3uRG4g= -github.com/ipld/go-ipld-prime v0.20.0/go.mod h1:PzqZ/ZR981eKbgdr3y2DJYeD/8bgMawdGVlJDE8kK+M= +github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= +github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= 
github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= github.com/ipni/go-libipni v0.0.8 h1:0wLfZRSBG84swmZwmaLKul/iB/FlBkkl9ZcR1ub+Z+w= @@ -866,11 +916,14 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= +github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod 
h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -879,6 +932,7 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -893,9 +947,6 @@ github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2vi github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= -github.com/kilic/bls12-381 v0.0.0-20200607163746-32e1441c8a9f/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= -github.com/kilic/bls12-381 v0.0.0-20200731194930-64c428e1bff5/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= -github.com/kilic/bls12-381 v0.0.0-20200820230200-6b2c19996391/go.mod h1:XXfR6YFCRSrkEXbNlIyDsgXVNJWVUV30m/ebkVy9n6s= github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= @@ -904,14 +955,13 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool 
v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= +github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= +github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koalacxr/quantile v0.0.1 h1:wAW+SQ286Erny9wOjVww96t8ws+x5Zj6AKHDULUK+o0= github.com/koalacxr/quantile v0.0.1/go.mod h1:bGN/mCZLZ4lrSDHRQ6Lglj9chowGux8sGUIND+DQeD0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -927,6 +977,7 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -935,7 +986,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= @@ -960,10 +1012,10 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.31.1 h1:mUiFPwdzC2zMLIATKVddjCuPXVbtC3BsKKVPMs4+jzY= -github.com/libp2p/go-libp2p v0.31.1/go.mod h1:+9TCv+XySSOdaxPF1WIgTK8rXP9jBb8WbemlMCSXGsU= -github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= -github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= +github.com/libp2p/go-libp2p v0.33.2 
h1:vCdwnFxoGOXMKmaGHlDSnL4bM3fQeW8pgIa9DECnb40= +github.com/libp2p/go-libp2p v0.33.2/go.mod h1:zTeppLuCvUIkT118pFVzA8xzP/p2dJYOMApCkFh0Yww= +github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= +github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= @@ -977,6 +1029,8 @@ github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFk github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= +github.com/libp2p/go-libp2p-consensus v0.0.1 h1:jcVbHRZLwTXU9iT/mPi+Lx4/OrIzq3bU1TbZNhYFCV8= +github.com/libp2p/go-libp2p-consensus v0.0.1/go.mod h1:+9Wrfhc5QOqWB0gXI0m6ARlkHfdJpcFXmRU0WoHz4Mo= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= @@ -1005,12 +1059,14 @@ github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFT github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-gorpc v0.6.0 h1:Z3ODCzbKe+2lUtEjRc+W+l8Olj63r68G5w1wrQ9ZsOw= 
+github.com/libp2p/go-libp2p-gorpc v0.6.0/go.mod h1:jGTsI/yn1xL/9VupJ+DIXo8ExobWDKjwVdjNAfhFKxk= github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qkCnjyaZUPYU= github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA= -github.com/libp2p/go-libp2p-kad-dht v0.24.0 h1:nZnFDQEFU4N8GzclnR+IGxIgR7k4PPCDk/GK9A28onk= -github.com/libp2p/go-libp2p-kad-dht v0.24.0/go.mod h1:lfu5T01EH+r6uDZ/8G+ObhwgzVyd0b1nb54AdT8XGhc= -github.com/libp2p/go-libp2p-kbucket v0.6.1 h1:Y/NIvALuY5/fJlOpaJor9Azg4eor15JskGs9Lb2EhH0= -github.com/libp2p/go-libp2p-kbucket v0.6.1/go.mod h1:dvWO707Oq/vhMVuUhyfLkw0QsOrJFETepbNfpVHSELI= +github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= +github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= @@ -1032,14 +1088,16 @@ github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= -github.com/libp2p/go-libp2p-pubsub v0.9.3 h1:ihcz9oIBMaCK9kcx+yHWm3mLAFBMAUsM4ux42aikDxo= -github.com/libp2p/go-libp2p-pubsub v0.9.3/go.mod h1:RYA7aM9jIic5VV47WXu4GkcRxRhrdElWf8xtyli+Dzc= +github.com/libp2p/go-libp2p-pubsub v0.10.0 
h1:wS0S5FlISavMaAbxyQn3dxMOe2eegMfswM471RuHJwA= +github.com/libp2p/go-libp2p-pubsub v0.10.0/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= +github.com/libp2p/go-libp2p-raft v0.4.0 h1:2atEs7/FWH35bRiLh8gTejGh5NA9u4eG7BXjpf/B+Z4= +github.com/libp2p/go-libp2p-raft v0.4.0/go.mod h1:qJCYtFBTbip2wngLxFeAb9o52XmAPi2vSIQ4hV7IpSA= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-routing-helpers v0.7.0 h1:sirOYVD0wGWjkDwHZvinunIpaqPLBXkcnXApVHwZFGA= -github.com/libp2p/go-libp2p-routing-helpers v0.7.0/go.mod h1:R289GUxUMzRXIbWGSuUUTPrlVJZ3Y/pPz495+qgXJX8= +github.com/libp2p/go-libp2p-routing-helpers v0.7.3 h1:u1LGzAMVRK9Nqq5aYDVOiq/HaB93U9WWczBzGyAC5ZY= +github.com/libp2p/go-libp2p-routing-helpers v0.7.3/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= @@ -1059,6 +1117,7 @@ github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= +github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod 
h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= @@ -1148,6 +1207,8 @@ github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0Q github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magik6k/poseidon v0.0.0-neptune h1:Dfz15iiYGGE9Esvn8pZFlbiiCrHuyZDxm6LGXQfaf9c= +github.com/magik6k/poseidon v0.0.0-neptune/go.mod h1:QYG1d0B4YZD7TgF6qZndTTu4rxUGFCCZAQRDanDj+9c= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1166,6 +1227,7 @@ github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcncea github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -1176,8 +1238,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= @@ -1185,16 +1247,14 @@ github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRC github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= 
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -1249,8 +1309,8 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.12.1 h1:vm+BA/WZA8QZDp1pF1FWhi5CT3g1tbi5GJmqpb6wnlk= -github.com/multiformats/go-multiaddr v0.12.1/go.mod h1:7mPkiBMmLeFipt+nNSq9pHZUeJSt8lHBgH6yhj0YQzE= +github.com/multiformats/go-multiaddr v0.12.3 h1:hVBXvPRcKG0w80VinQ23P5t7czWgg65BmIvQKjDydU8= +github.com/multiformats/go-multiaddr v0.12.3/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1292,8 +1352,8 @@ github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wS github.com/multiformats/go-multistream v0.1.1/go.mod 
h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= -github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= -github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= +github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= +github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -1316,8 +1376,8 @@ github.com/nikkolasg/hexjson v0.1.0 h1:Cgi1MSZVQFoJKYeRpBNEcdF3LB+Zo4fYKsDz7h8uJ github.com/nikkolasg/hexjson v0.1.0/go.mod h1:fbGbWFZ0FmJMFbpCMtJpwb0tudVxSSZ+Es2TsCg57cA= github.com/nkovacs/streamquote v1.0.0 h1:PmVIV08Zlx2lZK5fFZlMZ04eHcDTIFJCv/5/0twVUow= github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -1326,26 +1386,29 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.8.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 h1:CznVS40zms0Dj5he4ERo+fRPtO0qxUk8lA8Xu3ddet0= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= github.com/opencontainers/runtime-spec v1.0.2/go.mod 
h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= -github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= +github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= +github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -1358,6 +1421,8 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 
h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -1388,35 +1453,37 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/common v0.37.0/go.mod 
h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k= +github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1428,20 +1495,18 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/statsd_exporter v0.22.7 h1:7Pji/i2GuhK6Lu7DHrtTkFmNBCudCPT1pX2CziuyQR0= github.com/prometheus/statsd_exporter v0.22.7/go.mod h1:N/TevpjkIh9ccs6nuzY3jQn9dFqnUakOjnEuMPJJJnI= github.com/puzpuzpuz/xsync/v2 v2.4.0 h1:5sXAMHrtx1bg9nbRZTOn8T4MkWe5V+o8yKRH02Eznag= github.com/puzpuzpuz/xsync/v2 v2.4.0/go.mod h1:gD2H2krq/w52MfPLE+Uy64TzJDVY7lP2znR9qmR35kU= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod 
h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-20 v0.3.3 h1:17/glZSLI9P9fDAeyCHBFSWSqJcwx1byhLwP5eUIDCM= -github.com/quic-go/qtls-go1-20 v0.3.3/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.38.2 h1:VWv/6gxIoB8hROQJhx1JEyiegsUQ+zMN3em3kynTGdg= -github.com/quic-go/quic-go v0.38.2/go.mod h1:ijnZM7JsFIkp4cRyjxJNIzdSfCLmUMg9wdyhGmg+SN4= -github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= -github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/quic-go v0.42.0 h1:uSfdap0eveIl8KXnipv9K7nlwZ5IqLlYOpJ58u5utpM= +github.com/quic-go/quic-go v0.42.0/go.mod h1:132kz4kL3F9vxhW3CtQJLDVwcFe5wdWeJXXijhsO57M= +github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY= +github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -1453,7 +1518,8 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod 
h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -1464,11 +1530,12 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM= -github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= +github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= +github.com/sercand/kuberesolver/v4 v4.0.0 h1:frL7laPDG/lFm5n98ODmWnn+cvPpzlkf3LhzuPhcHP4= +github.com/sercand/kuberesolver/v4 v4.0.0/go.mod h1:F4RGyuRmMAjeXHKL+w4P7AwUnPceEAPAhxUgXZjKgvM= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM= github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -1500,13 +1567,14 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= +github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN23diwyr69Qs= +github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -1539,8 +1607,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 
h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1552,8 +1621,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stvp/go-udp-testing v0.0.0-20201019212854-469649b16807/go.mod h1:7jxmlfBCDBXRzr0eAQJ48XC1hBu1np4CS5+cHEYfwpc= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -1571,13 +1640,16 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed h1:C8H2ql+vCBhEi7d3vMBBbdCAKv9s/thfPyLEuSvFpMU= 
-github.com/triplewz/poseidon v0.0.0-20230828015038-79d8165c88ed/go.mod h1:QYG1d0B4YZD7TgF6qZndTTu4rxUGFCCZAQRDanDj+9c= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= +github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -1593,14 +1665,19 @@ github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49u github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/warpfork/go-testmark v0.3.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= github.com/warpfork/go-testmark v0.10.0/go.mod h1:jhEf8FVxd+F17juRubpmut64NEG6I2rgkUhlcqqXwE0= -github.com/warpfork/go-testmark v0.11.0 h1:J6LnV8KpceDvo7spaNU4+DauH2n1x+6RaO2rJrmpQ9U= +github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= +github.com/warpfork/go-testmark v0.12.1/go.mod 
h1:kHwy7wfvGSPh1rQJYKayD4AbtNaeyZdcGi9tNJTaa5Y= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/weaveworks/common v0.0.0-20220810113439-c65105d60b18 h1:JN4YR/TNWiZEAHHImrVA2u4DPI+aqPOar23ICUnYZTQ= +github.com/weaveworks/common v0.0.0-20230531151736-e2613bee6b73 h1:CMM9+/AgM77vaMXMQedzqPRMuNwjbI0EcdofPqxc9F8= +github.com/weaveworks/common v0.0.0-20230531151736-e2613bee6b73/go.mod h1:rgbeLfJUtEr+G74cwFPR1k/4N0kDeaeSv/qhUNE4hm8= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= +github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY= github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM= github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIfyDmc1Em5GqlNRzcdtlv4HTNmdpt7XH0= @@ -1619,8 +1696,8 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:f github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= 
github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f h1:SBuSxXJL0/ZJMtTxbXZgHZkThl9dNrzyaNhlyaqscRo= -github.com/whyrusleeping/cbor-gen v0.0.0-20230923211252-36a87e1ba72f/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.1.0 h1:Jneeq3V5enErVcuL0NKEbD1Gi+iOvEeFhXOV1S1Fc6g= +github.com/whyrusleeping/cbor-gen v0.1.0/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= @@ -1664,14 +1741,11 @@ github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XD github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis= go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= -go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= -go.dedis.ch/kyber/v3 v3.0.9/go.mod h1:rhNjUUg6ahf8HEg5HUvVBYoWY4boAafX8tYxX+PS+qg= -go.dedis.ch/protobuf v1.0.5/go.mod h1:eIV4wicvi6JK0q/QnfIEGeSFNG0ZeB24kzut5+HaRLo= -go.dedis.ch/protobuf v1.0.7/go.mod h1:pv5ysfkDX/EawiPqcW3ikOxsL5t+BqnV6xHSmE79KI4= go.dedis.ch/protobuf v1.0.11 h1:FTYVIEzY/bfl37lu3pR4lIj+F9Vp1jE8oh91VmxKgLo= go.dedis.ch/protobuf v1.0.11/go.mod h1:97QR256dnkimeNdfmURz0wAMNVbd1VmLXhG1CrTYrJ4= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6 
h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1686,24 +1760,24 @@ go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel/bridge/opencensus v0.39.0 h1:YHivttTaDhbZIHuPlg1sWsy2P5gj57vzqPfkHItgbwQ= go.opentelemetry.io/otel/bridge/opencensus v0.39.0/go.mod h1:vZ4537pNjFDXEx//WldAR6Ro2LC8wwmFC76njAXwNPE= go.opentelemetry.io/otel/exporters/jaeger v1.14.0 h1:CjbUNd4iN2hHmWekmOqZ+zSCU+dzZppG8XsV+A3oc8Q= go.opentelemetry.io/otel/exporters/jaeger v1.14.0/go.mod h1:4Ay9kk5vELRrbg5z4cpP9EtmQRFap2Wb0woPG4lujZA= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod 
h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= -go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/sdk/metric v0.39.0 h1:Kun8i1eYf48kHH83RucG93ffz0zGV1sh46FAScOTuDI= go.opentelemetry.io/otel/sdk/metric v0.39.0/go.mod h1:piDIRgjcK7u0HCL5pCA4e74qpK/jk3NiUoAHATVAmiI= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1711,13 +1785,16 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= -go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= 
-go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= -go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= +go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk= +go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1733,8 +1810,8 @@ go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod 
h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= @@ -1745,7 +1822,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1763,13 +1839,10 @@ golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
-golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1779,8 +1852,9 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1794,8 +1868,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod 
h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1821,8 +1895,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1883,9 
+1958,11 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1909,8 +1986,9 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 
h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1923,7 +2001,6 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1952,7 +2029,6 @@ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1978,9 +2054,7 @@ golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200926100807-9d91bd62050c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2000,30 +2074,34 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2033,14 +2111,15 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2101,16 +2180,17 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= -gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= +gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -2173,8 +2253,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917 
h1:nz5NESFLZbJGPFxDT/HCn+V1mZ8JGNoY4nUpmW/Y2eg= +google.golang.org/genproto v0.0.0-20240102182953-50ed04b92917/go.mod h1:pZqR+glSb11aJ+JQcczCvgf47+duRuzNSKqE8YAQnV0= +google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 h1:OPXtXn7fNMaXwO3JvOmF1QyTc00jsSFFz1vXXBOdCDo= +google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2197,8 +2281,8 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag= -google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2214,8 
+2298,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/itests/deadlines_test.go b/itests/deadlines_test.go index fb28f4509..70da4be5a 100644 --- a/itests/deadlines_test.go +++ b/itests/deadlines_test.go @@ -4,6 +4,7 @@ package itests import ( "bytes" "context" + "strings" "testing" "time" @@ -16,7 +17,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" - minertypes "github.com/filecoin-project/go-state-types/builtin/v8/miner" "github.com/filecoin-project/go-state-types/exitcode" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" @@ -183,13 +183,17 @@ func TestDeadlineToggling(t *testing.T) { cr, err := cid.Parse("bagboea4b5abcatlxechwbp7kjpjguna6r6q7ejrhe6mdp3lf34pmswn27pkkiekz") require.NoError(t, err) - params := &minertypes.SectorPreCommitInfo{ - Expiration: 2880 * 300, - SectorNumber: 22, - SealProof: kit.TestSpt, + params := &miner.PreCommitSectorBatchParams2{ + 
Sectors: []miner.SectorPreCommitInfo{ + { + Expiration: 2880 * 300, + SectorNumber: 22, + SealProof: kit.TestSpt, - SealedCID: cr, - SealRandEpoch: head.Height() - 200, + SealedCID: cr, + SealRandEpoch: head.Height() - 200, + }, + }, } enc := new(bytes.Buffer) @@ -199,7 +203,7 @@ func TestDeadlineToggling(t *testing.T) { To: maddrE, From: defaultFrom, Value: types.FromFil(1), - Method: builtin.MethodsMiner.PreCommitSector, + Method: builtin.MethodsMiner.PreCommitSectorBatch2, Params: enc.Bytes(), }, nil) require.NoError(t, err) @@ -286,14 +290,18 @@ func TestDeadlineToggling(t *testing.T) { sp, aerr := actors.SerializeParams(terminateSectorParams) require.NoError(t, aerr) - smsg, err := client.MpoolPushMessage(ctx, &types.Message{ - From: defaultFrom, - To: maddrD, - Method: builtin.MethodsMiner.TerminateSectors, + var smsg *types.SignedMessage + require.Eventually(t, func() bool { + smsg, err = client.MpoolPushMessage(ctx, &types.Message{ + From: defaultFrom, + To: maddrD, + Method: builtin.MethodsMiner.TerminateSectors, - Value: big.Zero(), - Params: sp, - }, nil) + Value: big.Zero(), + Params: sp, + }, nil) + return err == nil || !strings.Contains(err.Error(), "cannot terminate sectors in immutable deadline") + }, 60*time.Second, 100*time.Millisecond) require.NoError(t, err) t.Log("sent termination message:", smsg.Cid()) diff --git a/itests/direct_data_onboard_test.go b/itests/direct_data_onboard_test.go new file mode 100644 index 000000000..3d414ba1c --- /dev/null +++ b/itests/direct_data_onboard_test.go @@ -0,0 +1,426 @@ +package itests + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/multiformats/go-multicodec" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + cborutil 
"github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-commp-utils/nonffi" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + market2 "github.com/filecoin-project/go-state-types/builtin/v9/market" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + minertypes "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/lib/must" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/storage/pipeline/piece" +) + +func TestActors13Migration(t *testing.T) { + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + client, _, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.UpgradeSchedule(stmgr.Upgrade{ + Network: network.Version21, + Height: -1, + }, stmgr.Upgrade{ + Network: network.Version22, + Height: 10, + Migration: filcns.UpgradeActorsV13, + })) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + // mine until 15 + client.WaitTillChain(ctx, kit.HeightAtLeast(15)) +} + +func TestOnboardRawPiece(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC()) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + pieceSize := abi.PaddedPieceSize(2048).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + + head, err := client.ChainHead(ctx) + require.NoError(t, 
err) + + so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + KeepUnsealed: true, + PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: dc.PieceCID, + Size: dc.Size, + VerifiedAllocationKey: nil, + Notify: nil, + }, + }) + require.NoError(t, err) + + // wait for sector to commit + + // wait for sector to commit and enter proving state + toCheck := map[abi.SectorNumber]struct{}{ + so.Sector: {}, + } + + miner.WaitSectorsProving(ctx, toCheck) + + si, err := miner.SectorsStatus(ctx, so.Sector, false) + require.NoError(t, err) + require.Equal(t, dc.PieceCID, *si.CommD) +} + +func TestOnboardMixedMarketDDO(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) { + sc.RequireActivationSuccess = true + sc.RequireNotificationSuccess = true + })) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + + var pieces []abi.PieceInfo + var dealID abi.DealID + + // market ddo piece + var marketSector api.SectorOffset + var marketPiece abi.PieceInfo + marketPieceSize := abi.PaddedPieceSize(2048 / 2).Unpadded() + { + pieceData := make([]byte, marketPieceSize) + _, _ = rand.Read(pieceData) + + marketPiece, err = miner.ComputeDataCid(ctx, marketPieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + pieces = append(pieces, marketPiece) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + // PSD + + psdParams := market2.PublishStorageDealsParams{ + Deals: []market2.ClientDealProposal{ 
+ makeMarketDealProposal(t, client, miner, marketPiece.PieceCID, marketPieceSize.Padded(), head.Height()+2880*2, head.Height()+2880*400), + }, + } + + psdMsg := &types.Message{ + To: market.Address, + From: mi.Worker, + + Method: market.Methods.PublishStorageDeals, + Params: must.One(cborutil.Dump(&psdParams)), + } + + smsg, err := client.MpoolPushMessage(ctx, psdMsg, nil) + require.NoError(t, err) + + r, err := client.StateWaitMsg(ctx, smsg.Cid(), 1, stmgr.LookbackNoLimit, true) + require.NoError(t, err) + + require.Equal(t, exitcode.Ok, r.Receipt.ExitCode) + + nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK) + require.NoError(t, err) + + res, err := market.DecodePublishStorageDealsReturn(r.Receipt.Return, nv) + require.NoError(t, err) + dealID = must.One(res.DealIDs())[0] + + mcid := smsg.Cid() + + marketSector, err = miner.SectorAddPieceToAny(ctx, marketPieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: &mcid, + DealID: dealID, + DealProposal: &psdParams.Deals[0].Proposal, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + PieceActivationManifest: nil, + KeepUnsealed: true, + }) + require.NoError(t, err) + + require.Equal(t, abi.PaddedPieceSize(0), marketSector.Offset) + require.Equal(t, abi.SectorNumber(2), marketSector.Sector) + } + + // raw ddo piece + var rawSector api.SectorOffset + var rawPiece abi.PieceInfo + rawPieceSize := abi.PaddedPieceSize(2048 / 2).Unpadded() + { + pieceData := make([]byte, rawPieceSize) + _, _ = rand.Read(pieceData) + + rawPiece, err = miner.ComputeDataCid(ctx, rawPieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + pieces = append(pieces, rawPiece) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + rawSector, err = miner.SectorAddPieceToAny(ctx, rawPieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + 
StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + KeepUnsealed: false, + PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: rawPiece.PieceCID, + Size: rawPiece.Size, + VerifiedAllocationKey: nil, + Notify: nil, + }, + }) + require.NoError(t, err) + + require.Equal(t, abi.PaddedPieceSize(1024), rawSector.Offset) + require.Equal(t, abi.SectorNumber(2), rawSector.Sector) + } + + require.Equal(t, marketSector.Sector, rawSector.Sector) // sanity check same sector + + toCheck := map[abi.SectorNumber]struct{}{ + 2: {}, + } + + miner.WaitSectorsProving(ctx, toCheck) + + expectCommD, err := nonffi.GenerateUnsealedCID(abi.RegisteredSealProof_StackedDrg2KiBV1_1, pieces) + require.NoError(t, err) + + si, err := miner.SectorsStatus(ctx, 2, false) + require.NoError(t, err) + require.Equal(t, expectCommD, *si.CommD) + + ds, err := client.StateMarketStorageDeal(ctx, dealID, types.EmptyTSK) + require.NoError(t, err) + + require.NotEqual(t, -1, ds.State.SectorStartEpoch) + + { + deals, err := client.StateMarketDeals(ctx, types.EmptyTSK) + require.NoError(t, err) + for id, deal := range deals { + fmt.Println("Deal", id, deal.Proposal.PieceCID, deal.Proposal.PieceSize, deal.Proposal.Client, deal.Proposal.Provider) + } + + // check actor events, verify deal-published is as expected + minerIdAddr, err := client.StateLookupID(ctx, maddr, types.EmptyTSK) + require.NoError(t, err) + minerId, err := address.IDFromAddress(minerIdAddr) + require.NoError(t, err) + caddr, err := client.WalletDefaultAddress(context.Background()) + require.NoError(t, err) + clientIdAddr, err := client.StateLookupID(ctx, caddr, types.EmptyTSK) + require.NoError(t, err) + clientId, err := address.IDFromAddress(clientIdAddr) + require.NoError(t, err) + + fmt.Println("minerId", minerId, "clientId", clientId) + for _, piece := range pieces { + fmt.Println("piece", piece.PieceCID, piece.Size) + } + + // check some actor events + var epochZero abi.ChainEpoch + allEvents, 
err := miner.FullNode.GetActorEventsRaw(ctx, &types.ActorEventFilter{ + FromHeight: &epochZero, + }) + require.NoError(t, err) + for _, key := range []string{"deal-published", "deal-activated", "sector-precommitted", "sector-activated"} { + var found bool + keyBytes := must.One(ipld.Encode(basicnode.NewString(key), dagcbor.Encode)) + for _, event := range allEvents { + require.True(t, len(event.Entries) > 0) + if event.Entries[0].Key == "$type" && bytes.Equal(event.Entries[0].Value, keyBytes) { + found = true + switch key { + case "deal-published", "deal-activated": + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: keyBytes}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(2), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, + } + require.Equal(t, expectedEntries, event.Entries) + case "sector-activated": + // only one sector, that has both our pieces in it + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-activated"), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(rawSector.Sector)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "unsealed-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: expectCommD}), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: marketPiece.PieceCID}), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", 
Value: must.One(ipld.Encode(basicnode.NewInt(int64(marketPieceSize.Padded())), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: rawPiece.PieceCID}), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(rawPieceSize.Padded())), dagcbor.Encode))}, + } + require.Equal(t, expectedEntries, event.Entries) + } + break + } + } + require.True(t, found, "expected to find event %s", key) + } + } +} + +func TestOnboardRawPieceSnap(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MutateSealingConfig(func(sc *config.SealingConfig) { + sc.PreferNewSectorsForDeals = false + sc.MakeNewSectorForDeals = false + sc.MakeCCSectorsAvailable = true + sc.AggregateCommits = false + })) + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + miner.PledgeSectors(ctx, 1, 0, nil) + sl, err := miner.SectorsListNonGenesis(ctx) + require.NoError(t, err) + require.Len(t, sl, 1, "expected 1 sector") + + snum := sl[0] + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + client.WaitForSectorActive(ctx, t, snum, maddr) + + pieceSize := abi.PaddedPieceSize(2048).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, err) + + head, err := client.ChainHead(ctx) + require.NoError(t, err) + + so, err := miner.SectorAddPieceToAny(ctx, pieceSize, bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, // todo set so that it works with the sector + }, + KeepUnsealed: false, + 
PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: dc.PieceCID, + Size: dc.Size, + VerifiedAllocationKey: nil, + Notify: nil, + }, + }) + require.NoError(t, err) + + // wait for sector to commit + + // wait for sector to commit and enter proving state + toCheck := map[abi.SectorNumber]struct{}{ + so.Sector: {}, + } + + miner.WaitSectorsProving(ctx, toCheck) +} + +func makeMarketDealProposal(t *testing.T, client *kit.TestFullNode, miner *kit.TestMiner, data cid.Cid, ps abi.PaddedPieceSize, start, end abi.ChainEpoch) market2.ClientDealProposal { + ca, err := client.WalletDefaultAddress(context.Background()) + require.NoError(t, err) + + ma, err := miner.ActorAddress(context.Background()) + require.NoError(t, err) + + dp := market2.DealProposal{ + PieceCID: data, + PieceSize: ps, + VerifiedDeal: false, + Client: ca, + Provider: ma, + Label: must.One(market2.NewLabelFromString("wat")), + StartEpoch: start, + EndEpoch: end, + StoragePricePerEpoch: big.Zero(), + ProviderCollateral: abi.TokenAmount{}, // below + ClientCollateral: big.Zero(), + } + + cb, err := client.StateDealProviderCollateralBounds(context.Background(), dp.PieceSize, dp.VerifiedDeal, types.EmptyTSK) + require.NoError(t, err) + dp.ProviderCollateral = big.Div(big.Mul(cb.Min, big.NewInt(2)), big.NewInt(2)) + + buf, err := cborutil.Dump(&dp) + require.NoError(t, err) + sig, err := client.WalletSign(context.Background(), ca, buf) + require.NoError(t, err) + + return market2.ClientDealProposal{ + Proposal: dp, + ClientSignature: *sig, + } + +} diff --git a/itests/direct_data_onboard_verified_test.go b/itests/direct_data_onboard_verified_test.go new file mode 100644 index 000000000..df87a48a9 --- /dev/null +++ b/itests/direct_data_onboard_verified_test.go @@ -0,0 +1,764 @@ +package itests + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "strings" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + 
"github.com/ipld/go-ipld-prime/codec/dagjson" + "github.com/ipld/go-ipld-prime/datamodel" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/ipld/go-ipld-prime/node/bindnode" + "github.com/multiformats/go-multicodec" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/builtin" + minertypes13 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + verifregtypes13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" + datacap2 "github.com/filecoin-project/go-state-types/builtin/v9/datacap" + verifregtypes9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/datacap" + minertypes "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet/key" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/lib/must" + "github.com/filecoin-project/lotus/storage/pipeline/piece" +) + +var bogusPieceCid = cid.MustParse("baga6ea4seaaqa") + +func TestOnboardRawPieceVerified_WithActorEvents(t *testing.T) { + kit.QuietMiningLogs() + + var ( + blocktime = 2 * time.Millisecond + ctx = context.Background() + ) + + rootKey, err := key.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifierKey, err := key.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifiedClientKey, err := key.GenerateKey(types.KTBLS) + require.NoError(t, err) + + bal, err := types.ParseFIL("100fil") + 
require.NoError(t, err) + + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), + kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())), + kit.Account(verifierKey, abi.NewTokenAmount(bal.Int64())), + kit.Account(verifiedClientKey, abi.NewTokenAmount(bal.Int64())), + ) + + /* --- Setup subscription channels for ActorEvents --- */ + + // subscribe only to miner's actor events + minerEvtsChan, err := miner.FullNode.SubscribeActorEventsRaw(ctx, &types.ActorEventFilter{ + Addresses: []address.Address{miner.ActorAddr}, + }) + require.NoError(t, err) + + // subscribe only to sector-activated events + sectorActivatedCbor := must.One(ipld.Encode(basicnode.NewString("sector-activated"), dagcbor.Encode)) + sectorActivatedEvtsChan, err := miner.FullNode.SubscribeActorEventsRaw(ctx, &types.ActorEventFilter{ + Fields: map[string][]types.ActorEventBlock{ + "$type": { + {Codec: uint64(multicodec.Cbor), Value: sectorActivatedCbor}, + }, + }, + }) + require.NoError(t, err) + + /* --- Start mining --- */ + + ens.InterconnectAll().BeginMiningMustPost(blocktime) + + minerId, err := address.IDFromAddress(miner.ActorAddr) + require.NoError(t, err) + + miner.PledgeSectors(ctx, 1, 0, nil) + sl, err := miner.SectorsListNonGenesis(ctx) + require.NoError(t, err) + require.Len(t, sl, 1, "expected 1 sector") + + snum := sl[0] + + maddr, err := miner.ActorAddress(ctx) + require.NoError(t, err) + + client.WaitForSectorActive(ctx, t, snum, maddr) + + /* --- Setup verified registry and client and allocate datacap to client */ + + verifierAddr, verifiedClientAddrses := ddoVerifiedSetupVerifiedClient(ctx, t, client, rootKey, verifierKey, []*key.Key{verifiedClientKey}) + verifiedClientAddr := verifiedClientAddrses[0] + + /* --- Prepare piece for onboarding --- */ + + pieceSize := abi.PaddedPieceSize(2048).Unpadded() + pieceData := make([]byte, pieceSize) + _, _ = rand.Read(pieceData) + + dc, err := miner.ComputeDataCid(ctx, pieceSize, bytes.NewReader(pieceData)) + require.NoError(t, 
err) + + /* --- Allocate datacap for the piece by the verified client --- */ + head, err := client.ChainHead(ctx) + require.NoError(t, err) + bogusAllocationExpiry := head.Height() + 100 + clientId, allocationId := ddoVerifiedSetupAllocations(ctx, t, client, minerId, dc, verifiedClientAddr, bogusAllocationExpiry, 0) + + head, err = client.ChainHead(ctx) + require.NoError(t, err) + + // subscribe to actor events up until the current head + initialEventsChan, err := miner.FullNode.SubscribeActorEventsRaw(ctx, &types.ActorEventFilter{ + FromHeight: epochPtr(0), + ToHeight: epochPtr(int64(head.Height())), + }) + require.NoError(t, err) + + /* --- Onboard the piece --- */ + + so, si := ddoVerifiedOnboardPiece(ctx, t, miner, clientId, allocationId, dc, pieceData) + + // check that we have one allocation because the real allocation has been claimed by the miner for the piece + allocations, err := client.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + require.Len(t, allocations, 1) // allocation has been claimed, leaving the bogus one + + ddoVerifiedRemoveAllocations(ctx, t, client, verifiedClientAddr, clientId) + + // check that we have no more allocations + allocations, err = client.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + require.Len(t, allocations, 0) + + /* --- Tests for ActorEvents --- */ + + t.Logf("Inspecting events as they appear in message receipts") + + // construct ActorEvents from messages and receipts + eventsFromMessages := ddoVerifiedBuildActorEventsFromMessages(ctx, t, miner.FullNode) + fmt.Println("Events from message receipts:") + printEvents(t, eventsFromMessages) + + // check for precisely these events and ensure they contain what we expect; don't be strict on + // other events to make sure we're forward-compatible as new events are added + + { + precommitedEvents := filterEvents(eventsFromMessages, "sector-precommitted") + require.Len(t, precommitedEvents, 2) + + 
expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-precommitted"), dagcbor.Encode))}, + // first sector to start mining is CC + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(so.Sector)-1), dagcbor.Encode))}, + } + require.Equal(t, expectedEntries, precommitedEvents[0].Entries) + + // second sector has our piece + expectedEntries[1].Value = must.One(ipld.Encode(basicnode.NewInt(int64(so.Sector)), dagcbor.Encode)) + require.Equal(t, expectedEntries, precommitedEvents[1].Entries) + } + + { + activatedEvents := filterEvents(eventsFromMessages, "sector-activated") + require.Len(t, activatedEvents, 2) + + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("sector-activated"), dagcbor.Encode))}, + // first sector to start mining is CC + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(so.Sector)-1), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "unsealed-cid", Value: must.One(ipld.Encode(datamodel.Null, dagcbor.Encode))}, + } + require.Equal(t, expectedEntries, activatedEvents[0].Entries) + + // second sector has our piece, and only our piece, so usealed-cid matches piece-cid, + // unfortunately we don't have a case with multiple pieces + expectedEntries[1].Value = must.One(ipld.Encode(basicnode.NewInt(int64(so.Sector)), dagcbor.Encode)) + expectedEntries[2].Value = must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: dc.PieceCID}), dagcbor.Encode)) + expectedEntries = append(expectedEntries, + types.EventEntry{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: dc.PieceCID}), dagcbor.Encode))}, + types.EventEntry{Flags: 0x01, Codec: 
uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(pieceSize.Padded())), dagcbor.Encode))}, + ) + require.Equal(t, expectedEntries, activatedEvents[1].Entries) + } + + { + verifierBalanceEvents := filterEvents(eventsFromMessages, "verifier-balance") + require.Len(t, verifierBalanceEvents, 2) + + verifierIdAddr, err := client.StateLookupID(ctx, verifierAddr, types.EmptyTSK) + require.NoError(t, err) + verifierId, err := address.IDFromAddress(verifierIdAddr) + require.NoError(t, err) + + verifierEntry := types.EventEntry{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "verifier", Value: must.One(ipld.Encode(basicnode.NewInt(int64(verifierId)), dagcbor.Encode))} + require.Len(t, verifierBalanceEvents[0].Entries, 3) // $type, "verifier", "balance" + require.Contains(t, verifierBalanceEvents[0].Entries, verifierEntry) + + require.Len(t, verifierBalanceEvents[1].Entries, 4) // $type, "verifier", "balance", "client" + require.Contains(t, verifierBalanceEvents[1].Entries, verifierEntry) + clientEntry := types.EventEntry{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))} + require.Contains(t, verifierBalanceEvents[1].Entries, clientEntry) + } + + { + allocationEvents := filterEvents(eventsFromMessages, "allocation") + require.Len(t, allocationEvents, 2) + + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("allocation"), dagcbor.Encode))}, + // first, bogus, allocation + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)-1), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: 
must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: bogusPieceCid}), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(pieceSize.Padded())), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-min", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MinimumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-max", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MaximumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "expiration", Value: must.One(ipld.Encode(basicnode.NewInt(int64(bogusAllocationExpiry)), dagcbor.Encode))}, + } + require.Equal(t, expectedEntries, allocationEvents[0].Entries) + + // the second, real allocation + expectedEntries[1].Value = must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)), dagcbor.Encode)) // "id" + expectedEntries[4].Value = must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: dc.PieceCID}), dagcbor.Encode)) // "piece-cid" + expectedEntries[8].Value = must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MaximumVerifiedAllocationExpiration), dagcbor.Encode)) // "expiration" + require.Equal(t, expectedEntries, allocationEvents[1].Entries) + } + + { + allocationEvents := filterEvents(eventsFromMessages, "allocation-removed") + require.Len(t, allocationEvents, 1) + + // manual removal of the bogus allocation + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("allocation-removed"), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)-1), dagcbor.Encode))}, + 
{Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: bogusPieceCid}), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(pieceSize.Padded())), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-min", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MinimumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-max", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MaximumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "expiration", Value: must.One(ipld.Encode(basicnode.NewInt(int64(bogusAllocationExpiry)), dagcbor.Encode))}, + } + require.Equal(t, expectedEntries, allocationEvents[0].Entries) + } + + { + claimEvents := filterEvents(eventsFromMessages, "claim") + require.Len(t, claimEvents, 1) + + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: must.One(ipld.Encode(basicnode.NewString("claim"), dagcbor.Encode))}, + // claimId inherits from its original allocationId + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "id", Value: must.One(ipld.Encode(basicnode.NewInt(int64(allocationId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "client", Value: must.One(ipld.Encode(basicnode.NewInt(int64(clientId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "provider", Value: must.One(ipld.Encode(basicnode.NewInt(int64(minerId)), dagcbor.Encode))}, + {Flags: 0x03, Codec: 
uint64(multicodec.Cbor), Key: "piece-cid", Value: must.One(ipld.Encode(basicnode.NewLink(cidlink.Link{Cid: dc.PieceCID}), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "piece-size", Value: must.One(ipld.Encode(basicnode.NewInt(int64(pieceSize.Padded())), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-min", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MinimumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-max", Value: must.One(ipld.Encode(basicnode.NewInt(verifregtypes13.MaximumVerifiedAllocationTerm), dagcbor.Encode))}, + {Flags: 0x01, Codec: uint64(multicodec.Cbor), Key: "term-start", Value: must.One(ipld.Encode(basicnode.NewInt(int64(claimEvents[0].Height)), dagcbor.Encode))}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(si.SectorID)), dagcbor.Encode))}, + } + require.Equal(t, expectedEntries, claimEvents[0].Entries) + } + + // verify that we can trace a datacap allocation through to a claim with the events, since this + // information is not completely available from the state tree + claims := ddoVerifiedBuildClaimsFromMessages(ctx, t, eventsFromMessages, miner.FullNode) + for _, claim := range claims { + p, err := address.NewIDAddress(uint64(claim.Provider)) + require.NoError(t, err) + c, err := address.NewIDAddress(uint64(claim.Client)) + require.NoError(t, err) + fmt.Printf("Claim<provider=%s, client=%s, data=%s, size=%d, termMin=%d, termMax=%d, termStart=%d, sector=%d>\n", + p, c, claim.Data, claim.Size, claim.TermMin, claim.TermMax, claim.TermStart, claim.Sector) + } + require.Equal(t, []*verifregtypes9.Claim{ + { + Provider: abi.ActorID(minerId), + Client: clientId, + Data: dc.PieceCID, + Size: dc.Size, + TermMin: verifregtypes13.MinimumVerifiedAllocationTerm, + TermMax: verifregtypes13.MaximumVerifiedAllocationTerm, + TermStart: si.Activation, + Sector: so.Sector, + }, + }, claims) + + // construct ActorEvents from GetActorEventsRaw API + 
t.Logf("Inspecting full events list from GetActorEventsRaw") + allEvtsFromGetAPI, err := miner.FullNode.GetActorEventsRaw(ctx, &types.ActorEventFilter{ + FromHeight: epochPtr(0), + }) + require.NoError(t, err) + fmt.Println("Events from GetActorEventsRaw:") + printEvents(t, allEvtsFromGetAPI) + // compare events from messages and receipts with events from GetActorEventsRaw API + require.Equal(t, eventsFromMessages, allEvtsFromGetAPI) + + // construct ActorEvents from subscription channel for just the miner actor + t.Logf("Inspecting only miner's events list from SubscribeActorEventsRaw") + var subMinerEvts []*types.ActorEvent + for evt := range minerEvtsChan { + subMinerEvts = append(subMinerEvts, evt) + if len(subMinerEvts) == 4 { + break + } + } + var allMinerEvts []*types.ActorEvent + for _, evt := range eventsFromMessages { + if evt.Emitter == miner.ActorAddr { + allMinerEvts = append(allMinerEvts, evt) + } + } + // compare events from messages and receipts with events from subscription channel + require.Equal(t, allMinerEvts, subMinerEvts) + + // construct ActorEvents from subscription channels for just the sector-activated events + var sectorActivatedEvts []*types.ActorEvent + for _, evt := range eventsFromMessages { + for _, entry := range evt.Entries { + if entry.Key == "$type" && bytes.Equal(entry.Value, sectorActivatedCbor) { + sectorActivatedEvts = append(sectorActivatedEvts, evt) + break + } + } + } + require.Len(t, sectorActivatedEvts, 2) // sanity check + + t.Logf("Inspecting only sector-activated events list from real-time SubscribeActorEventsRaw") + var subscribedSectorActivatedEvts []*types.ActorEvent + for evt := range sectorActivatedEvtsChan { + subscribedSectorActivatedEvts = append(subscribedSectorActivatedEvts, evt) + if len(subscribedSectorActivatedEvts) == 2 { + break + } + } + // compare events from messages and receipts with events from subscription channel + require.Equal(t, sectorActivatedEvts, subscribedSectorActivatedEvts) + + // same 
thing but use historical event fetching to see the same list + t.Logf("Inspecting only sector-activated events list from historical SubscribeActorEventsRaw") + sectorActivatedEvtsChan, err = miner.FullNode.SubscribeActorEventsRaw(ctx, &types.ActorEventFilter{ + Fields: map[string][]types.ActorEventBlock{ + "$type": { + {Codec: uint64(multicodec.Cbor), Value: sectorActivatedCbor}, + }, + }, + FromHeight: epochPtr(0), + }) + require.NoError(t, err) + subscribedSectorActivatedEvts = subscribedSectorActivatedEvts[:0] + for evt := range sectorActivatedEvtsChan { + subscribedSectorActivatedEvts = append(subscribedSectorActivatedEvts, evt) + if len(subscribedSectorActivatedEvts) == 2 { + break + } + } + // compare events from messages and receipts with events from subscription channel + require.Equal(t, sectorActivatedEvts, subscribedSectorActivatedEvts) + + // check that our `ToHeight` filter works as expected + t.Logf("Inspecting only initial list of events SubscribeActorEventsRaw with ToHeight") + var initialEvents []*types.ActorEvent + for evt := range initialEventsChan { + initialEvents = append(initialEvents, evt) + } + // sector-precommitted, sector-activated, verifier-balance, verifier-balance, allocation, allocation + require.Equal(t, eventsFromMessages[0:6], initialEvents) + + // construct ActorEvents from subscription channel for all actor events + t.Logf("Inspecting full events list from historical SubscribeActorEventsRaw") + allEvtsChan, err := miner.FullNode.SubscribeActorEventsRaw(ctx, &types.ActorEventFilter{ + FromHeight: epochPtr(0), + }) + require.NoError(t, err) + var prefillEvts []*types.ActorEvent + for evt := range allEvtsChan { + prefillEvts = append(prefillEvts, evt) + if len(prefillEvts) == len(eventsFromMessages) { + break + } + } + // compare events from messages and receipts with events from subscription channel + require.Equal(t, eventsFromMessages, prefillEvts) + t.Logf("All done comparing events") + + // NOTE: There is a delay in finishing 
this test because the SubscribeActorEventsRaw + // with the ToHeight (initialEventsChan) has to wait at least a full actual epoch before + // realising that there's no more events for that filter. itests run with a different block + // speed than the ActorEventHandler is aware of. +} + +func ddoVerifiedSetupAllocations( + ctx context.Context, + t *testing.T, + node v1api.FullNode, + minerId uint64, + dc abi.PieceInfo, + verifiedClientAddr address.Address, + bogusAllocExpiration abi.ChainEpoch, // zero if we don't want to make one + tmax abi.ChainEpoch, +) (clientID abi.ActorID, allocationID verifregtypes13.AllocationId) { + if tmax == 0 { + tmax = verifregtypes13.MaximumVerifiedAllocationTerm + } + + var requests []verifregtypes13.AllocationRequest + + if bogusAllocExpiration != 0 { + // design this one to expire so we can observe allocation-removed + allocationRequestBogus := verifregtypes13.AllocationRequest{ + Provider: abi.ActorID(minerId), + Data: bogusPieceCid, + Size: dc.Size, + TermMin: verifregtypes13.MinimumVerifiedAllocationTerm, + TermMax: tmax, + Expiration: bogusAllocExpiration, + } + requests = append(requests, allocationRequestBogus) + } + + allocationRequest := verifregtypes13.AllocationRequest{ + Provider: abi.ActorID(minerId), + Data: dc.PieceCID, + Size: dc.Size, + TermMin: verifregtypes13.MinimumVerifiedAllocationTerm, + TermMax: tmax, + Expiration: verifregtypes13.MaximumVerifiedAllocationExpiration, + } + requests = append(requests, allocationRequest) + + allocationRequests := verifregtypes13.AllocationRequests{ + Allocations: requests, + } + + receiverParams, aerr := actors.SerializeParams(&allocationRequests) + require.NoError(t, aerr) + + var amt abi.TokenAmount + amt = big.Mul(big.NewInt(int64(dc.Size)), builtin.TokenPrecision) + if bogusAllocExpiration != 0 { + amt = big.Mul(big.NewInt(int64(dc.Size*2)), builtin.TokenPrecision) + } + + transferParams, aerr := actors.SerializeParams(&datacap2.TransferParams{ + To: 
builtin.VerifiedRegistryActorAddr, + Amount: amt, + OperatorData: receiverParams, + }) + require.NoError(t, aerr) + + msg := &types.Message{ + To: builtin.DatacapActorAddr, + From: verifiedClientAddr, + Method: datacap.Methods.TransferExported, + Params: transferParams, + Value: big.Zero(), + } + + sm, err := node.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err := node.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + // check that we have an allocation + allocations, err := node.StateGetAllocations(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + if bogusAllocExpiration != 0 { + require.Len(t, allocations, 2) // allocation waiting to be claimed + } else { + require.Len(t, allocations, 1) // allocation waiting to be claimed + } + + for key, value := range allocations { + if value.Data == dc.PieceCID { + allocationID = verifregtypes13.AllocationId(key) + clientID = value.Client + break + } + } + require.NotEqual(t, verifregtypes13.AllocationId(0), allocationID) // found it in there + return clientID, allocationID +} + +func ddoVerifiedOnboardPiece(ctx context.Context, t *testing.T, miner *kit.TestMiner, clientId abi.ActorID, allocationId verifregtypes13.AllocationId, dc abi.PieceInfo, pieceData []byte) (lapi.SectorOffset, lapi.SectorInfo) { + head, err := miner.FullNode.ChainHead(ctx) + require.NoError(t, err) + + so, err := miner.SectorAddPieceToAny(ctx, dc.Size.Unpadded(), bytes.NewReader(pieceData), piece.PieceDealInfo{ + PublishCid: nil, + DealID: 0, + DealProposal: nil, + DealSchedule: piece.DealSchedule{ + StartEpoch: head.Height() + 2880*2, + EndEpoch: head.Height() + 2880*400, + }, + KeepUnsealed: true, + PieceActivationManifest: &minertypes.PieceActivationManifest{ + CID: dc.PieceCID, + Size: dc.Size, + VerifiedAllocationKey: &minertypes13.VerifiedAllocationKey{Client: clientId, ID: allocationId}, + Notify: nil, + }, + }) + 
require.NoError(t, err) + + // wait for sector to commit + miner.WaitSectorsProving(ctx, map[abi.SectorNumber]struct{}{ + so.Sector: {}, + }) + + // Verify that the piece has been onboarded + + si, err := miner.SectorsStatus(ctx, so.Sector, true) + require.NoError(t, err) + require.Equal(t, dc.PieceCID, *si.CommD) + + require.Equal(t, si.DealWeight, big.Zero()) + require.Equal(t, si.VerifiedDealWeight, big.Mul(big.NewInt(int64(dc.Size)), big.NewInt(int64(si.Expiration-si.Activation)))) + + return so, si +} + +func ddoVerifiedRemoveAllocations(ctx context.Context, t *testing.T, node v1api.FullNode, verifiedClientAddr address.Address, clientId abi.ActorID) { + // trigger an allocation removal + removalParams, aerr := actors.SerializeParams(&verifregtypes13.RemoveExpiredAllocationsParams{Client: clientId}) + require.NoError(t, aerr) + + msg := &types.Message{ + To: builtin.VerifiedRegistryActorAddr, + From: verifiedClientAddr, + Method: verifreg.Methods.RemoveExpiredAllocations, + Params: removalParams, + Value: big.Zero(), + } + + sm, err := node.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err := node.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) +} + +func ddoVerifiedBuildClaimsFromMessages(ctx context.Context, t *testing.T, eventsFromMessages []*types.ActorEvent, node v1api.FullNode) []*verifregtypes9.Claim { + claimKeyCbor := must.One(ipld.Encode(basicnode.NewString("claim"), dagcbor.Encode)) + claims := make([]*verifregtypes9.Claim, 0) + for _, event := range eventsFromMessages { + var isClaim bool + var claimId int64 = -1 + var providerId int64 = -1 + for _, e := range event.Entries { + if e.Key == "$type" && bytes.Equal(e.Value, claimKeyCbor) { + isClaim = true + } else if isClaim && e.Key == "id" { + nd, err := ipld.DecodeUsingPrototype(e.Value, dagcbor.Decode, bindnode.Prototype((*int64)(nil), nil)) + require.NoError(t, err) + claimId = 
*bindnode.Unwrap(nd).(*int64) + } else if isClaim && e.Key == "provider" { + nd, err := ipld.DecodeUsingPrototype(e.Value, dagcbor.Decode, bindnode.Prototype((*int64)(nil), nil)) + require.NoError(t, err) + providerId = *bindnode.Unwrap(nd).(*int64) + } + } + if isClaim && claimId != -1 && providerId != -1 { + provider, err := address.NewIDAddress(uint64(providerId)) + require.NoError(t, err) + claim, err := node.StateGetClaim(ctx, provider, verifregtypes9.ClaimId(claimId), types.EmptyTSK) + require.NoError(t, err) + claims = append(claims, claim) + } + } + return claims +} + +func ddoVerifiedBuildActorEventsFromMessages(ctx context.Context, t *testing.T, node v1api.FullNode) []*types.ActorEvent { + actorEvents := make([]*types.ActorEvent, 0) + + head, err := node.ChainHead(ctx) + require.NoError(t, err) + var lastts types.TipSetKey + for height := 0; height < int(head.Height()); height++ { + // for each tipset + ts, err := node.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(height), types.EmptyTSK) + require.NoError(t, err) + if ts.Key() == lastts { + continue + } + lastts = ts.Key() + messages, err := node.ChainGetMessagesInTipset(ctx, ts.Key()) + require.NoError(t, err) + if len(messages) == 0 { + continue + } + for _, m := range messages { + receipt, err := node.StateSearchMsg(ctx, types.EmptyTSK, m.Cid, -1, false) + require.NoError(t, err) + require.NotNil(t, receipt) + // receipt + if receipt.Receipt.EventsRoot != nil { + events, err := node.ChainGetEvents(ctx, *receipt.Receipt.EventsRoot) + require.NoError(t, err) + for _, evt := range events { + // for each event + addr, err := address.NewIDAddress(uint64(evt.Emitter)) + require.NoError(t, err) + + actorEvents = append(actorEvents, &types.ActorEvent{ + Entries: evt.Entries, + Emitter: addr, + Reverted: false, + Height: ts.Height(), + TipSetKey: ts.Key(), + MsgCid: m.Cid, + }) + } + } + } + } + return actorEvents +} + +func ddoVerifiedSetupVerifiedClient(ctx context.Context, t *testing.T, client 
*kit.TestFullNode, rootKey *key.Key, verifierKey *key.Key, verifiedClientKeys []*key.Key) (verifierAddr address.Address, ret []address.Address) { + // import the root key. + rootAddr, err := client.WalletImport(ctx, &rootKey.KeyInfo) + require.NoError(t, err) + + // import the verifiers' keys. + verifierAddr, err = client.WalletImport(ctx, &verifierKey.KeyInfo) + require.NoError(t, err) + + // import the verified client's key. + for _, k := range verifiedClientKeys { + verifiedClientAddr, err := client.WalletImport(ctx, &k.KeyInfo) + require.NoError(t, err) + ret = append(ret, verifiedClientAddr) + } + + allowance := big.NewInt(100000000000) + params, aerr := actors.SerializeParams(&verifregtypes13.AddVerifierParams{Address: verifierAddr, Allowance: allowance}) + require.NoError(t, aerr) + + msg := &types.Message{ + From: rootAddr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifier, + Params: params, + Value: big.Zero(), + } + + sm, err := client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err, "AddVerifier failed") + + res, err := client.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + verifierAllowance, err := client.StateVerifierStatus(ctx, verifierAddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, allowance, *verifierAllowance) + + // assign datacap to clients + for _, ad := range ret { + initialDatacap := big.NewInt(10000) + + params, aerr = actors.SerializeParams(&verifregtypes13.AddVerifiedClientParams{Address: ad, Allowance: initialDatacap}) + require.NoError(t, aerr) + + msg = &types.Message{ + From: verifierAddr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + Value: big.Zero(), + } + + sm, err = client.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err = client.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 
0, res.Receipt.ExitCode) + } + + return +} + +func filterEvents(events []*types.ActorEvent, key string) []*types.ActorEvent { + keyBytes := must.One(ipld.Encode(basicnode.NewString(key), dagcbor.Encode)) + filtered := make([]*types.ActorEvent, 0) + for _, event := range events { + for _, e := range event.Entries { + if e.Key == "$type" && bytes.Equal(e.Value, keyBytes) { + filtered = append(filtered, event) + break + } + } + } + return filtered +} + +func printEvents(t *testing.T, events []*types.ActorEvent) { + for _, event := range events { + entryStrings := []string{ + fmt.Sprintf("height=%d", event.Height), + fmt.Sprintf("msg=%s", event.MsgCid), + fmt.Sprintf("emitter=%s", event.Emitter), + fmt.Sprintf("reverted=%t", event.Reverted), + } + for _, e := range event.Entries { + // for each event entry + entryStrings = append(entryStrings, fmt.Sprintf("%s=%s", e.Key, eventValueToDagJson(t, e.Codec, e.Value))) + } + fmt.Printf("Event<%s>\n", strings.Join(entryStrings, ", ")) + } +} + +// eventValueToDagJson converts an ActorEvent value to a JSON string for printing. 
+func eventValueToDagJson(t *testing.T, codec uint64, data []byte) string { + switch codec { + case uint64(multicodec.Cbor): + nd, err := ipld.Decode(data, dagcbor.Decode) + require.NoError(t, err) + byts, err := ipld.Encode(nd, dagjson.Encode) + require.NoError(t, err) + return string(byts) + default: + return fmt.Sprintf("0x%x", data) + } +} + +func epochPtr(ei int64) *abi.ChainEpoch { + ep := abi.ChainEpoch(ei) + return &ep +} diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index a515b0e99..ee17df237 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -20,7 +20,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/stretchr/testify/require" - "github.com/urfave/cli/v2" "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" @@ -46,9 +45,6 @@ import ( "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet/key" - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" - "github.com/filecoin-project/lotus/cmd/lotus-provider/rpc" - "github.com/filecoin-project/lotus/cmd/lotus-provider/tasks" "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" "github.com/filecoin-project/lotus/cmd/lotus-worker/sealworker" "github.com/filecoin-project/lotus/gateway" @@ -124,17 +120,15 @@ type Ensemble struct { options *ensembleOpts inactive struct { - fullnodes []*TestFullNode - providernodes []*TestProviderNode - miners []*TestMiner - workers []*TestWorker + fullnodes []*TestFullNode + miners []*TestMiner + workers []*TestWorker } active struct { - fullnodes []*TestFullNode - providernodes []*TestProviderNode - miners []*TestMiner - workers []*TestWorker - bms map[*TestMiner]*BlockMiner + fullnodes []*TestFullNode + miners []*TestMiner + workers []*TestWorker + bms map[*TestMiner]*BlockMiner } genesis struct { version network.Version @@ -227,20 +221,6 @@ func (n 
*Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble { return n } -// FullNode enrolls a new Provider node. -func (n *Ensemble) Provider(lp *TestProviderNode, opts ...NodeOpt) *Ensemble { - options := DefaultNodeOpts - for _, o := range opts { - err := o(&options) - require.NoError(n.t, err) - } - - *lp = TestProviderNode{t: n.t, options: options, Deps: &deps.Deps{}} - - n.inactive.providernodes = append(n.inactive.providernodes, lp) - return n -} - // Miner enrolls a new miner, using the provided full node for chain // interactions. func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble { @@ -713,6 +693,8 @@ func (n *Ensemble) Start() *Ensemble { copy.FullNode = modules.MakeUuidWrapper(copy.FullNode) m.FullNode = © + //m.FullNode.FullNode = modules.MakeUuidWrapper(fn.FullNode) + opts := []node.Option{ node.StorageMiner(&m.StorageMiner, cfg.Subsystems), node.Base(), @@ -720,6 +702,8 @@ func (n *Ensemble) Start() *Ensemble { node.Test(), node.If(m.options.disableLibp2p, node.MockHost(n.mn)), + //node.Override(new(v1api.RawFullNodeAPI), func() api.FullNode { return modules.MakeUuidWrapper(m.FullNode) }), + //node.Override(new(v1api.RawFullNodeAPI), modules.MakeUuidWrapper), node.Override(new(v1api.RawFullNodeAPI), m.FullNode), node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)), @@ -902,28 +886,6 @@ func (n *Ensemble) Start() *Ensemble { // to active, so clear the slice. n.inactive.workers = n.inactive.workers[:0] - for _, p := range n.inactive.providernodes { - - // TODO setup config with options - err := p.Deps.PopulateRemainingDeps(context.Background(), &cli.Context{}, false) - require.NoError(n.t, err) - - shutdownChan := make(chan struct{}) - taskEngine, err := tasks.StartTasks(ctx, p.Deps) - if err != nil { - return nil - } - defer taskEngine.GracefullyTerminate(time.Hour) - - err = rpc.ListenAndServe(ctx, p.Deps, shutdownChan) // Monitor for shutdown. 
- require.NoError(n.t, err) - finishCh := node.MonitorShutdown(shutdownChan) //node.ShutdownHandler{Component: "rpc server", StopFunc: rpcStopper}, - //node.ShutdownHandler{Component: "provider", StopFunc: stop}, - - <-finishCh - - n.active.providernodes = append(n.active.providernodes, p) - } // --------------------- // MISC // --------------------- @@ -1099,14 +1061,14 @@ func importPreSealMeta(ctx context.Context, meta genesis.Miner, mds dtypes.Metad info := &pipeline.SectorInfo{ State: pipeline.Proving, SectorNumber: sector.SectorID, - Pieces: []api.SectorPiece{ - { + Pieces: []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(meta.SectorSize), PieceCID: commD, }, DealInfo: nil, // todo: likely possible to get, but not really that useful - }, + }), }, CommD: &commD, CommR: &commR, diff --git a/itests/kit/ensemble_opts.go b/itests/kit/ensemble_opts.go index d264da2bb..95bdd8da8 100644 --- a/itests/kit/ensemble_opts.go +++ b/itests/kit/ensemble_opts.go @@ -35,7 +35,13 @@ var DefaultEnsembleOpts = ensembleOpts{ } // MockProofs activates mock proofs for the entire ensemble. 
-func MockProofs() EnsembleOpt { +func MockProofs(e ...bool) EnsembleOpt { + if len(e) > 0 && !e[0] { + return func(opts *ensembleOpts) error { + return nil + } + } + return func(opts *ensembleOpts) error { opts.mockProofs = true // since we're using mock proofs, we don't need to download diff --git a/itests/kit/ensemble_opts_nv.go b/itests/kit/ensemble_opts_nv.go index d5bb1930e..18b531e13 100644 --- a/itests/kit/ensemble_opts_nv.go +++ b/itests/kit/ensemble_opts_nv.go @@ -35,12 +35,12 @@ func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt { }) /* inline-gen start */ return UpgradeSchedule(stmgr.Upgrade{ - Network: network.Version20, + Network: network.Version21, Height: -1, }, stmgr.Upgrade{ - Network: network.Version21, + Network: network.Version22, Height: upgradeHeight, - Migration: filcns.UpgradeActorsV12, + Migration: filcns.UpgradeActorsV13, }) /* inline-gen end */ } diff --git a/itests/kit/ensemble_presets.go b/itests/kit/ensemble_presets.go index 68b85fde0..3ec39cf90 100644 --- a/itests/kit/ensemble_presets.go +++ b/itests/kit/ensemble_presets.go @@ -101,21 +101,6 @@ func EnsembleOneTwo(t *testing.T, opts ...interface{}) (*TestFullNode, *TestMine return &full, &one, &two, ens } -// EnsembleProvider creates and starts an Ensemble with a single full node and a single provider. -// It does not interconnect nodes nor does it begin mining. 
-func EnsembleProvider(t *testing.T, opts ...interface{}) (*TestFullNode, *TestProviderNode, *Ensemble) { - opts = append(opts, WithAllSubsystems()) - - eopts, nopts := siftOptions(t, opts) - - var ( - full TestFullNode - provider TestProviderNode - ) - ens := NewEnsemble(t, eopts...).FullNode(&full, nopts...).Provider(&provider, nopts...).Start() - return &full, &provider, ens -} - func siftOptions(t *testing.T, opts []interface{}) (eopts []EnsembleOpt, nopts []NodeOpt) { for _, v := range opts { switch o := v.(type) { diff --git a/itests/kit/log.go b/itests/kit/log.go index 0c66427f9..2cb597095 100644 --- a/itests/kit/log.go +++ b/itests/kit/log.go @@ -23,6 +23,7 @@ func QuietMiningLogs() { _ = logging.SetLogLevel("rpc", "ERROR") _ = logging.SetLogLevel("consensus-common", "ERROR") _ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR") + _ = logging.SetLogLevel("consensus-common", "WARN") } func QuietAllLogsExcept(names ...string) { diff --git a/itests/kit/node_full.go b/itests/kit/node_full.go index 697c59aed..3e80ed688 100644 --- a/itests/kit/node_full.go +++ b/itests/kit/node_full.go @@ -22,7 +22,6 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet/key" cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" "github.com/filecoin-project/lotus/gateway" "github.com/filecoin-project/lotus/node" ) @@ -55,17 +54,6 @@ type TestFullNode struct { options nodeOpts } -// TestProviderNode represents a Provider node enrolled in an Ensemble. 
-type TestProviderNode struct { - v1api.LotusProviderStruct - - t *testing.T - - *deps.Deps - - options nodeOpts -} - func MergeFullNodes(fullNodes []*TestFullNode) *TestFullNode { var wrappedFullNode TestFullNode var fns api.FullNodeStruct diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go index 9af284148..1f4f9f6a4 100644 --- a/itests/kit/node_opts.go +++ b/itests/kit/node_opts.go @@ -1,6 +1,8 @@ package kit import ( + "math" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -63,6 +65,8 @@ var DefaultNodeOpts = nodeOpts{ // test defaults cfg.Fevm.EnableEthRPC = true + cfg.Events.MaxFilterHeightRange = math.MaxInt64 + cfg.Events.EnableActorEventsAPI = true return nil }, }, diff --git a/itests/pending_deal_allocation_test.go b/itests/pending_deal_allocation_test.go index c1e0531cf..60b755ac0 100644 --- a/itests/pending_deal_allocation_test.go +++ b/itests/pending_deal_allocation_test.go @@ -180,10 +180,6 @@ func TestGetAllocationForPendingDeal(t *testing.T) { dealIds, err := ret.DealIDs() require.NoError(t, err) - dealInfo, err := api.StateMarketStorageDeal(ctx, dealIds[0], types.EmptyTSK) - require.NoError(t, err) - require.Equal(t, verifregtypes.AllocationId(0), dealInfo.State.VerifiedClaim) // Allocation in State should not be set yet, because it's in the allocation map - allocation, err := api.StateGetAllocationForPendingDeal(ctx, dealIds[0], types.EmptyTSK) require.NoError(t, err) require.Equal(t, dealProposal.PieceCID, allocation.Data) diff --git a/itests/raft_messagesigner_test.go b/itests/raft_messagesigner_test.go new file mode 100644 index 000000000..220da9699 --- /dev/null +++ b/itests/raft_messagesigner_test.go @@ -0,0 +1,577 @@ +package itests + +import ( + "context" + "crypto/rand" + "fmt" + "reflect" + "testing" + "time" + + "github.com/google/uuid" + gorpc "github.com/libp2p/go-libp2p-gorpc" + libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto" + 
"github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/messagesigner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + consensus "github.com/filecoin-project/lotus/lib/consensus/raft" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/modules" +) + +func generatePrivKey() (*kit.Libp2p, error) { + privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader) + if err != nil { + return nil, err + } + + peerId, err := peer.IDFromPrivateKey(privkey) + if err != nil { + return nil, err + } + + return &kit.Libp2p{PeerID: peerId, PrivKey: privkey}, nil +} + +func getRaftState(ctx context.Context, t *testing.T, node *kit.TestFullNode) *api.RaftStateData { + raftState, err := node.RaftState(ctx) + require.NoError(t, err) + return raftState +} + +func setup(ctx context.Context, t *testing.T, node0 *kit.TestFullNode, node1 *kit.TestFullNode, node2 *kit.TestFullNode, miner *kit.TestMiner) *kit.Ensemble { + + blockTime := 1 * time.Second + + pkey0, _ := generatePrivKey() + pkey1, _ := generatePrivKey() + pkey2, _ := generatePrivKey() + + pkeys := []*kit.Libp2p{pkey0, pkey1, pkey2} + initPeerSet := []string{} + for _, pkey := range pkeys { + initPeerSet = append(initPeerSet, "/p2p/"+pkey.PeerID.String()) + } + + //initPeerSet := []peer.ID{pkey0.PeerID, pkey1.PeerID, pkey2.PeerID} + + raftOps := kit.ConstructorOpts( + node.Override(new(*gorpc.Client), modules.NewRPCClient), + node.Override(new(*consensus.ClusterRaftConfig), func() *consensus.ClusterRaftConfig { + cfg := consensus.DefaultClusterRaftConfig() + cfg.InitPeerset = 
initPeerSet + return cfg + }), + node.Override(new(*consensus.Consensus), consensus.NewConsensusWithRPCClient(false)), + node.Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus), + node.Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSignerConsensus) *messagesigner.MessageSignerConsensus { return ms }), + node.Override(new(*modules.RPCHandler), modules.NewRPCHandler), + node.Override(node.GoRPCServer, modules.NewRPCServer), + ) + //raftOps := kit.ConstructorOpts() + + ens := kit.NewEnsemble(t).FullNode(node0, raftOps, kit.ThroughRPC()).FullNode(node1, raftOps, kit.ThroughRPC()).FullNode(node2, raftOps, kit.ThroughRPC()) + node0.AssignPrivKey(pkey0) + node1.AssignPrivKey(pkey1) + node2.AssignPrivKey(pkey2) + + nodes := []*kit.TestFullNode{node0, node1, node2} + wrappedFullNode := kit.MergeFullNodes(nodes) + + ens.MinerEnroll(miner, wrappedFullNode, kit.WithAllSubsystems(), kit.ThroughRPC()) + ens.Start() + + // Import miner wallet to all nodes + addr0, err := node0.WalletImport(ctx, &miner.OwnerKey.KeyInfo) + require.NoError(t, err) + addr1, err := node1.WalletImport(ctx, &miner.OwnerKey.KeyInfo) + require.NoError(t, err) + addr2, err := node2.WalletImport(ctx, &miner.OwnerKey.KeyInfo) + require.NoError(t, err) + + fmt.Println(addr0, addr1, addr2) + + ens.InterconnectAll() + + ens.AddInactiveMiner(miner) + ens.Start() + + ens.InterconnectAll().BeginMining(blockTime) + + return ens +} + +func TestRaftState(t *testing.T) { + + kit.QuietMiningLogs() + ctx := context.Background() + + var ( + node0 kit.TestFullNode + node1 kit.TestFullNode + node2 kit.TestFullNode + miner kit.TestMiner + ) + + setup(ctx, t, &node0, &node1, &node2, &miner) + + fmt.Println(node0.WalletList(context.Background())) + fmt.Println(node1.WalletList(context.Background())) + fmt.Println(node2.WalletList(context.Background())) + + bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address) + require.NoError(t, err) + + msgHalfBal := 
&types.Message{ + From: miner.OwnerKey.Address, + To: node0.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + + mu := uuid.New() + smHalfBal, err := node0.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{ + MsgUuid: mu, + }) + require.NoError(t, err) + mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + rstate0 := getRaftState(ctx, t, &node0) + rstate1 := getRaftState(ctx, t, &node1) + rstate2 := getRaftState(ctx, t, &node2) + + require.EqualValues(t, rstate0, rstate1) + require.EqualValues(t, rstate0, rstate2) +} + +func TestRaftStateLeaderDisconnects(t *testing.T) { + + kit.QuietMiningLogs() + ctx := context.Background() + + var ( + node0 kit.TestFullNode + node1 kit.TestFullNode + node2 kit.TestFullNode + miner kit.TestMiner + ) + + nodes := []*kit.TestFullNode{&node0, &node1, &node2} + + setup(ctx, t, &node0, &node1, &node2, &miner) + + peerToNode := make(map[peer.ID]*kit.TestFullNode) + for _, n := range nodes { + peerToNode[n.Pkey.PeerID] = n + } + + bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address) + require.NoError(t, err) + + msgHalfBal := &types.Message{ + From: miner.OwnerKey.Address, + To: node0.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + mu := uuid.New() + smHalfBal, err := node0.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{ + MsgUuid: mu, + }) + require.NoError(t, err) + mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + rstate0 := getRaftState(ctx, t, &node0) + rstate1 := getRaftState(ctx, t, &node1) + rstate2 := getRaftState(ctx, t, &node2) + + require.True(t, reflect.DeepEqual(rstate0, rstate1)) + require.True(t, reflect.DeepEqual(rstate0, rstate2)) + + leader, err := node1.RaftLeader(ctx) + require.NoError(t, err) + leaderNode := 
peerToNode[leader] + + err = leaderNode.Stop(ctx) + require.NoError(t, err) + oldLeaderNode := leaderNode + + time.Sleep(5 * time.Second) + + newLeader := leader + for _, n := range nodes { + if n != leaderNode { + newLeader, err = n.RaftLeader(ctx) + require.NoError(t, err) + require.NotEqual(t, newLeader, leader) + } + } + + require.NotEqual(t, newLeader, leader) + leaderNode = peerToNode[newLeader] + + msg2 := &types.Message{ + From: miner.OwnerKey.Address, + To: leaderNode.DefaultKey.Address, + Value: big.NewInt(100000), + } + mu2 := uuid.New() + signedMsg2, err := leaderNode.MpoolPushMessage(ctx, msg2, &api.MessageSendSpec{ + MsgUuid: mu2, + }) + require.NoError(t, err) + mLookup, err = leaderNode.StateWaitMsg(ctx, signedMsg2.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + rstate := getRaftState(ctx, t, leaderNode) + + for _, n := range nodes { + if n != oldLeaderNode { + rs := getRaftState(ctx, t, n) + require.True(t, reflect.DeepEqual(rs, rstate)) + } + } +} + +func TestRaftStateMiner(t *testing.T) { + + kit.QuietMiningLogs() + ctx := context.Background() + + var ( + node0 kit.TestFullNode + node1 kit.TestFullNode + node2 kit.TestFullNode + miner kit.TestMiner + ) + + setup(ctx, t, &node0, &node1, &node2, &miner) + + fmt.Println(node0.WalletList(context.Background())) + fmt.Println(node1.WalletList(context.Background())) + fmt.Println(node2.WalletList(context.Background())) + + bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address) + require.NoError(t, err) + + msgHalfBal := &types.Message{ + From: miner.OwnerKey.Address, + To: node0.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + mu := uuid.New() + smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{ + MsgUuid: mu, + }) + require.NoError(t, err) + mLookup, err := node0.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + 
require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + rstate0 := getRaftState(ctx, t, &node0) + rstate1 := getRaftState(ctx, t, &node1) + rstate2 := getRaftState(ctx, t, &node2) + + require.EqualValues(t, rstate0, rstate1) + require.EqualValues(t, rstate0, rstate2) +} + +func TestRaftStateLeaderDisconnectsMiner(t *testing.T) { + + kit.QuietMiningLogs() + ctx := context.Background() + + var ( + node0 kit.TestFullNode + node1 kit.TestFullNode + node2 kit.TestFullNode + miner kit.TestMiner + ) + + nodes := []*kit.TestFullNode{&node0, &node1, &node2} + + setup(ctx, t, &node0, &node1, &node2, &miner) + + peerToNode := make(map[peer.ID]*kit.TestFullNode) + for _, n := range nodes { + peerToNode[n.Pkey.PeerID] = n + } + + leader, err := node0.RaftLeader(ctx) + require.NoError(t, err) + leaderNode := peerToNode[leader] + + // Take leader node down + err = leaderNode.Stop(ctx) + require.NoError(t, err) + oldLeaderNode := leaderNode + + time.Sleep(5 * time.Second) + + newLeader := leader + for _, n := range nodes { + if n != leaderNode { + newLeader, err = n.RaftLeader(ctx) + require.NoError(t, err) + require.NotEqual(t, newLeader, leader) + } + } + + require.NotEqual(t, newLeader, leader) + leaderNode = peerToNode[newLeader] + + msg2 := &types.Message{ + From: miner.OwnerKey.Address, + To: node0.DefaultKey.Address, + Value: big.NewInt(100000), + } + mu2 := uuid.New() + + signedMsg2, err := miner.FullNode.MpoolPushMessage(ctx, msg2, &api.MessageSendSpec{ + MaxFee: abi.TokenAmount(config.DefaultDefaultMaxFee), + MsgUuid: mu2, + }) + require.NoError(t, err) + + mLookup, err := leaderNode.StateWaitMsg(ctx, signedMsg2.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + rstate := getRaftState(ctx, t, leaderNode) + + for _, n := range nodes { + if n != oldLeaderNode { + rs := getRaftState(ctx, t, n) + require.True(t, reflect.DeepEqual(rs, rstate)) + } + } +} + +// Miner sends message on leader +// 
Leader disconnects +// Call StateWaitMsg on new leader +func TestLeaderDisconnectsCheckMsgStateOnNewLeader(t *testing.T) { + + kit.QuietMiningLogs() + ctx := context.Background() + + var ( + node0 kit.TestFullNode + node1 kit.TestFullNode + node2 kit.TestFullNode + miner kit.TestMiner + ) + + nodes := []*kit.TestFullNode{&node0, &node1, &node2} + + setup(ctx, t, &node0, &node1, &node2, &miner) + + peerToNode := make(map[peer.ID]*kit.TestFullNode) + for _, n := range nodes { + peerToNode[n.Pkey.PeerID] = n + } + + bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address) + require.NoError(t, err) + + msgHalfBal := &types.Message{ + From: miner.OwnerKey.Address, + To: node0.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + mu := uuid.New() + smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{ + MsgUuid: mu, + }) + require.NoError(t, err) + + leader, err := node0.RaftLeader(ctx) + require.NoError(t, err) + leaderNode := peerToNode[leader] + + // Take leader node down + err = leaderNode.Stop(ctx) + require.NoError(t, err) + oldLeaderNode := leaderNode + + time.Sleep(5 * time.Second) + + // Check if all active nodes update their leader + newLeader := leader + for _, n := range nodes { + if n != leaderNode { + newLeader, err = n.RaftLeader(ctx) + require.NoError(t, err) + require.NotEqual(t, newLeader, leader) + } + } + + require.NotEqual(t, newLeader, leader) + leaderNode = peerToNode[newLeader] + + mLookup, err := leaderNode.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + rstate := getRaftState(ctx, t, leaderNode) + + // Check if Raft state is consistent on all active nodes + for _, n := range nodes { + if n != oldLeaderNode { + rs := getRaftState(ctx, t, n) + require.True(t, reflect.DeepEqual(rs, rstate)) + } + } +} + +func TestChainStoreSync(t *testing.T) { + + kit.QuietMiningLogs() + ctx := 
context.Background() + + var ( + node0 kit.TestFullNode + node1 kit.TestFullNode + node2 kit.TestFullNode + miner kit.TestMiner + ) + + nodes := []*kit.TestFullNode{&node0, &node1, &node2} + + setup(ctx, t, &node0, &node1, &node2, &miner) + + peerToNode := make(map[peer.ID]*kit.TestFullNode) + for _, n := range nodes { + peerToNode[n.Pkey.PeerID] = n + } + + bal, err := node0.WalletBalance(ctx, node0.DefaultKey.Address) + require.NoError(t, err) + + leader, err := node0.RaftLeader(ctx) + require.NoError(t, err) + leaderNode := peerToNode[leader] + + msgHalfBal := &types.Message{ + From: miner.OwnerKey.Address, + To: node0.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + mu := uuid.New() + smHalfBal, err := miner.FullNode.MpoolPushMessage(ctx, msgHalfBal, &api.MessageSendSpec{ + MsgUuid: mu, + }) + require.NoError(t, err) + + for _, n := range nodes { + fmt.Println(n != leaderNode) + if n != leaderNode { + mLookup, err := n.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + //break + } + } +} + +func TestGoRPCAuth(t *testing.T) { + // TODO Fix Raft, then enable this test. 
https://github.com/filecoin-project/lotus/issues/9888 + t.SkipNow() + + blockTime := 1 * time.Second + + kit.QuietMiningLogs() + ctx := context.Background() + + var ( + node0 kit.TestFullNode + node1 kit.TestFullNode + node2 kit.TestFullNode + node3 kit.TestFullNode + miner kit.TestMiner + ) + + pkey0, _ := generatePrivKey() + pkey1, _ := generatePrivKey() + pkey2, _ := generatePrivKey() + + pkeys := []*kit.Libp2p{pkey0, pkey1, pkey2} + initPeerSet := []string{} + for _, pkey := range pkeys { + initPeerSet = append(initPeerSet, "/p2p/"+pkey.PeerID.String()) + } + + raftOps := kit.ConstructorOpts( + node.Override(new(*gorpc.Client), modules.NewRPCClient), + node.Override(new(*consensus.ClusterRaftConfig), func() *consensus.ClusterRaftConfig { + cfg := consensus.DefaultClusterRaftConfig() + cfg.InitPeerset = initPeerSet + return cfg + }), + node.Override(new(*consensus.Consensus), consensus.NewConsensusWithRPCClient(false)), + node.Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus), + node.Override(new(messagesigner.MsgSigner), func(ms *messagesigner.MessageSignerConsensus) *messagesigner.MessageSignerConsensus { return ms }), + node.Override(new(*modules.RPCHandler), modules.NewRPCHandler), + node.Override(node.GoRPCServer, modules.NewRPCServer), + ) + //raftOps := kit.ConstructorOpts() + + ens := kit.NewEnsemble(t).FullNode(&node0, raftOps, kit.ThroughRPC()).FullNode(&node1, raftOps, kit.ThroughRPC()).FullNode(&node2, raftOps, kit.ThroughRPC()).FullNode(&node3, raftOps) + node0.AssignPrivKey(pkey0) + node1.AssignPrivKey(pkey1) + node2.AssignPrivKey(pkey2) + + nodes := []*kit.TestFullNode{&node0, &node1, &node2} + wrappedFullNode := kit.MergeFullNodes(nodes) + + ens.MinerEnroll(&miner, wrappedFullNode, kit.WithAllSubsystems(), kit.ThroughRPC()) + ens.Start() + + // Import miner wallet to all nodes + addr0, err := node0.WalletImport(ctx, &miner.OwnerKey.KeyInfo) + require.NoError(t, err) + addr1, err := 
node1.WalletImport(ctx, &miner.OwnerKey.KeyInfo) + require.NoError(t, err) + addr2, err := node2.WalletImport(ctx, &miner.OwnerKey.KeyInfo) + require.NoError(t, err) + + fmt.Println(addr0, addr1, addr2) + + ens.InterconnectAll() + + ens.AddInactiveMiner(&miner) + ens.Start() + + ens.InterconnectAll().BeginMining(blockTime) + + leader, err := node0.RaftLeader(ctx) + require.NoError(t, err) + + client := node3.FullNode.(*impl.FullNodeAPI).RaftAPI.MessageSigner.Consensus.RpcClient + method := "MpoolPushMessage" + + msg := &types.Message{ + From: miner.OwnerKey.Address, + To: node0.DefaultKey.Address, + Value: big.NewInt(100000), + } + msgWhole := &api.MpoolMessageWhole{Msg: msg} + var ret types.SignedMessage + + err = client.CallContext(ctx, leader, "Consensus", method, msgWhole, &ret) + require.True(t, gorpc.IsAuthorizationError(err)) + +} diff --git a/itests/sector_pledge_test.go b/itests/sector_pledge_test.go index b4e5c1133..eb93cfe9a 100644 --- a/itests/sector_pledge_test.go +++ b/itests/sector_pledge_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" "github.com/filecoin-project/lotus/api" @@ -39,7 +40,7 @@ func TestPledgeSectors(t *testing.T) { defer cancel() _, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) - ens.InterconnectAll().BeginMining(blockTime) + ens.InterconnectAll().BeginMiningMustPost(blockTime) miner.PledgeSectors(ctx, nSectors, 0, nil) } @@ -65,12 +66,18 @@ func TestPledgeBatching(t *testing.T) { //stm: @SECTOR_PRE_COMMIT_FLUSH_001, @SECTOR_COMMIT_FLUSH_001 blockTime := 50 * time.Millisecond - runTest := func(t *testing.T, nSectors int) { + runTest := func(t *testing.T, nSectors int, aggregate bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs()) - 
ens.InterconnectAll().BeginMining(blockTime) + kit.QuietMiningLogs() + + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(!aggregate), kit.MutateSealingConfig(func(sc *config.SealingConfig) { + if aggregate { + sc.AggregateAboveBaseFee = types.FIL(big.Zero()) + } + })) + ens.InterconnectAll().BeginMiningMustPost(blockTime) client.WaitTillChain(ctx, kit.HeightAtLeast(10)) @@ -114,7 +121,10 @@ func TestPledgeBatching(t *testing.T) { } t.Run("100", func(t *testing.T) { - runTest(t, 100) + runTest(t, 100, false) + }) + t.Run("10-agg", func(t *testing.T) { + runTest(t, 10, true) }) } diff --git a/itests/sector_terminate_test.go b/itests/sector_terminate_test.go index 34b325f2a..f3524c198 100644 --- a/itests/sector_terminate_test.go +++ b/itests/sector_terminate_test.go @@ -2,10 +2,15 @@ package itests import ( + "bytes" "context" "testing" "time" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/node/basicnode" + "github.com/multiformats/go-multicodec" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-bitfield" @@ -13,6 +18,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/lib/must" sealing "github.com/filecoin-project/lotus/storage/pipeline" ) @@ -164,4 +170,31 @@ loop: require.Equal(t, p.MinerPower, p.TotalPower) require.Equal(t, types.NewInt(uint64(ssz)*uint64(nSectors-1)), p.MinerPower.RawBytePower) + + // check "sector-terminated" actor event + var epochZero abi.ChainEpoch + allEvents, err := miner.FullNode.GetActorEventsRaw(ctx, &types.ActorEventFilter{ + FromHeight: &epochZero, + }) + require.NoError(t, err) + for _, key := range []string{"sector-precommitted", "sector-activated", "sector-terminated"} { + var found bool + keyBytes := must.One(ipld.Encode(basicnode.NewString(key), dagcbor.Encode)) + for _, event := range allEvents { + for _, e := range event.Entries { + if 
e.Key == "$type" && bytes.Equal(e.Value, keyBytes) { + found = true + if key == "sector-terminated" { + expectedEntries := []types.EventEntry{ + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "$type", Value: keyBytes}, + {Flags: 0x03, Codec: uint64(multicodec.Cbor), Key: "sector", Value: must.One(ipld.Encode(basicnode.NewInt(int64(toTerminate)), dagcbor.Encode))}, + } + require.Equal(t, expectedEntries, event.Entries) + } + break + } + } + } + require.True(t, found, "expected to find event %s", key) + } } diff --git a/itests/verifreg_test.go b/itests/verifreg_test.go index ffe50c72b..07a31477d 100644 --- a/itests/verifreg_test.go +++ b/itests/verifreg_test.go @@ -503,3 +503,152 @@ func makeVerifier(ctx context.Context, t *testing.T, api *impl.FullNodeAPI, root require.NoError(t, err) require.Equal(t, allowance, *verifierAllowance) } + +func TestVerifiedListAllAllocationsAndClaims(t *testing.T) { + blockTime := 100 * time.Millisecond + + rootKey, err := key.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifier1Key, err := key.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + verifiedClientKey, err := key.GenerateKey(types.KTBLS) + require.NoError(t, err) + + bal, err := types.ParseFIL("100fil") + require.NoError(t, err) + + node, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), + kit.RootVerifier(rootKey, abi.NewTokenAmount(bal.Int64())), + kit.Account(verifier1Key, abi.NewTokenAmount(bal.Int64())), + kit.Account(verifiedClientKey, abi.NewTokenAmount(bal.Int64())), + ) + + ens.InterconnectAll().BeginMining(blockTime) + + api := node.FullNode.(*impl.FullNodeAPI) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // get VRH + vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{}) + fmt.Println(vrh.String()) + require.NoError(t, err) + + // import the root key. + rootAddr, err := api.WalletImport(ctx, &rootKey.KeyInfo) + require.NoError(t, err) + + // import the verifiers' keys. 
+ verifier1Addr, err := api.WalletImport(ctx, &verifier1Key.KeyInfo) + require.NoError(t, err) + + // import the verified client's key. + verifiedClientAddr, err := api.WalletImport(ctx, &verifiedClientKey.KeyInfo) + require.NoError(t, err) + + // resolve all keys + + // make the 2 verifiers + + makeVerifier(ctx, t, api, rootAddr, verifier1Addr) + + // assign datacap to a client + initialDatacap := big.NewInt(20000) + + params, err := actors.SerializeParams(&verifregst.AddVerifiedClientParams{Address: verifiedClientAddr, Allowance: initialDatacap}) + require.NoError(t, err) + + msg := &types.Message{ + From: verifier1Addr, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifiedClient, + Params: params, + Value: big.Zero(), + } + + sm, err := api.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + // check datacap balance + dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + require.Equal(t, *dcap, initialDatacap) + + minerId, err := address.IDFromAddress(miner.ActorAddr) + require.NoError(t, err) + + allocationRequest1 := verifregst.AllocationRequest{ + Provider: abi.ActorID(minerId), + Data: cid.MustParse("baga6ea4seaaqa"), + Size: abi.PaddedPieceSize(initialDatacap.Uint64() / 2), + TermMin: verifregst.MinimumVerifiedAllocationTerm, + TermMax: verifregst.MinimumVerifiedAllocationTerm, + Expiration: verifregst.MaximumVerifiedAllocationExpiration, + } + + allocationRequest2 := verifregst.AllocationRequest{ + Provider: abi.ActorID(minerId), + Data: cid.MustParse("baga6ea4seaaqc"), + Size: abi.PaddedPieceSize(initialDatacap.Uint64() / 2), + TermMin: verifregst.MinimumVerifiedAllocationTerm, + TermMax: verifregst.MinimumVerifiedAllocationTerm, + Expiration: verifregst.MaximumVerifiedAllocationExpiration, + } + + allocationRequests := 
verifregst.AllocationRequests{ + Allocations: []verifregst.AllocationRequest{allocationRequest1, allocationRequest2}, + } + + receiverParams, err := actors.SerializeParams(&allocationRequests) + require.NoError(t, err) + + transferParams, err := actors.SerializeParams(&datacap2.TransferParams{ + To: builtin.VerifiedRegistryActorAddr, + Amount: big.Mul(initialDatacap, builtin.TokenPrecision), + OperatorData: receiverParams, + }) + require.NoError(t, err) + + msg = &types.Message{ + To: builtin.DatacapActorAddr, + From: verifiedClientAddr, + Method: datacap.Methods.TransferExported, + Params: transferParams, + Value: big.Zero(), + } + + sm, err = api.MpoolPushMessage(ctx, msg, nil) + require.NoError(t, err) + + res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) + require.NoError(t, err) + require.EqualValues(t, 0, res.Receipt.ExitCode) + + allocations, err := api.StateGetAllAllocations(ctx, types.EmptyTSK) + require.NoError(t, err) + + require.Equal(t, 2, len(allocations)) + + var pcids []string + + for _, a := range allocations { + clientIdAddr, err := api.StateLookupID(ctx, verifiedClientAddr, types.EmptyTSK) + require.NoError(t, err) + clientId, err := address.IDFromAddress(clientIdAddr) + require.NoError(t, err) + require.Equal(t, abi.ActorID(clientId), a.Client) + require.Equal(t, abi.ActorID(minerId), a.Provider) + require.Equal(t, abi.PaddedPieceSize(10000), a.Size) + pcids = append(pcids, a.Data.String()) + } + + require.ElementsMatch(t, []string{"baga6ea4seaaqa", "baga6ea4seaaqc"}, pcids) + + // TODO: Add claims check to this test once https://github.com/filecoin-project/lotus/pull/11618 lands +} diff --git a/journal/fsjournal/fs.go b/journal/fsjournal/fs.go index 5a74fbfc6..b2eb946fd 100644 --- a/journal/fsjournal/fs.go +++ b/journal/fsjournal/fs.go @@ -7,7 +7,6 @@ import ( "path/filepath" logging "github.com/ipfs/go-log/v2" - "github.com/mitchellh/go-homedir" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/build" @@ -42,11 
+41,6 @@ func OpenFSJournal(lr repo.LockedRepo, disabled journal.DisabledEvents) (journal } func OpenFSJournalPath(path string, disabled journal.DisabledEvents) (journal.Journal, error) { - path, err := homedir.Expand(path) - if err != nil { - return nil, xerrors.Errorf("failed to expand repo path: %w", err) - } - dir := filepath.Join(path, "journal") if err := os.MkdirAll(dir, 0755); err != nil { return nil, fmt.Errorf("failed to mk directory %s for file journal: %w", dir, err) diff --git a/lib/consensus/raft/config.go b/lib/consensus/raft/config.go new file mode 100644 index 000000000..bdd82c108 --- /dev/null +++ b/lib/consensus/raft/config.go @@ -0,0 +1,135 @@ +package consensus + +import ( + "io" + "path/filepath" + "time" + + hraft "github.com/hashicorp/raft" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/repo" +) + +// Configuration defaults +var ( + DefaultDataSubFolder = "raft-cluster" + DefaultWaitForLeaderTimeout = 15 * time.Second + DefaultCommitRetries = 1 + DefaultNetworkTimeout = 100 * time.Second + DefaultCommitRetryDelay = 200 * time.Millisecond + DefaultBackupsRotate = 6 +) + +// ClusterRaftConfig allows to configure the Raft Consensus component for the node cluster. +type ClusterRaftConfig struct { + // config to enabled node cluster with raft consensus + ClusterModeEnabled bool + // A folder to store Raft's data. + DataFolder string + // InitPeerset provides the list of initial cluster peers for new Raft + // peers (with no prior state). It is ignored when Raft was already + // initialized or when starting in staging mode. + InitPeerset []string + // LeaderTimeout specifies how long to wait for a leader before + // failing an operation. 
+ WaitForLeaderTimeout time.Duration + // NetworkTimeout specifies how long before a Raft network + // operation is timed out + NetworkTimeout time.Duration + // CommitRetries specifies how many times we retry a failed commit until + // we give up. + CommitRetries int + // How long to wait between retries + CommitRetryDelay time.Duration + // BackupsRotate specifies the maximum number of Raft's DataFolder + // copies that we keep as backups (renaming) after cleanup. + BackupsRotate int + // A Hashicorp Raft's configuration object. + RaftConfig *hraft.Config + + // Tracing enables propagation of contexts across binary boundaries. + Tracing bool +} + +func DefaultClusterRaftConfig() *ClusterRaftConfig { + var cfg ClusterRaftConfig + cfg.DataFolder = "" // empty so it gets omitted + cfg.InitPeerset = []string{} + cfg.WaitForLeaderTimeout = DefaultWaitForLeaderTimeout + cfg.NetworkTimeout = DefaultNetworkTimeout + cfg.CommitRetries = DefaultCommitRetries + cfg.CommitRetryDelay = DefaultCommitRetryDelay + cfg.BackupsRotate = DefaultBackupsRotate + cfg.RaftConfig = hraft.DefaultConfig() + + // These options are imposed over any Default Raft Config. 
+ cfg.RaftConfig.ShutdownOnRemove = false + cfg.RaftConfig.LocalID = "will_be_set_automatically" + + // Set up logging + cfg.RaftConfig.LogOutput = io.Discard + return &cfg +} + +func NewClusterRaftConfig(userRaftConfig *config.UserRaftConfig) *ClusterRaftConfig { + var cfg ClusterRaftConfig + cfg.DataFolder = userRaftConfig.DataFolder + cfg.InitPeerset = userRaftConfig.InitPeersetMultiAddr + cfg.WaitForLeaderTimeout = time.Duration(userRaftConfig.WaitForLeaderTimeout) + cfg.NetworkTimeout = time.Duration(userRaftConfig.NetworkTimeout) + cfg.CommitRetries = userRaftConfig.CommitRetries + cfg.CommitRetryDelay = time.Duration(userRaftConfig.CommitRetryDelay) + cfg.BackupsRotate = userRaftConfig.BackupsRotate + + // Keep this to be default hraft config for now + cfg.RaftConfig = hraft.DefaultConfig() + + // These options are imposed over any Default Raft Config. + cfg.RaftConfig.ShutdownOnRemove = false + cfg.RaftConfig.LocalID = "will_be_set_automatically" + + // Set up logging + cfg.RaftConfig.LogOutput = io.Discard + + return &cfg + +} + +// Validate checks that this configuration has working values, +// at least in appearance. +func ValidateConfig(cfg *ClusterRaftConfig) error { + if cfg.RaftConfig == nil { + return xerrors.Errorf("no hashicorp/raft.Config") + } + if cfg.WaitForLeaderTimeout <= 0 { + return xerrors.Errorf("wait_for_leader_timeout <= 0") + } + + if cfg.NetworkTimeout <= 0 { + return xerrors.Errorf("network_timeout <= 0") + } + + if cfg.CommitRetries < 0 { + return xerrors.Errorf("commit_retries is invalid") + } + + if cfg.CommitRetryDelay <= 0 { + return xerrors.Errorf("commit_retry_delay is invalid") + } + + if cfg.BackupsRotate <= 0 { + return xerrors.Errorf("backups_rotate should be larger than 0") + } + + return hraft.ValidateConfig(cfg.RaftConfig) +} + +// GetDataFolder returns the Raft data folder that we are using. 
+func (cfg *ClusterRaftConfig) GetDataFolder(repo repo.LockedRepo) string { + if cfg.DataFolder == "" { + return filepath.Join(repo.Path(), DefaultDataSubFolder) + } + return filepath.Join(repo.Path(), cfg.DataFolder) +} diff --git a/lib/consensus/raft/consensus.go b/lib/consensus/raft/consensus.go new file mode 100644 index 000000000..d74f200fa --- /dev/null +++ b/lib/consensus/raft/consensus.go @@ -0,0 +1,512 @@ +// Package raft implements a Consensus component for IPFS Cluster which uses +// Raft (go-libp2p-raft). +package consensus + +import ( + "bytes" + "context" + "errors" + "fmt" + "sort" + "time" + + "github.com/google/uuid" + "golang.org/x/exp/slices" + + addr "github.com/filecoin-project/go-address" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/addrutil" + "github.com/filecoin-project/lotus/node/repo" + + //ds "github.com/ipfs/go-datastore" + logging "github.com/ipfs/go-log/v2" + consensus "github.com/libp2p/go-libp2p-consensus" + rpc "github.com/libp2p/go-libp2p-gorpc" + libp2praft "github.com/libp2p/go-libp2p-raft" + host "github.com/libp2p/go-libp2p/core/host" + peer "github.com/libp2p/go-libp2p/core/peer" +) + +var logger = logging.Logger("raft") + +type RaftState struct { + NonceMap api.NonceMapType + MsgUuids api.MsgUuidMapType + + // TODO: add comment explaining why this is needed + // We need a reference to the messagepool in the raft state in order to + // sync messages that have been sent by the leader node + // Miner calls StateWaitMsg after MpoolPushMessage to check if the message has + // landed on chain. This check requires the message be stored in the local chainstore + // If a leadernode goes down after sending a message to the chain and is replaced by + // another node, the other node needs to have this message in its chainstore for the + // above check to succeed. 
+ + // This is because the miner only stores signed CIDs but the message received from in a + // block will be unsigned (for BLS). Hence, the process relies on the node to store the + // signed message which holds a copy of the unsigned message to properly perform all the + // needed checks + Mpool *messagepool.MessagePool +} + +func newRaftState(mpool *messagepool.MessagePool) *RaftState { + return &RaftState{ + NonceMap: make(map[addr.Address]uint64), + MsgUuids: make(map[uuid.UUID]*types.SignedMessage), + Mpool: mpool, + } +} + +type ConsensusOp struct { + Nonce uint64 `codec:"nonce,omitempty"` + Uuid uuid.UUID `codec:"uuid,omitempty"` + Addr addr.Address `codec:"addr,omitempty"` + SignedMsg *types.SignedMessage `codec:"signedMsg,omitempty"` +} + +func (c ConsensusOp) ApplyTo(state consensus.State) (consensus.State, error) { + s := state.(*RaftState) + s.NonceMap[c.Addr] = c.Nonce + if c.SignedMsg != nil { + + // Deep copy to tmp + var buffer bytes.Buffer + err := c.SignedMsg.MarshalCBOR(&buffer) + if err != nil { + return nil, err + } + tmp, err := types.DecodeSignedMessage(buffer.Bytes()) + if err != nil { + return nil, err + } + s.MsgUuids[c.Uuid] = tmp + + _, err = s.Mpool.Push(context.TODO(), tmp, false) + // Since this is only meant to keep messages in sync, ignore any error which + // shows the message already exists in the mpool + if err != nil && !api.ErrorIsIn(err, []error{messagepool.ErrExistingNonce}) { + return nil, err + } + } + + return s, nil +} + +var _ consensus.Op = &ConsensusOp{} + +// Consensus handles the work of keeping a shared-state between +// the peers of a Lotus Cluster, as well as modifying that state and +// applying any updates in a thread-safe manner. 
+type Consensus struct { + ctx context.Context + cancel func() + config *ClusterRaftConfig + + host host.Host + + consensus consensus.OpLogConsensus + actor consensus.Actor + raft *raftWrapper + state *RaftState + + RpcClient *rpc.Client + rpcReady chan struct{} + readyCh chan struct{} + + peerSet []peer.ID + repo repo.LockedRepo +} + +// NewConsensus builds a new ClusterConsensus component using Raft. +// +// Raft saves state snapshots regularly and persists log data in a bolt +// datastore. Therefore, unless memory usage is a concern, it is recommended +// to use an in-memory go-datastore as store parameter. +// +// The staging parameter controls if the Raft peer should start in +// staging mode (used when joining a new Raft peerset with other peers). +func NewConsensus(host host.Host, cfg *ClusterRaftConfig, mpool *messagepool.MessagePool, repo repo.LockedRepo, staging bool) (*Consensus, error) { + err := ValidateConfig(cfg) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(context.Background()) + + logger.Debug("starting Consensus and waiting for a leader...") + state := newRaftState(mpool) + + consensus := libp2praft.NewOpLog(state, &ConsensusOp{}) + + raft, err := newRaftWrapper(host, cfg, consensus.FSM(), repo, staging) + if err != nil { + logger.Error("error creating raft: ", err) + cancel() + return nil, err + } + actor := libp2praft.NewActor(raft.raft) + consensus.SetActor(actor) + + peers := []peer.ID{} + addrInfos, err := addrutil.ParseAddresses(ctx, cfg.InitPeerset) + if err != nil { + logger.Error("error parsing addresses: ", err) + cancel() + return nil, err + } + + for _, addrInfo := range addrInfos { + peers = append(peers, addrInfo.ID) + + // Add peer to address book + host.Peerstore().AddAddrs(addrInfo.ID, addrInfo.Addrs, time.Hour*100) + } + + cc := &Consensus{ + ctx: ctx, + cancel: cancel, + config: cfg, + host: host, + consensus: consensus, + actor: actor, + state: state, + raft: raft, + peerSet: peers, + rpcReady: 
make(chan struct{}, 1), + readyCh: make(chan struct{}, 1), + repo: repo, + } + + go cc.finishBootstrap() + return cc, nil + +} + +// TODO: Merge with NewConsensus and remove the rpcReady chan +func NewConsensusWithRPCClient(staging bool) func(host host.Host, + cfg *ClusterRaftConfig, + rpcClient *rpc.Client, + mpool *messagepool.MessagePool, + repo repo.LockedRepo, +) (*Consensus, error) { + + return func(host host.Host, cfg *ClusterRaftConfig, rpcClient *rpc.Client, mpool *messagepool.MessagePool, repo repo.LockedRepo) (*Consensus, error) { + cc, err := NewConsensus(host, cfg, mpool, repo, staging) + if err != nil { + return nil, err + } + cc.RpcClient = rpcClient + cc.rpcReady <- struct{}{} + return cc, nil + } +} + +// WaitForSync waits for a leader and for the state to be up to date, then returns. +func (cc *Consensus) WaitForSync(ctx context.Context) error { + + leaderCtx, cancel := context.WithTimeout(ctx, cc.config.WaitForLeaderTimeout) + defer cancel() + + // 1 - wait for leader + // 2 - wait until we are a Voter + // 3 - wait until last index is applied + + // From raft docs: + + // once a staging server receives enough log entries to be sufficiently + // caught up to the leader's log, the leader will invoke a membership + // change to change the Staging server to a Voter + + // Thus, waiting to be a Voter is a guarantee that we have a reasonable + // up to date state. 
Otherwise, we might return too early (see + // https://github.com/ipfs-cluster/ipfs-cluster/issues/378) + + _, err := cc.raft.WaitForLeader(leaderCtx) + if err != nil { + return errors.New("error waiting for leader: " + err.Error()) + } + + err = cc.raft.WaitForVoter(ctx) + if err != nil { + return errors.New("error waiting to become a Voter: " + err.Error()) + } + + err = cc.raft.WaitForUpdates(ctx) + if err != nil { + return errors.New("error waiting for consensus updates: " + err.Error()) + } + return nil +} + +// waits until there is a consensus leader and syncs the state +// to the tracker. If errors happen, this will return and never +// signal the component as Ready. +func (cc *Consensus) finishBootstrap() { + // wait until we have RPC to perform any actions. + select { + case <-cc.ctx.Done(): + return + case <-cc.rpcReady: + } + + // Sometimes bootstrap is a no-Op. It only applies when + // no state exists and staging=false. + _, err := cc.raft.Bootstrap() + if err != nil { + return + } + + logger.Debugf("Bootstrap finished") + err = cc.WaitForSync(cc.ctx) + if err != nil { + return + } + logger.Debug("Raft state is now up to date") + logger.Debug("consensus ready") + cc.readyCh <- struct{}{} +} + +// Shutdown stops the component so it will not process any +// more updates. The underlying consensus is permanently +// shutdown, along with the libp2p transport. +func (cc *Consensus) Shutdown(ctx context.Context) error { + + logger.Info("stopping Consensus component") + + // Raft Shutdown + err := cc.raft.Shutdown(ctx) + if err != nil { + logger.Error(err) + } + + cc.cancel() + close(cc.rpcReady) + return nil +} + +// Ready returns a channel which is signaled when the Consensus +// algorithm has finished bootstrapping and is ready to use +func (cc *Consensus) Ready(ctx context.Context) <-chan struct{} { + return cc.readyCh +} + +// IsTrustedPeer returns true. In Raft we trust all peers. 
+func (cc *Consensus) IsTrustedPeer(ctx context.Context, p peer.ID) bool {
+	return slices.Contains(cc.peerSet, p)
+}
+
+// Trust is a no-Op.
+func (cc *Consensus) Trust(ctx context.Context, pid peer.ID) error { return nil }
+
+// Distrust is a no-Op.
+func (cc *Consensus) Distrust(ctx context.Context, pid peer.ID) error { return nil }
+
+// RedirectToLeader forwards the given RPC method and argument to the current
+// Raft leader. It returns true if the operation was redirected to the leader.
+// Note that if the leader just disappeared, the rpc call will
+// fail because we haven't heard that it's gone.
+func (cc *Consensus) RedirectToLeader(method string, arg interface{}, ret interface{}) (bool, error) {
+	ctx := cc.ctx
+
+	var finalErr error
+
+	// Retry redirects
+	for i := 0; i <= cc.config.CommitRetries; i++ {
+		logger.Debugf("redirect try %d", i)
+		leader, err := cc.Leader(ctx)
+
+		// No leader, wait for one
+		if err != nil {
+			logger.Warn("there seems to be no leader. Waiting for one")
+			rctx, cancel := context.WithTimeout(ctx, cc.config.WaitForLeaderTimeout)
+			pidstr, err := cc.raft.WaitForLeader(rctx)
+			// Release the timeout context immediately: a `defer cancel()`
+			// here would only run when RedirectToLeader returns, leaking one
+			// context per retry iteration of this loop.
+			cancel()
+
+			// means we timed out waiting for a leader
+			// we don't retry in this case
+			if err != nil {
+				return false, fmt.Errorf("timed out waiting for leader: %s", err)
+			}
+			leader, err = peer.Decode(pidstr)
+			if err != nil {
+				return false, err
+			}
+		}
+
+		logger.Infof("leader: %s, curr host: %s, peerSet: %s", leader, cc.host.ID(), cc.peerSet)
+
+		// We are the leader. Do not redirect
+		if leader == cc.host.ID() {
+			return false, nil
+		}
+
+		logger.Debugf("redirecting %s to leader: %s", method, leader)
+		finalErr = cc.RpcClient.CallContext(
+			ctx,
+			leader,
+			"Consensus",
+			method,
+			arg,
+			ret,
+		)
+		if finalErr != nil {
+			logger.Errorf("retrying to redirect request to leader: %s", finalErr)
+			time.Sleep(2 * cc.config.RaftConfig.HeartbeatTimeout)
+			continue
+		}
+		break
+	}
+
+	// We tried to redirect, but something happened
+	return true, finalErr
+}
+
+// commit submits a cc.consensus commit. It retries upon failures.
+func (cc *Consensus) Commit(ctx context.Context, op *ConsensusOp) error {
+
+	var finalErr error
+	for i := 0; i <= cc.config.CommitRetries; i++ {
+		logger.Debugf("attempt #%d: committing %+v", i, op)
+
+		// this means we are retrying
+		if finalErr != nil {
+			logger.Errorf("retrying upon failed commit (retry %d): %s ",
+				i, finalErr)
+		}
+
+		// Being here means we are the LEADER. We can commit.
+		// now commit the changes to our state
+		_, finalErr = cc.consensus.CommitOp(op)
+		if finalErr == nil {
+			// Success: stop retrying. The previous goto-based loop fell
+			// through to the retry sleep even on success, sleeping and then
+			// re-committing the same op on every remaining iteration.
+			break
+		}
+		time.Sleep(cc.config.CommitRetryDelay)
+	}
+	return finalErr
+}
+
+// AddPeer adds a new peer to participate in this consensus. It will
+// forward the operation to the leader if this is not it.
+func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error {
+	var finalErr error
+	for i := 0; i <= cc.config.CommitRetries; i++ {
+		logger.Debugf("attempt #%d: AddPeer %s", i, pid)
+		if finalErr != nil {
+			logger.Errorf("retrying to add peer. Attempt #%d failed: %s", i, finalErr)
+		}
+		ok, err := cc.RedirectToLeader("AddPeer", pid, struct{}{})
+		if err != nil || ok {
+			return err
+		}
+		// Being here means we are the leader and can commit
+		finalErr = cc.raft.AddPeer(ctx, pid)
+		if finalErr != nil {
+			time.Sleep(cc.config.CommitRetryDelay)
+			continue
+		}
+		logger.Infof("peer added to Raft: %s", pid)
+		break
+	}
+	return finalErr
+}
+
+// RmPeer removes a peer from this consensus. It will
+// forward the operation to the leader if this is not it.
+func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
+	var finalErr error
+	for i := 0; i <= cc.config.CommitRetries; i++ {
+		logger.Debugf("attempt #%d: RmPeer %s", i, pid)
+		if finalErr != nil {
+			logger.Errorf("retrying to remove peer. Attempt #%d failed: %s", i, finalErr)
+		}
+		ok, err := cc.RedirectToLeader("RmPeer", pid, struct{}{})
+		if err != nil || ok {
+			return err
+		}
+		// Being here means we are the leader and can commit
+		finalErr = cc.raft.RemovePeer(ctx, pid.String())
+		if finalErr != nil {
+			time.Sleep(cc.config.CommitRetryDelay)
+			continue
+		}
+		logger.Infof("peer removed from Raft: %s", pid)
+		break
+	}
+	return finalErr
+}
+
+// State retrieves the current consensus RaftState. It may error if no RaftState has
+// been agreed upon or the state is not consistent. The returned RaftState is the
+// last agreed-upon RaftState known by this node. No writes are allowed, as all
+// writes to the shared state should happen through the Consensus component
+// methods.
+func (cc *Consensus) State(ctx context.Context) (*RaftState, error) {
+	st, err := cc.consensus.GetLogHead()
+	if err == libp2praft.ErrNoState {
+		return newRaftState(nil), nil
+	}
+
+	if err != nil {
+		return nil, err
+	}
+	state, ok := st.(*RaftState)
+	if !ok {
+		return nil, errors.New("wrong state type")
+	}
+	return state, nil
+}
+
+// Leader returns the peerID of the Leader of the
+// cluster. It returns an error when there is no leader.
+func (cc *Consensus) Leader(ctx context.Context) (peer.ID, error) {
+	// Note the hard-dependency on raft here...
+	raftactor := cc.actor.(*libp2praft.Actor)
+	return raftactor.Leader()
+}
+
+// Clean removes the Raft persisted state.
+func (cc *Consensus) Clean(ctx context.Context) error {
+	//return CleanupRaft(cc.config)
+	return nil
+}
+
+//Rollback replaces the current agreed-upon
+//state with the state provided. Only the consensus leader
+//can perform this operation.
+//func (cc *Consensus) Rollback(state RaftState) error {
+//	// This is unused. It *might* be used for upgrades.
+//	// There is rather untested magic in libp2p-raft's FSM()
+//	// to make this possible.
+//	return cc.consensus.Rollback(state)
+//}
+
+// Peers return the current list of peers in the consensus.
+// The list will be sorted alphabetically.
+func (cc *Consensus) Peers(ctx context.Context) ([]peer.ID, error) {
+
+	peers := []peer.ID{}
+	raftPeers, err := cc.raft.Peers(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("cannot retrieve list of peers: %s", err)
+	}
+
+	sort.Strings(raftPeers)
+
+	for _, p := range raftPeers {
+		id, err := peer.Decode(p)
+		if err != nil {
+			// Don't panic in library code: a malformed peer string in the
+			// Raft configuration should surface as an error to the caller,
+			// which already handles the error return of this method.
+			return nil, fmt.Errorf("could not decode peer %q: %s", p, err)
+		}
+		peers = append(peers, id)
+	}
+	return peers, nil
+}
+
+// IsLeader returns true if the current node is the Raft cluster leader.
+func (cc *Consensus) IsLeader(ctx context.Context) bool {
+	leader, _ := cc.Leader(ctx)
+	return leader == cc.host.ID()
+}
diff --git a/lib/consensus/raft/interfaces.go b/lib/consensus/raft/interfaces.go
new file mode 100644
index 000000000..2b77d1ebe
--- /dev/null
+++ b/lib/consensus/raft/interfaces.go
@@ -0,0 +1,41 @@
+package consensus
+
+import (
+	"context"
+
+	consensus "github.com/libp2p/go-libp2p-consensus"
+	"github.com/libp2p/go-libp2p/core/peer"
+)
+
+type ConsensusAPI interface {
+	// Returns a channel to signal that the consensus layer is ready
+	// allowing the main component to wait for it during start.
+	Ready(context.Context) <-chan struct{}
+
+	AddPeer(context.Context, peer.ID) error
+	RmPeer(context.Context, peer.ID) error
+	State(context.Context) (consensus.State, error)
+	// Provide a node which is responsible to perform
+	// specific tasks which must only run in 1 cluster peer.
+	Leader(context.Context) (peer.ID, error)
+	// Only returns when the consensus state has all log
+	// updates applied to it.
+	WaitForSync(context.Context) error
+	// Clean removes all consensus data.
+	Clean(context.Context) error
+	// Peers returns the peerset participating in the Consensus.
+	Peers(context.Context) ([]peer.ID, error)
+	// IsTrustedPeer returns true if the given peer is "trusted".
+	// This will grant access to more rpc endpoints and a
+	// non-trusted one. This should be fast as it will be
+	// called repeatedly for every remote RPC request.
+	IsTrustedPeer(context.Context, peer.ID) bool
+	// Trust marks a peer as "trusted".
+	Trust(context.Context, peer.ID) error
+	// Distrust removes a peer from the "trusted" set.
+	Distrust(context.Context, peer.ID) error
+	// Returns true if current node is the cluster leader
+	IsLeader(ctx context.Context) bool
+
+	Shutdown(context.Context) error
+}
diff --git a/lib/consensus/raft/raft.go b/lib/consensus/raft/raft.go
new file mode 100644
index 000000000..8541e6f87
--- /dev/null
+++ b/lib/consensus/raft/raft.go
@@ -0,0 +1,563 @@
+package consensus
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	hraft "github.com/hashicorp/raft"
+	raftboltdb "github.com/hashicorp/raft-boltdb"
+	"github.com/ipfs/go-log/v2"
+	p2praft "github.com/libp2p/go-libp2p-raft"
+	host "github.com/libp2p/go-libp2p/core/host"
+	peer "github.com/libp2p/go-libp2p/core/peer"
+	"go.uber.org/multierr"
+	"go.uber.org/zap"
+
+	"github.com/filecoin-project/lotus/lib/addrutil"
+	"github.com/filecoin-project/lotus/node/repo"
+)
+
+var raftLogger = log.Logger("raft-cluster")
+
+// errWaitingForSelf is returned when we are waiting for ourselves to depart
+// the peer set, which won't happen
+var errWaitingForSelf = errors.New("waiting for ourselves to depart")
+
+// RaftMaxSnapshots indicates how many snapshots to keep in the consensus data
+// folder.
+// TODO: Maybe include this in Config. Not sure how useful it is to touch
+// this anyways.
+var RaftMaxSnapshots = 5
+
+// RaftLogCacheSize is the maximum number of logs to cache in-memory.
+// This is used to reduce disk I/O for the recently committed entries.
+var RaftLogCacheSize = 512 + +// How long we wait for updates during shutdown before snapshotting +var waitForUpdatesShutdownTimeout = 5 * time.Second +var waitForUpdatesInterval = 400 * time.Millisecond + +// How many times to retry snapshotting when shutting down +var maxShutdownSnapshotRetries = 5 + +// raftWrapper wraps the hraft.Raft object and related things like the +// different stores used or the hraft.Configuration. +// Its methods provide functionality for working with Raft. +type raftWrapper struct { + ctx context.Context + cancel context.CancelFunc + raft *hraft.Raft + config *ClusterRaftConfig + host host.Host + serverConfig hraft.Configuration + transport *hraft.NetworkTransport + snapshotStore hraft.SnapshotStore + logStore hraft.LogStore + stableStore hraft.StableStore + boltdb *raftboltdb.BoltStore + repo repo.LockedRepo + staging bool +} + +// newRaftWrapper creates a Raft instance and initializes +// everything leaving it ready to use. Note, that Bootstrap() should be called +// to make sure the raft instance is usable. 
+func newRaftWrapper( + host host.Host, + cfg *ClusterRaftConfig, + fsm hraft.FSM, + repo repo.LockedRepo, + staging bool, +) (*raftWrapper, error) { + + raftW := &raftWrapper{} + raftW.config = cfg + raftW.host = host + raftW.staging = staging + raftW.repo = repo + // Set correct LocalID + cfg.RaftConfig.LocalID = hraft.ServerID(host.ID().String()) + + df := cfg.GetDataFolder(repo) + err := makeDataFolder(df) + if err != nil { + return nil, err + } + + err = raftW.makeServerConfig() + if err != nil { + return nil, err + } + + err = raftW.makeTransport() + if err != nil { + return nil, err + } + + err = raftW.makeStores() + if err != nil { + return nil, err + } + + raftLogger.Debug("creating Raft") + raftW.raft, err = hraft.NewRaft( + cfg.RaftConfig, + fsm, + raftW.logStore, + raftW.stableStore, + raftW.snapshotStore, + raftW.transport, + ) + if err != nil { + raftLogger.Error("initializing raft: ", err) + return nil, err + } + + raftW.ctx, raftW.cancel = context.WithCancel(context.Background()) + + return raftW, nil +} + +// makeDataFolder creates the folder that is meant to store Raft data. Ensures +// we always set 0700 mode. +func makeDataFolder(folder string) error { + return os.MkdirAll(folder, 0700) +} + +func (rw *raftWrapper) makeTransport() (err error) { + raftLogger.Debug("creating libp2p Raft transport") + rw.transport, err = p2praft.NewLibp2pTransport( + rw.host, + rw.config.NetworkTimeout, + ) + return err +} + +func (rw *raftWrapper) makeStores() error { + raftLogger.Debug("creating BoltDB store") + df := rw.config.GetDataFolder(rw.repo) + store, err := raftboltdb.NewBoltStore(filepath.Join(df, "raft.db")) + if err != nil { + return err + } + + // wraps the store in a LogCache to improve performance. 
+ // See consul/agent/consul/server.go + cacheStore, err := hraft.NewLogCache(RaftLogCacheSize, store) + if err != nil { + return err + } + + raftLogger.Debug("creating raft snapshot store") + snapstore, err := hraft.NewFileSnapshotStoreWithLogger( + df, + RaftMaxSnapshots, + hclog.FromStandardLogger(zap.NewStdLog(log.Logger("raft-snapshot").SugaredLogger.Desugar()), hclog.DefaultOptions), + ) + if err != nil { + return err + } + + rw.logStore = cacheStore + rw.stableStore = store + rw.snapshotStore = snapstore + rw.boltdb = store + return nil +} + +// Bootstrap calls BootstrapCluster on the Raft instance with a valid +// Configuration (generated from InitPeerset) when Raft has no state +// and we are not setting up a staging peer. It returns if Raft +// was boostrapped (true) and an error. +func (rw *raftWrapper) Bootstrap() (bool, error) { + logger.Debug("checking for existing raft states") + hasState, err := hraft.HasExistingState( + rw.logStore, + rw.stableStore, + rw.snapshotStore, + ) + if err != nil { + return false, err + } + + if hasState { + logger.Debug("raft cluster is already initialized") + + // Inform the user that we are working with a pre-existing peerset + logger.Info("existing Raft state found! 
raft.InitPeerset will be ignored") + cf := rw.raft.GetConfiguration() + if err := cf.Error(); err != nil { + logger.Debug(err) + return false, err + } + currentCfg := cf.Configuration() + srvs := "" + for _, s := range currentCfg.Servers { + srvs += fmt.Sprintf(" %s\n", s.ID) + } + + logger.Debugf("Current Raft Peerset:\n%s\n", srvs) + return false, nil + } + + if rw.staging { + logger.Debug("staging servers do not need initialization") + logger.Info("peer is ready to join a cluster") + return false, nil + } + + voters := "" + for _, s := range rw.serverConfig.Servers { + voters += fmt.Sprintf(" %s\n", s.ID) + } + + logger.Infof("initializing raft cluster with the following voters:\n%s\n", voters) + + future := rw.raft.BootstrapCluster(rw.serverConfig) + if err := future.Error(); err != nil { + logger.Error("bootstrapping cluster: ", err) + return true, err + } + return true, nil +} + +// create Raft servers configuration. The result is used +// by Bootstrap() when it proceeds to Bootstrap. +func (rw *raftWrapper) makeServerConfig() error { + peers := []peer.ID{} + addrInfos, err := addrutil.ParseAddresses(context.Background(), rw.config.InitPeerset) + if err != nil { + return err + } + for _, addrInfo := range addrInfos { + peers = append(peers, addrInfo.ID) + } + rw.serverConfig = makeServerConf(append(peers, rw.host.ID())) + return nil +} + +// creates a server configuration with all peers as Voters. +func makeServerConf(peers []peer.ID) hraft.Configuration { + sm := make(map[string]struct{}) + + servers := make([]hraft.Server, 0) + + // Servers are peers + self. We avoid duplicate entries below + for _, pid := range peers { + p := pid.String() + _, ok := sm[p] + if !ok { // avoid dups + sm[p] = struct{}{} + servers = append(servers, hraft.Server{ + Suffrage: hraft.Voter, + ID: hraft.ServerID(p), + Address: hraft.ServerAddress(p), + }) + } + } + return hraft.Configuration{Servers: servers} +} + +// WaitForLeader holds until Raft says we have a leader. 
+// Returns if ctx is canceled. +func (rw *raftWrapper) WaitForLeader(ctx context.Context) (string, error) { + ticker := time.NewTicker(time.Second / 2) + for { + select { + case <-ticker.C: + if l := rw.raft.Leader(); l != "" { + logger.Debug("waitForleaderTimer") + logger.Infof("Current Raft Leader: %s", l) + ticker.Stop() + return string(l), nil + } + case <-ctx.Done(): + return "", ctx.Err() + } + } +} + +func (rw *raftWrapper) WaitForVoter(ctx context.Context) error { + logger.Debug("waiting until we are promoted to a voter") + + pid := hraft.ServerID(rw.host.ID().String()) + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + logger.Debugf("%s: get configuration", pid) + configFuture := rw.raft.GetConfiguration() + if err := configFuture.Error(); err != nil { + return err + } + + if isVoter(pid, configFuture.Configuration()) { + return nil + } + logger.Debugf("%s: not voter yet", pid) + + time.Sleep(waitForUpdatesInterval) + } + } +} + +func isVoter(srvID hraft.ServerID, cfg hraft.Configuration) bool { + for _, server := range cfg.Servers { + if server.ID == srvID && server.Suffrage == hraft.Voter { + return true + } + } + return false +} + +// WaitForUpdates holds until Raft has synced to the last index in the log +func (rw *raftWrapper) WaitForUpdates(ctx context.Context) error { + + logger.Debug("Raft state is catching up to the latest known version. 
Please wait...") + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + lai := rw.raft.AppliedIndex() + li := rw.raft.LastIndex() + logger.Debugf("current Raft index: %d/%d", + lai, li) + if lai == li { + return nil + } + time.Sleep(waitForUpdatesInterval) + } + } +} + +func (rw *raftWrapper) WaitForPeer(ctx context.Context, pid string, depart bool) error { + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + peers, err := rw.Peers(ctx) + if err != nil { + return err + } + + if len(peers) == 1 && pid == peers[0] && depart { + return errWaitingForSelf + } + + found := find(peers, pid) + + // departing + if depart && !found { + return nil + } + + // joining + if !depart && found { + return nil + } + + time.Sleep(50 * time.Millisecond) + } + } +} + +// Snapshot tells Raft to take a snapshot. +func (rw *raftWrapper) Snapshot() error { + future := rw.raft.Snapshot() + err := future.Error() + if err != nil && err.Error() != hraft.ErrNothingNewToSnapshot.Error() { + return err + } + return nil +} + +// snapshotOnShutdown attempts to take a snapshot before a shutdown. +// Snapshotting might fail if the raft applied index is not the last index. +// This waits for the updates and tries to take a snapshot when the +// applied index is up to date. +// It will retry if the snapshot still fails, in case more updates have arrived. +// If waiting for updates times-out, it will not try anymore, since something +// is wrong. This is a best-effort solution as there is no way to tell Raft +// to stop processing entries because we want to take a snapshot before +// shutting down. +func (rw *raftWrapper) snapshotOnShutdown() error { + var err error + for i := 0; i < maxShutdownSnapshotRetries; i++ { + ctx, cancel := context.WithTimeout(context.Background(), waitForUpdatesShutdownTimeout) + err = rw.WaitForUpdates(ctx) + cancel() + if err != nil { + logger.Warn("timed out waiting for state updates before shutdown. 
Snapshotting may fail") + return rw.Snapshot() + } + + err = rw.Snapshot() + if err == nil { + return nil // things worked + } + + // There was an error + err = errors.New("could not snapshot raft: " + err.Error()) + logger.Warnf("retrying to snapshot (%d/%d)...", i+1, maxShutdownSnapshotRetries) + } + return err +} + +// Shutdown shutdown Raft and closes the BoltDB. +func (rw *raftWrapper) Shutdown(ctx context.Context) error { + + rw.cancel() + + var finalErr error + + err := rw.snapshotOnShutdown() + if err != nil { + finalErr = multierr.Append(finalErr, err) + } + + future := rw.raft.Shutdown() + err = future.Error() + if err != nil { + finalErr = multierr.Append(finalErr, err) + } + + err = rw.boltdb.Close() // important! + if err != nil { + finalErr = multierr.Append(finalErr, err) + } + + return finalErr +} + +// AddPeer adds a peer to Raft +func (rw *raftWrapper) AddPeer(ctx context.Context, peerId peer.ID) error { + + // Check that we don't have it to not waste + // log entries if so. + peers, err := rw.Peers(ctx) + if err != nil { + return err + } + if find(peers, peerId.String()) { + logger.Infof("%s is already a raft peerStr", peerId.String()) + return nil + } + + err = rw.host.Connect(ctx, peer.AddrInfo{ID: peerId}) + if err != nil { + return err + } + + future := rw.raft.AddVoter( + hraft.ServerID(peerId.String()), + hraft.ServerAddress(peerId.String()), + 0, + 0, + ) // TODO: Extra cfg value? + err = future.Error() + if err != nil { + logger.Error("raft cannot add peer: ", err) + } + return err +} + +// RemovePeer removes a peer from Raft +func (rw *raftWrapper) RemovePeer(ctx context.Context, peer string) error { + // Check that we have it to not waste + // log entries if we don't. 
+ peers, err := rw.Peers(ctx) + if err != nil { + return err + } + if !find(peers, peer) { + logger.Infof("%s is not among raft peers", peer) + return nil + } + + if len(peers) == 1 && peers[0] == peer { + return errors.New("cannot remove ourselves from a 1-peer cluster") + } + + rmFuture := rw.raft.RemoveServer( + hraft.ServerID(peer), + 0, + 0, + ) + err = rmFuture.Error() + if err != nil { + logger.Error("raft cannot remove peer: ", err) + return err + } + + return nil +} + +// Leader returns Raft's leader. It may be an empty string if +// there is no leader or it is unknown. +func (rw *raftWrapper) Leader(ctx context.Context) string { + return string(rw.raft.Leader()) +} + +func (rw *raftWrapper) Peers(ctx context.Context) ([]string, error) { + ids := make([]string, 0) + + configFuture := rw.raft.GetConfiguration() + if err := configFuture.Error(); err != nil { + return nil, err + } + + for _, server := range configFuture.Configuration().Servers { + ids = append(ids, string(server.ID)) + } + + return ids, nil +} + +// CleanupRaft moves the current data folder to a backup location +//func CleanupRaft(cfg *Config) error { +// dataFolder := cfg.GetDataFolder() +// keep := cfg.BackupsRotate +// +// meta, _, err := latestSnapshot(dataFolder) +// if meta == nil && err == nil { +// // no snapshots at all. Avoid creating backups +// // from empty state folders. 
+// logger.Infof("cleaning empty Raft data folder (%s)", dataFolder) +// os.RemoveAll(dataFolder) +// return nil +// } +// +// logger.Infof("cleaning and backing up Raft data folder (%s)", dataFolder) +// dbh := newDataBackupHelper(dataFolder, keep) +// err = dbh.makeBackup() +// if err != nil { +// logger.Warn(err) +// logger.Warn("the state could not be cleaned properly") +// logger.Warn("manual intervention may be needed before starting cluster again") +// } +// return nil +//} + +// only call when Raft is shutdown +func (rw *raftWrapper) Clean() error { + //return CleanupRaft(rw.config) + return nil +} + +func find(s []string, elem string) bool { + for _, selem := range s { + if selem == elem { + return true + } + } + return false +} diff --git a/lib/harmony/harmonydb/harmonydb.go b/lib/harmony/harmonydb/harmonydb.go index 5ec9f5a25..0fed176d2 100644 --- a/lib/harmony/harmonydb/harmonydb.go +++ b/lib/harmony/harmonydb/harmonydb.go @@ -10,8 +10,6 @@ import ( "sort" "strconv" "strings" - "sync" - "sync/atomic" "time" logging "github.com/ipfs/go-log/v2" @@ -35,8 +33,6 @@ type DB struct { cfg *pgxpool.Config schema string hostnames []string - BTFPOnce sync.Once - BTFP atomic.Uintptr } var logger = logging.Logger("harmonydb") diff --git a/lib/harmony/harmonydb/userfuncs.go b/lib/harmony/harmonydb/userfuncs.go index 7fcf76dcd..788ca4a34 100644 --- a/lib/harmony/harmonydb/userfuncs.go +++ b/lib/harmony/harmonydb/userfuncs.go @@ -3,17 +3,13 @@ package harmonydb import ( "context" "errors" - "runtime" "github.com/georgysavva/scany/v2/pgxscan" "github.com/jackc/pgerrcode" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgconn" - "github.com/samber/lo" ) -var errTx = errors.New("Cannot use a non-transaction func in a transaction") - // rawStringOnly is _intentionally_private_ to force only basic strings in SQL queries. // In any package, raw strings will satisfy compilation. 
Ex: // @@ -26,9 +22,6 @@ type rawStringOnly string // Note, for CREATE & DROP please keep these permanent and express // them in the ./sql/ files (next number). func (db *DB) Exec(ctx context.Context, sql rawStringOnly, arguments ...any) (count int, err error) { - if db.usedInTransaction() { - return 0, errTx - } res, err := db.pgx.Exec(ctx, string(sql), arguments...) return int(res.RowsAffected()), err } @@ -62,9 +55,6 @@ type Query struct { // fmt.Println(id, name) // } func (db *DB) Query(ctx context.Context, sql rawStringOnly, arguments ...any) (*Query, error) { - if db.usedInTransaction() { - return &Query{}, errTx - } q, err := db.pgx.Query(ctx, string(sql), arguments...) return &Query{q}, err } @@ -76,10 +66,6 @@ type Row interface { Scan(...any) error } -type rowErr struct{} - -func (rowErr) Scan(_ ...any) error { return errTx } - // QueryRow gets 1 row using column order matching. // This is a timesaver for the special case of wanting the first row returned only. // EX: @@ -88,9 +74,6 @@ func (rowErr) Scan(_ ...any) error { return errTx } // var ID = 123 // err := db.QueryRow(ctx, "SELECT name, pet FROM users WHERE ID=?", ID).Scan(&name, &pet) func (db *DB) QueryRow(ctx context.Context, sql rawStringOnly, arguments ...any) Row { - if db.usedInTransaction() { - return rowErr{} - } return db.pgx.QueryRow(ctx, string(sql), arguments...) } @@ -109,9 +92,6 @@ Ex: err := db.Select(ctx, &users, "SELECT name, id, tel_no FROM customers WHERE pet=?", pet) */ func (db *DB) Select(ctx context.Context, sliceOfStructPtr any, sql rawStringOnly, arguments ...any) error { - if db.usedInTransaction() { - return errTx - } return pgxscan.Select(ctx, db.pgx, sliceOfStructPtr, string(sql), arguments...) } @@ -120,32 +100,10 @@ type Tx struct { ctx context.Context } -// usedInTransaction is a helper to prevent nesting transactions -// & non-transaction calls in transactions. It only checks 20 frames. -// Fast: This memory should all be in CPU Caches. 
-func (db *DB) usedInTransaction() bool { - var framePtrs = (&[20]uintptr{})[:] // 20 can be stack-local (no alloc) - framePtrs = framePtrs[:runtime.Callers(3, framePtrs)] // skip past our caller. - return lo.Contains(framePtrs, db.BTFP.Load()) // Unsafe read @ beginTx overlap, but 'return false' is correct there. -} - // BeginTransaction is how you can access transactions using this library. // The entire transaction happens in the function passed in. // The return must be true or a rollback will occur. -// Be sure to test the error for IsErrSerialization() if you want to retry -// -// when there is a DB serialization error. -// -//go:noinline func (db *DB) BeginTransaction(ctx context.Context, f func(*Tx) (commit bool, err error)) (didCommit bool, retErr error) { - db.BTFPOnce.Do(func() { - fp := make([]uintptr, 20) - runtime.Callers(1, fp) - db.BTFP.Store(fp[0]) - }) - if db.usedInTransaction() { - return false, errTx - } tx, err := db.pgx.BeginTx(ctx, pgx.TxOptions{}) if err != nil { return false, err @@ -198,8 +156,3 @@ func IsErrUniqueContraint(err error) bool { var e2 *pgconn.PgError return errors.As(err, &e2) && e2.Code == pgerrcode.UniqueViolation } - -func IsErrSerialization(err error) bool { - var e2 *pgconn.PgError - return errors.As(err, &e2) && e2.Code == pgerrcode.SerializationFailure -} diff --git a/lib/harmony/harmonytask/task_type_handler.go b/lib/harmony/harmonytask/task_type_handler.go index a6e8933d2..34f7a5c3e 100644 --- a/lib/harmony/harmonytask/task_type_handler.go +++ b/lib/harmony/harmonytask/task_type_handler.go @@ -25,8 +25,6 @@ type taskTypeHandler struct { func (h *taskTypeHandler) AddTask(extra func(TaskID, *harmonydb.Tx) (bool, error)) { var tID TaskID - retryWait := time.Millisecond * 100 -retryAddTask: _, err := h.TaskEngine.db.BeginTransaction(h.TaskEngine.ctx, func(tx *harmonydb.Tx) (bool, error) { // create taskID (from DB) _, err := tx.Exec(`INSERT INTO harmony_task (name, added_by, posted_time) @@ -46,11 +44,6 @@ retryAddTask: 
log.Debugf("addtask(%s) saw unique constraint, so it's added already.", h.Name) return } - if harmonydb.IsErrSerialization(err) { - time.Sleep(retryWait) - retryWait *= 2 - goto retryAddTask - } log.Error("Could not add task. AddTasFunc failed: %v", err) return } @@ -168,8 +161,7 @@ top: func (h *taskTypeHandler) recordCompletion(tID TaskID, workStart time.Time, done bool, doErr error) { workEnd := time.Now() - retryWait := time.Millisecond * 100 -retryRecordCompletion: + cm, err := h.TaskEngine.db.BeginTransaction(h.TaskEngine.ctx, func(tx *harmonydb.Tx) (bool, error) { var postedTime time.Time err := tx.QueryRow(`SELECT posted_time FROM harmony_task WHERE id=$1`, tID).Scan(&postedTime) @@ -184,9 +176,6 @@ retryRecordCompletion: return false, fmt.Errorf("could not log completion: %w", err) } result = "" - if doErr != nil { - result = "non-failing error: " + doErr.Error() - } } else { if doErr != nil { result = "error: " + doErr.Error() @@ -225,11 +214,6 @@ VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, tID, h.Name, postedTime, workStart, wo return true, nil }) if err != nil { - if harmonydb.IsErrSerialization(err) { - time.Sleep(retryWait) - retryWait *= 2 - goto retryRecordCompletion - } log.Error("Could not record transaction: ", err) return } diff --git a/lib/result/result.go b/lib/result/result.go index 56a9ffab7..4f3a12ee8 100644 --- a/lib/result/result.go +++ b/lib/result/result.go @@ -1,5 +1,7 @@ package result +import "encoding/json" + // Result is a small wrapper type encapsulating Value/Error tuples, mostly for // use when sending values across channels // NOTE: Avoid adding any functionality to this, any "nice" things added here will @@ -39,3 +41,13 @@ func (r Result[T]) Assert(noErrFn func(err error, msgAndArgs ...interface{})) T return r.Value } + +// MarshalJSON implements the json.Marshaler interface, marshalling string error correctly +// this method makes the display in log.Infow nicer +func (r Result[T]) MarshalJSON() ([]byte, error) { + if r.Error != 
nil { + return json.Marshal(map[string]string{"Error": r.Error.Error()}) + } + + return json.Marshal(map[string]interface{}{"Value": r.Value}) +} diff --git a/markets/storageadapter/ondealsectorcommitted_test.go b/markets/storageadapter/ondealsectorcommitted_test.go index 1d7519ff9..e3d318780 100644 --- a/markets/storageadapter/ondealsectorcommitted_test.go +++ b/markets/storageadapter/ondealsectorcommitted_test.go @@ -55,21 +55,21 @@ func TestOnDealSectorPreCommitted(t *testing.T) { } unfinishedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: -1, LastUpdatedEpoch: 2, }, } activeDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } slashedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, SlashEpoch: 2, @@ -277,21 +277,21 @@ func TestOnDealSectorCommitted(t *testing.T) { } unfinishedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: -1, LastUpdatedEpoch: 2, }, } activeDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } slashedDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, SlashEpoch: 2, diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index bdfce6f55..11742c879 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -32,6 +32,7 @@ import ( "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/helpers" pipeline "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" 
"github.com/filecoin-project/lotus/storage/sectorblocks" ) @@ -92,11 +93,11 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema return nil, xerrors.Errorf("deal.PublishCid can't be nil") } - sdInfo := api.PieceDealInfo{ + sdInfo := piece.PieceDealInfo{ DealID: deal.DealID, DealProposal: &deal.Proposal, PublishCid: deal.PublishCid, - DealSchedule: api.DealSchedule{ + DealSchedule: piece.DealSchedule{ StartEpoch: deal.ClientDealProposal.Proposal.StartEpoch, EndEpoch: deal.ClientDealProposal.Proposal.EndEpoch, }, diff --git a/node/builder.go b/node/builder.go index 1cd4823d5..128a99f87 100644 --- a/node/builder.go +++ b/node/builder.go @@ -127,6 +127,7 @@ const ( SettlePaymentChannelsKey RunPeerTaggerKey SetupFallbackBlockstoresKey + GoRPCServer ConsensusReporterKey diff --git a/node/builder_chain.go b/node/builder_chain.go index 348916010..2d9c0ea2e 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -3,6 +3,7 @@ package node import ( "os" + gorpc "github.com/libp2p/go-libp2p-gorpc" "go.uber.org/fx" "golang.org/x/xerrors" @@ -17,6 +18,7 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/index" @@ -30,6 +32,7 @@ import ( "github.com/filecoin-project/lotus/chain/wallet" ledgerwallet "github.com/filecoin-project/lotus/chain/wallet/ledger" "github.com/filecoin-project/lotus/chain/wallet/remotewallet" + raftcns "github.com/filecoin-project/lotus/lib/consensus/raft" "github.com/filecoin-project/lotus/lib/peermgr" "github.com/filecoin-project/lotus/markets/retrievaladapter" "github.com/filecoin-project/lotus/markets/storageadapter" @@ -153,6 +156,7 @@ var ChainNode = Options( 
Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager), Override(new(full.EthModuleAPI), From(new(api.Gateway))), Override(new(full.EthEventAPI), From(new(api.Gateway))), + Override(new(full.ActorEventAPI), From(new(api.Gateway))), ), // Full node API / service startup @@ -221,7 +225,7 @@ func ConfigFullNode(c interface{}) Option { // If the Eth JSON-RPC is enabled, enable storing events at the ChainStore. // This is the case even if real-time and historic filtering are disabled, // as it enables us to serve logs in eth_getTransactionReceipt. - If(cfg.Fevm.EnableEthRPC, Override(StoreEventsKey, modules.EnableStoringEvents)), + If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI, Override(StoreEventsKey, modules.EnableStoringEvents)), Override(new(dtypes.ClientImportMgr), modules.ClientImportMgr), @@ -249,14 +253,26 @@ func ConfigFullNode(c interface{}) Option { Override(new(wallet.Default), wallet.NilDefault), ), + // Chain node cluster enabled + If(cfg.Cluster.ClusterModeEnabled, + Override(new(*gorpc.Client), modules.NewRPCClient), + Override(new(*raftcns.ClusterRaftConfig), raftcns.NewClusterRaftConfig(&cfg.Cluster)), + Override(new(*raftcns.Consensus), raftcns.NewConsensusWithRPCClient(false)), + Override(new(*messagesigner.MessageSignerConsensus), messagesigner.NewMessageSignerConsensus), + Override(new(messagesigner.MsgSigner), From(new(*messagesigner.MessageSignerConsensus))), + Override(new(*modules.RPCHandler), modules.NewRPCHandler), + Override(GoRPCServer, modules.NewRPCServer), + ), + // Actor event filtering support - Override(new(events.EventAPI), From(new(modules.EventAPI))), + Override(new(events.EventHelperAPI), From(new(modules.EventHelperAPI))), + Override(new(*filter.EventFilterManager), modules.EventFilterManager(cfg.Events)), // in lite-mode Eth api is provided by gateway ApplyIf(isFullNode, If(cfg.Fevm.EnableEthRPC, Override(new(full.EthModuleAPI), modules.EthModuleAPI(cfg.Fevm)), - Override(new(full.EthEventAPI), 
modules.EthEventAPI(cfg.Fevm)), + Override(new(full.EthEventAPI), modules.EthEventHandler(cfg.Events, cfg.Fevm.EnableEthRPC)), ), If(!cfg.Fevm.EnableEthRPC, Override(new(full.EthModuleAPI), &full.EthModuleDummy{}), @@ -264,6 +280,15 @@ func ConfigFullNode(c interface{}) Option { ), ), + ApplyIf(isFullNode, + If(cfg.Events.EnableActorEventsAPI, + Override(new(full.ActorEventAPI), modules.ActorEventHandler(cfg.Events)), + ), + If(!cfg.Events.EnableActorEventsAPI, + Override(new(full.ActorEventAPI), &full.ActorEventDummy{}), + ), + ), + // enable message index for full node when configured by the user, otherwise use dummy. If(cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.MsgIndex)), If(!cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.DummyMsgIndex)), diff --git a/node/config/cfgdocgen/gen.go b/node/config/cfgdocgen/gen.go index b13b7d799..6c7371a40 100644 --- a/node/config/cfgdocgen/gen.go +++ b/node/config/cfgdocgen/gen.go @@ -74,6 +74,11 @@ func run() error { name := f[0] typ := f[1] + if len(comment) > 0 && strings.HasPrefix(comment[0], fmt.Sprintf("%s is DEPRECATED", name)) { + // don't document deprecated fields + continue + } + out[currentType] = append(out[currentType], field{ Name: name, Type: typ, diff --git a/node/config/def.go b/node/config/def.go index 192ab4f5e..7e06eba3a 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -106,17 +106,19 @@ func DefaultFullNode() *FullNode { HotstoreMaxSpaceSafetyBuffer: 50_000_000_000, }, }, + Cluster: *DefaultUserRaftConfig(), Fevm: FevmConfig{ EnableEthRPC: false, EthTxHashMappingLifetimeDays: 0, - Events: Events{ - DisableRealTimeFilterAPI: false, - DisableHistoricFilterAPI: false, - FilterTTL: Duration(time.Hour * 24), - MaxFilters: 100, - MaxFilterResults: 10000, - MaxFilterHeightRange: 2880, // conservative limit of one day - }, + }, + Events: EventsConfig{ + DisableRealTimeFilterAPI: false, + DisableHistoricFilterAPI: false, + EnableActorEventsAPI: false, + FilterTTL: 
Duration(time.Hour * 24), + MaxFilters: 100, + MaxFilterResults: 10000, + MaxFilterHeightRange: 2880, // conservative limit of one day }, } } @@ -326,11 +328,31 @@ const ( ResourceFilteringDisabled = ResourceFilteringStrategy("disabled") ) +var ( + DefaultDataSubFolder = "raft" + DefaultWaitForLeaderTimeout = 15 * time.Second + DefaultCommitRetries = 1 + DefaultNetworkTimeout = 100 * time.Second + DefaultCommitRetryDelay = 200 * time.Millisecond + DefaultBackupsRotate = 6 +) + +func DefaultUserRaftConfig() *UserRaftConfig { + var cfg UserRaftConfig + cfg.DataFolder = "" // empty so it gets omitted + cfg.InitPeersetMultiAddr = []string{} + cfg.WaitForLeaderTimeout = Duration(DefaultWaitForLeaderTimeout) + cfg.NetworkTimeout = Duration(DefaultNetworkTimeout) + cfg.CommitRetries = DefaultCommitRetries + cfg.CommitRetryDelay = Duration(DefaultCommitRetryDelay) + cfg.BackupsRotate = DefaultBackupsRotate + + return &cfg +} + func DefaultLotusProvider() *LotusProviderConfig { return &LotusProviderConfig{ - Subsystems: ProviderSubsystemsConfig{ - GuiAddress: ":4701", - }, + Subsystems: ProviderSubsystemsConfig{}, Fees: LotusProviderFees{ DefaultMaxFee: DefaultDefaultMaxFee, MaxPreCommitGasFee: types.MustParseFIL("0.025"), diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 8ea61c782..929f15766 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -357,14 +357,13 @@ see https://lotus.filecoin.io/storage-providers/advanced-configurations/market/# Comment: ``, }, }, - "Events": { + "EventsConfig": { { Name: "DisableRealTimeFilterAPI", Type: "bool", - Comment: `EnableEthRPC enables APIs that -DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. 
-The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag.`, + Comment: `DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. +The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, }, { Name: "DisableHistoricFilterAPI", @@ -372,7 +371,16 @@ The API is enabled when EnableEthRPC is true, but can be disabled selectively wi Comment: `DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events that occurred in the past. HistoricFilterAPI maintains a queryable index of events. -The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag.`, +The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, + }, + { + Name: "EnableActorEventsAPI", + Type: "bool", + + Comment: `EnableActorEventsAPI enables the Actor events API that enables clients to consume events +emitted by (smart contracts + built-in Actors). 
+This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be +disabled by setting their respective Disable* options.`, }, { Name: "FilterTTL", @@ -464,7 +472,7 @@ Set to 0 to keep all mappings`, }, { Name: "Events", - Type: "Events", + Type: "DeprecatedEvents", Comment: ``, }, @@ -494,12 +502,24 @@ Set to 0 to keep all mappings`, Comment: ``, }, + { + Name: "Cluster", + Type: "UserRaftConfig", + + Comment: ``, + }, { Name: "Fevm", Type: "FevmConfig", Comment: ``, }, + { + Name: "Events", + Type: "EventsConfig", + + Comment: ``, + }, { Name: "Index", Type: "IndexConfig", @@ -1007,18 +1027,6 @@ block rewards will be missed!`, Comment: ``, }, - { - Name: "EnableWebGui", - Type: "bool", - - Comment: ``, - }, - { - Name: "GuiAddress", - Type: "string", - - Comment: `The address that should listen for Web GUI requests.`, - }, }, "ProvingConfig": { { @@ -1556,6 +1564,30 @@ Submitting a smaller number of prove commits per epoch would reduce the possibil Comment: `UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of PreCommit 2 to 11GiB.`, }, + { + Name: "RequireActivationSuccess", + Type: "bool", + + Comment: `Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3).`, + }, + { + Name: "RequireActivationSuccessUpdate", + Type: "bool", + + Comment: `Whether to abort if any piece activation notification returns a non-zero exit code (newly sealed sectors, only with ProveCommitSectors3).`, + }, + { + Name: "RequireNotificationSuccess", + Type: "bool", + + Comment: `Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3).`, + }, + { + Name: "RequireNotificationSuccessUpdate", + Type: "bool", + + Comment: `Whether to abort if any piece activation notification returns a non-zero exit code (updating sectors, only with ProveReplicaUpdates3).`, + }, }, "Splitstore": { { @@ -1686,6 
+1718,68 @@ HotstoreMaxSpaceTarget - HotstoreMaxSpaceSafetyBuffer`, Comment: ``, }, }, + "UserRaftConfig": { + { + Name: "ClusterModeEnabled", + Type: "bool", + + Comment: `EXPERIMENTAL. config to enabled node cluster with raft consensus`, + }, + { + Name: "DataFolder", + Type: "string", + + Comment: `A folder to store Raft's data.`, + }, + { + Name: "InitPeersetMultiAddr", + Type: "[]string", + + Comment: `InitPeersetMultiAddr provides the list of initial cluster peers for new Raft +peers (with no prior state). It is ignored when Raft was already +initialized or when starting in staging mode.`, + }, + { + Name: "WaitForLeaderTimeout", + Type: "Duration", + + Comment: `LeaderTimeout specifies how long to wait for a leader before +failing an operation.`, + }, + { + Name: "NetworkTimeout", + Type: "Duration", + + Comment: `NetworkTimeout specifies how long before a Raft network +operation is timed out`, + }, + { + Name: "CommitRetries", + Type: "int", + + Comment: `CommitRetries specifies how many times we retry a failed commit until +we give up.`, + }, + { + Name: "CommitRetryDelay", + Type: "Duration", + + Comment: `How long to wait between retries`, + }, + { + Name: "BackupsRotate", + Type: "int", + + Comment: `BackupsRotate specifies the maximum number of Raft's DataFolder +copies that we keep as backups (renaming) after cleanup.`, + }, + { + Name: "Tracing", + Type: "bool", + + Comment: `Tracing enables propagation of contexts across binary boundaries.`, + }, + }, "Wallet": { { Name: "RemoteBackend", diff --git a/node/config/load.go b/node/config/load.go index fd015d533..354d3b3cb 100644 --- a/node/config/load.go +++ b/node/config/load.go @@ -18,12 +18,9 @@ import ( // FromFile loads config from a specified file overriding defaults specified in // the def parameter. If file does not exist or is empty defaults are assumed. 
func FromFile(path string, opts ...LoadCfgOpt) (interface{}, error) { - var loadOpts cfgLoadOpts - var err error - for _, opt := range opts { - if err = opt(&loadOpts); err != nil { - return nil, xerrors.Errorf("failed to apply load cfg option: %w", err) - } + loadOpts, err := applyOpts(opts...) + if err != nil { + return nil, err } var def interface{} if loadOpts.defaultCfg != nil { @@ -56,16 +53,43 @@ func FromFile(path string, opts ...LoadCfgOpt) (interface{}, error) { return nil, xerrors.Errorf("config failed validation: %w", err) } } - return FromReader(buf, def) + return FromReader(buf, def, opts...) } // FromReader loads config from a reader instance. -func FromReader(reader io.Reader, def interface{}) (interface{}, error) { - cfg := def - _, err := toml.NewDecoder(reader).Decode(cfg) +func FromReader(reader io.Reader, def interface{}, opts ...LoadCfgOpt) (interface{}, error) { + loadOpts, err := applyOpts(opts...) if err != nil { return nil, err } + cfg := def + md, err := toml.NewDecoder(reader).Decode(cfg) + if err != nil { + return nil, err + } + + // find any fields with a tag: `moved:"New.Config.Location"` and move any set values there over to + // the new location if they are not already set there. + movedFields := findMovedFields(nil, cfg) + var warningOut io.Writer = os.Stderr + if loadOpts.warningWriter != nil { + warningOut = loadOpts.warningWriter + } + for _, d := range movedFields { + if md.IsDefined(d.Field...) { + fmt.Fprintf( + warningOut, + "WARNING: Use of deprecated configuration option '%s' will be removed in a future release, use '%s' instead\n", + strings.Join(d.Field, "."), + strings.Join(d.NewField, ".")) + if !md.IsDefined(d.NewField...) 
{ + // new value isn't set but old is, we should move what the user set there + if err := moveFieldValue(cfg, d.Field, d.NewField); err != nil { + return nil, fmt.Errorf("failed to move field value: %w", err) + } + } + } + } err = envconfig.Process("LOTUS", cfg) if err != nil { @@ -75,14 +99,105 @@ func FromReader(reader io.Reader, def interface{}) (interface{}, error) { return cfg, nil } +// move a value from the location in the valPtr struct specified by oldPath, to the location +// specified by newPath; where the path is an array of nested field names. +func moveFieldValue(valPtr interface{}, oldPath []string, newPath []string) error { + oldValue, err := getFieldValue(valPtr, oldPath) + if err != nil { + return err + } + val := reflect.ValueOf(valPtr).Elem() + for { + field := val.FieldByName(newPath[0]) + if !field.IsValid() { + return fmt.Errorf("unexpected error fetching field value") + } + if len(newPath) == 1 { + if field.Kind() != oldValue.Kind() { + return fmt.Errorf("unexpected error, old kind != new kind") + } + // set field on val to be the new one, and we're done + field.Set(oldValue) + return nil + } + if field.Kind() != reflect.Struct { + return fmt.Errorf("unexpected error fetching field value, is not a struct") + } + newPath = newPath[1:] + val = field + } +} + +// recursively iterate into `path` to find the terminal value +func getFieldValue(val interface{}, path []string) (reflect.Value, error) { + if reflect.ValueOf(val).Kind() == reflect.Ptr { + val = reflect.ValueOf(val).Elem().Interface() + } + field := reflect.ValueOf(val).FieldByName(path[0]) + if !field.IsValid() { + return reflect.Value{}, fmt.Errorf("unexpected error fetching field value") + } + if len(path) > 1 { + if field.Kind() != reflect.Struct { + return reflect.Value{}, fmt.Errorf("unexpected error fetching field value, is not a struct") + } + return getFieldValue(field.Interface(), path[1:]) + } + return field, nil +} + +type movedField struct { + Field []string + NewField 
[]string +} + +// inspect the fields recursively within a struct and find any with "moved" tags +func findMovedFields(path []string, val interface{}) []movedField { + dep := make([]movedField, 0) + if reflect.ValueOf(val).Kind() == reflect.Ptr { + val = reflect.ValueOf(val).Elem().Interface() + } + t := reflect.TypeOf(val) + if t.Kind() != reflect.Struct { + return nil + } + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + // could also do a "deprecated" in here + if idx := field.Tag.Get("moved"); idx != "" && idx != "-" { + dep = append(dep, movedField{ + Field: append(path, field.Name), + NewField: strings.Split(idx, "."), + }) + } + if field.Type.Kind() == reflect.Struct && reflect.ValueOf(val).FieldByName(field.Name).IsValid() { + deps := findMovedFields(append(path, field.Name), reflect.ValueOf(val).FieldByName(field.Name).Interface()) + dep = append(dep, deps...) + } + } + return dep +} + type cfgLoadOpts struct { defaultCfg func() (interface{}, error) canFallbackOnDefault func() error validate func(string) error + warningWriter io.Writer } type LoadCfgOpt func(opts *cfgLoadOpts) error +func applyOpts(opts ...LoadCfgOpt) (cfgLoadOpts, error) { + var loadOpts cfgLoadOpts + var err error + for _, opt := range opts { + if err = opt(&loadOpts); err != nil { + return loadOpts, fmt.Errorf("failed to apply load cfg option: %w", err) + } + } + return loadOpts, nil +} + func SetDefault(f func() (interface{}, error)) LoadCfgOpt { return func(opts *cfgLoadOpts) error { opts.defaultCfg = f @@ -104,6 +219,13 @@ func SetValidate(f func(string) error) LoadCfgOpt { } } +func SetWarningWriter(w io.Writer) LoadCfgOpt { + return func(opts *cfgLoadOpts) error { + opts.warningWriter = w + return nil + } +} + func NoDefaultForSplitstoreTransition() error { return xerrors.Errorf("FullNode config not found and fallback to default disallowed while we transition to splitstore discard default. Use `lotus config default` to set this repo up with a default config. 
Be sure to set `EnableSplitstore` to `false` if you are running a full archive node") } diff --git a/node/config/load_test.go b/node/config/load_test.go index e17660c19..2edef259b 100644 --- a/node/config/load_test.go +++ b/node/config/load_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func fullNodeDefault() (interface{}, error) { return DefaultFullNode(), nil } @@ -138,3 +139,77 @@ func TestFailToFallbackToDefault(t *testing.T) { _, err = FromFile(nonExistantFileName, SetDefault(fullNodeDefault), SetCanFallbackOnDefault(NoDefaultForSplitstoreTransition)) assert.Error(t, err) } + +func TestPrintDeprecated(t *testing.T) { + type ChildCfg struct { + Field string `moved:"Bang"` + NewField string + } + type Old struct { + Thing1 int `moved:"New.Thing1"` + Thing2 int `moved:"New.Thing2"` + } + type New struct { + Thing1 int + Thing2 int + } + type ParentCfg struct { + Child ChildCfg + Old Old + New New + Foo int + Baz string `moved:"Child.NewField"` + Boom int `moved:"Foo"` + Bang string + } + + t.Run("warning output", func(t *testing.T) { + cfg := ` + Baz = "baz" + Foo = 100 + [Child] + Field = "bip" + NewField = "bop" + ` + + warningWriter := bytes.NewBuffer(nil) + + v, err := FromReader(bytes.NewReader([]byte(cfg)), &ParentCfg{Boom: 200, Bang: "300"}, SetWarningWriter(warningWriter)) + + require.NoError(t, err) + require.Equal(t, &ParentCfg{ + Child: ChildCfg{ + Field: "bip", + NewField: "bop", + }, + Baz: "baz", + Foo: 100, + Boom: 200, + Bang: "bip", + }, v) + require.Regexp(t, `\WChild\.Field\W.+use 'Bang' instead`, warningWriter.String()) + require.Regexp(t, `\WBaz\W.+use 'Child\.NewField' instead`, warningWriter.String()) + require.NotContains(t, warningWriter.String(), "don't use this at all") + require.NotContains(t, warningWriter.String(), "Boom") + }) + + defaultNew := New{Thing1: 42, Thing2: 800} + testCases := []struct { + name string + cfg string + expected New + }{ + {"simple", ``, 
defaultNew}, + {"set new", "[New]\nThing1 = 101\nThing2 = 102\n", New{Thing1: 101, Thing2: 102}}, + // should move old to new fields if new isn't set + {"set old", "[Old]\nThing1 = 101\nThing2 = 102\n", New{Thing1: 101, Thing2: 102}}, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + v, err := FromReader(bytes.NewReader([]byte(tc.cfg)), &ParentCfg{New: defaultNew}) + require.NoError(t, err) + require.Equal(t, tc.expected, v.(*ParentCfg).New) + }) + } +} diff --git a/node/config/types.go b/node/config/types.go index 8661ce190..ecb946f5d 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -26,7 +26,9 @@ type FullNode struct { Wallet Wallet Fees FeeConfig Chainstore Chainstore + Cluster UserRaftConfig Fevm FevmConfig + Events EventsConfig Index IndexConfig FaultReporter FaultReporterConfig } @@ -95,10 +97,6 @@ type ProviderSubsystemsConfig struct { WindowPostMaxTasks int EnableWinningPost bool WinningPostMaxTasks int - - EnableWebGui bool - // The address that should listen for Web GUI requests. - GuiAddress string } type DAGStoreConfig struct { @@ -490,6 +488,15 @@ type SealingConfig struct { // UseSyntheticPoRep, when set to true, will reduce the amount of cache data held on disk after the completion of PreCommit 2 to 11GiB. UseSyntheticPoRep bool + + // Whether to abort if any sector activation in a batch fails (newly sealed sectors, only with ProveCommitSectors3). + RequireActivationSuccess bool + // Whether to abort if any piece activation notification returns a non-zero exit code (newly sealed sectors, only with ProveCommitSectors3). + RequireActivationSuccessUpdate bool + // Whether to abort if any sector activation in a batch fails (updating sectors, only with ProveReplicaUpdates3). + RequireNotificationSuccess bool + // Whether to abort if any piece activation notification returns a non-zero exit code (updating sectors, only with ProveReplicaUpdates3). 
+ RequireNotificationSuccessUpdate bool } type SealerConfig struct { @@ -748,6 +755,33 @@ type FeeConfig struct { DefaultMaxFee types.FIL } +type UserRaftConfig struct { + // EXPERIMENTAL. config to enabled node cluster with raft consensus + ClusterModeEnabled bool + // A folder to store Raft's data. + DataFolder string + // InitPeersetMultiAddr provides the list of initial cluster peers for new Raft + // peers (with no prior state). It is ignored when Raft was already + // initialized or when starting in staging mode. + InitPeersetMultiAddr []string + // LeaderTimeout specifies how long to wait for a leader before + // failing an operation. + WaitForLeaderTimeout Duration + // NetworkTimeout specifies how long before a Raft network + // operation is timed out + NetworkTimeout Duration + // CommitRetries specifies how many times we retry a failed commit until + // we give up. + CommitRetries int + // How long to wait between retries + CommitRetryDelay Duration + // BackupsRotate specifies the maximum number of Raft's DataFolder + // copies that we keep as backups (renaming) after cleanup. + BackupsRotate int + // Tracing enables propagation of contexts across binary boundaries. + Tracing bool +} + type FevmConfig struct { // EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids. // This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above. @@ -757,20 +791,48 @@ type FevmConfig struct { // Set to 0 to keep all mappings EthTxHashMappingLifetimeDays int - Events Events + Events DeprecatedEvents `toml:"Events,omitempty"` } -type Events struct { - // EnableEthRPC enables APIs that +type DeprecatedEvents struct { + // DisableRealTimeFilterAPI is DEPRECATED and will be removed in a future release. Use Events.DisableRealTimeFilterAPI instead. 
+ DisableRealTimeFilterAPI bool `moved:"Events.DisableRealTimeFilterAPI" toml:"DisableRealTimeFilterAPI,omitempty"` + + // DisableHistoricFilterAPI is DEPRECATED and will be removed in a future release. Use Events.DisableHistoricFilterAPI instead. + DisableHistoricFilterAPI bool `moved:"Events.DisableHistoricFilterAPI" toml:"DisableHistoricFilterAPI,omitempty"` + + // FilterTTL is DEPRECATED and will be removed in a future release. Use Events.FilterTTL instead. + FilterTTL Duration `moved:"Events.FilterTTL" toml:"FilterTTL,omitzero"` + + // MaxFilters is DEPRECATED and will be removed in a future release. Use Events.MaxFilters instead. + MaxFilters int `moved:"Events.MaxFilters" toml:"MaxFilters,omitzero"` + + // MaxFilterResults is DEPRECATED and will be removed in a future release. Use Events.MaxFilterResults instead. + MaxFilterResults int `moved:"Events.MaxFilterResults" toml:"MaxFilterResults,omitzero"` + + // MaxFilterHeightRange is DEPRECATED and will be removed in a future release. Use Events.MaxFilterHeightRange instead. + MaxFilterHeightRange uint64 `moved:"Events.MaxFilterHeightRange" toml:"MaxFilterHeightRange,omitzero"` + + // DatabasePath is DEPRECATED and will be removed in a future release. Use Events.DatabasePath instead. + DatabasePath string `moved:"Events.DatabasePath" toml:"DatabasePath,omitempty"` +} + +type EventsConfig struct { // DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. - // The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag. + // The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. DisableRealTimeFilterAPI bool // DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events // that occurred in the past. HistoricFilterAPI maintains a queryable index of events. 
- // The API is enabled when EnableEthRPC is true, but can be disabled selectively with this flag. + // The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. DisableHistoricFilterAPI bool + // EnableActorEventsAPI enables the Actor events API that enables clients to consume events + // emitted by (smart contracts + built-in Actors). + // This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be + // disabled by setting their respective Disable* options. + EnableActorEventsAPI bool + // FilterTTL specifies the time to live for actor event filters. Filters that haven't been accessed longer than // this time become eligible for automatic deletion. FilterTTL Duration @@ -820,6 +882,7 @@ type HarmonyDB struct { // The port to find Yugabyte. Blank for default. Port string } + type FaultReporterConfig struct { // EnableConsensusFaultReporter controls whether the node will monitor and // report consensus faults. 
When enabled, the node will watch for malicious diff --git a/node/hello/cbor_gen.go b/node/hello/cbor_gen.go index 78e950f6f..91a270ff7 100644 --- a/node/hello/cbor_gen.go +++ b/node/hello/cbor_gen.go @@ -35,7 +35,7 @@ func (t *HelloMessage) MarshalCBOR(w io.Writer) error { } // t.HeaviestTipSet ([]cid.Cid) (slice) - if len(t.HeaviestTipSet) > cbg.MaxLength { + if len(t.HeaviestTipSet) > 8192 { return xerrors.Errorf("Slice value in field t.HeaviestTipSet was too long") } @@ -105,7 +105,7 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.HeaviestTipSet: array too large (%d)", extra) } @@ -136,16 +136,16 @@ func (t *HelloMessage) UnmarshalCBOR(r io.Reader) (err error) { t.HeaviestTipSet[i] = c } + } } - // t.HeaviestTipSetHeight (abi.ChainEpoch) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -223,6 +223,7 @@ func (t *LatencyMessage) MarshalCBOR(w io.Writer) error { return err } } + return nil } @@ -252,10 +253,10 @@ func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) (err error) { // t.TArrival (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -277,10 +278,10 @@ func (t *LatencyMessage) UnmarshalCBOR(r io.Reader) (err error) { // t.TSent (int64) (int64) { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) diff --git a/node/impl/full.go b/node/impl/full.go index bc555c8c2..5343bcf0d 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -34,7 +34,9 @@ type FullNodeAPI struct { full.MsigAPI full.WalletAPI full.SyncAPI + full.RaftAPI full.EthAPI + full.ActorEventsAPI DS dtypes.MetadataDS NetworkName 
dtypes.NetworkName @@ -118,4 +120,12 @@ func (n *FullNodeAPI) NodeStatus(ctx context.Context, inclChainStatus bool) (sta return status, nil } +func (n *FullNodeAPI) RaftState(ctx context.Context) (*api.RaftStateData, error) { + return n.RaftAPI.GetRaftState(ctx) +} + +func (n *FullNodeAPI) RaftLeader(ctx context.Context) (peer.ID, error) { + return n.RaftAPI.Leader(ctx) +} + var _ api.FullNode = &FullNodeAPI{} diff --git a/node/impl/full/actor_events.go b/node/impl/full/actor_events.go new file mode 100644 index 000000000..bb192a4cf --- /dev/null +++ b/node/impl/full/actor_events.go @@ -0,0 +1,376 @@ +package full + +import ( + "context" + "fmt" + "time" + + "github.com/ipfs/go-cid" + "github.com/raulk/clock" + "go.uber.org/fx" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/types" +) + +type ActorEventAPI interface { + GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) + SubscribeActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) +} + +var ( + _ ActorEventAPI = *new(api.FullNode) + _ ActorEventAPI = *new(api.Gateway) +) + +type ChainAccessor interface { + GetHeaviestTipSet() *types.TipSet +} + +type EventFilterManager interface { + Install( + ctx context.Context, + minHeight, maxHeight abi.ChainEpoch, + tipsetCid cid.Cid, + addresses []address.Address, + keysWithCodec map[string][]types.ActorEventBlock, + excludeReverted bool, + ) (filter.EventFilter, error) + Remove(ctx context.Context, id types.FilterID) error +} + +type ActorEventsAPI struct { + fx.In + ActorEventAPI +} + +type ActorEventHandler struct { + chain ChainAccessor + eventFilterManager EventFilterManager + blockDelay time.Duration + maxFilterHeightRange abi.ChainEpoch + clock clock.Clock +} + +var _ ActorEventAPI 
= (*ActorEventHandler)(nil) + +func NewActorEventHandler( + chain ChainAccessor, + eventFilterManager EventFilterManager, + blockDelay time.Duration, + maxFilterHeightRange abi.ChainEpoch, +) *ActorEventHandler { + return &ActorEventHandler{ + chain: chain, + eventFilterManager: eventFilterManager, + blockDelay: blockDelay, + maxFilterHeightRange: maxFilterHeightRange, + clock: clock.New(), + } +} + +func NewActorEventHandlerWithClock( + chain ChainAccessor, + eventFilterManager EventFilterManager, + blockDelay time.Duration, + maxFilterHeightRange abi.ChainEpoch, + clock clock.Clock, +) *ActorEventHandler { + return &ActorEventHandler{ + chain: chain, + eventFilterManager: eventFilterManager, + blockDelay: blockDelay, + maxFilterHeightRange: maxFilterHeightRange, + clock: clock, + } +} + +func (a *ActorEventHandler) GetActorEventsRaw(ctx context.Context, evtFilter *types.ActorEventFilter) ([]*types.ActorEvent, error) { + if a.eventFilterManager == nil { + return nil, api.ErrNotSupported + } + + if evtFilter == nil { + evtFilter = &types.ActorEventFilter{} + } + params, err := a.parseFilter(*evtFilter) + if err != nil { + return nil, err + } + + // Install a filter just for this call, collect events, remove the filter + tipSetCid, err := params.GetTipSetCid() + if err != nil { + return nil, fmt.Errorf("failed to get tipset cid: %w", err) + } + f, err := a.eventFilterManager.Install(ctx, params.MinHeight, params.MaxHeight, tipSetCid, evtFilter.Addresses, evtFilter.Fields, false) + if err != nil { + return nil, err + } + defer func() { + // Remove the temporary filter regardless of the original context. 
+ if err := a.eventFilterManager.Remove(context.Background(), f.ID()); err != nil { + log.Warnf("failed to remove filter: %s", err) + } + }() + return getCollected(ctx, f), nil +} + +type filterParams struct { + MinHeight abi.ChainEpoch + MaxHeight abi.ChainEpoch + TipSetKey types.TipSetKey +} + +func (fp filterParams) GetTipSetCid() (cid.Cid, error) { + if fp.TipSetKey.IsEmpty() { + return cid.Undef, nil + } + return fp.TipSetKey.Cid() +} + +func (a *ActorEventHandler) parseFilter(f types.ActorEventFilter) (*filterParams, error) { + if f.TipSetKey != nil && !f.TipSetKey.IsEmpty() { + if f.FromHeight != nil || f.ToHeight != nil { + return nil, fmt.Errorf("cannot specify both TipSetKey and FromHeight/ToHeight") + } + + return &filterParams{ + MinHeight: 0, + MaxHeight: 0, + TipSetKey: *f.TipSetKey, + }, nil + } + + min, max, err := parseHeightRange(a.chain.GetHeaviestTipSet().Height(), f.FromHeight, f.ToHeight, a.maxFilterHeightRange) + if err != nil { + return nil, err + } + + return &filterParams{ + MinHeight: min, + MaxHeight: max, + TipSetKey: types.EmptyTSK, + }, nil +} + +// parseHeightRange is similar to eth's parseBlockRange but with slightly different semantics but +// results in equivalent values that we can plug in to the EventFilterManager. 
+// +// * Uses "height", allowing for nillable values rather than strings +// * No "latest" and "earliest", those are now represented by nil on the way in and -1 on the way out +// * No option for hex representation +func parseHeightRange(heaviest abi.ChainEpoch, fromHeight, toHeight *abi.ChainEpoch, maxRange abi.ChainEpoch) (minHeight abi.ChainEpoch, maxHeight abi.ChainEpoch, err error) { + if fromHeight != nil && *fromHeight < 0 { + return 0, 0, fmt.Errorf("range 'from' must be greater than or equal to 0") + } + if fromHeight == nil { + minHeight = -1 + } else { + minHeight = *fromHeight + } + if toHeight == nil { + maxHeight = -1 + } else { + maxHeight = *toHeight + } + + // Validate height ranges are within limits set by node operator + if minHeight == -1 && maxHeight > 0 { + // Here the client is looking for events between the head and some future height + if maxHeight-heaviest > maxRange { + return 0, 0, fmt.Errorf("invalid epoch range: 'to' height is too far in the future (maximum: %d)", maxRange) + } + } else if minHeight >= 0 && maxHeight == -1 { + // Here the client is looking for events between some time in the past and the current head + if heaviest-minHeight > maxRange { + return 0, 0, fmt.Errorf("invalid epoch range: 'from' height is too far in the past (maximum: %d)", maxRange) + } + } else if minHeight >= 0 && maxHeight >= 0 { + if minHeight > maxHeight { + return 0, 0, fmt.Errorf("invalid epoch range: 'to' height (%d) must be after 'from' height (%d)", minHeight, maxHeight) + } else if maxHeight-minHeight > maxRange { + return 0, 0, fmt.Errorf("invalid epoch range: range between to and 'from' heights is too large (maximum: %d)", maxRange) + } + } + return minHeight, maxHeight, nil +} + +func (a *ActorEventHandler) SubscribeActorEventsRaw(ctx context.Context, evtFilter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + if a.eventFilterManager == nil { + return nil, api.ErrNotSupported + } + + if evtFilter == nil { + evtFilter = 
&types.ActorEventFilter{} + } + params, err := a.parseFilter(*evtFilter) + if err != nil { + return nil, err + } + + tipSetCid, err := params.GetTipSetCid() + if err != nil { + return nil, fmt.Errorf("failed to get tipset cid: %w", err) + } + fm, err := a.eventFilterManager.Install(ctx, params.MinHeight, params.MaxHeight, tipSetCid, evtFilter.Addresses, evtFilter.Fields, false) + if err != nil { + return nil, err + } + + // The goal for the code below is to send events on the `out` channel as fast as possible and not + // let it get too far behind the rate at which the events are generated. + // For historical events, we aim to send all events within a single block's time (30s on mainnet). + // This ensures that the client can catch up quickly enough to start receiving new events. + // For ongoing events, we also aim to send all events within a single block's time, so we never + // want to be buffering events (approximately) more than one epoch behind the current head. + // It's approximate because we only update our notion of "current epoch" once per ~blocktime. + + out := make(chan *types.ActorEvent) + + // When we start sending real-time events, we want to make sure that we don't fall behind more + // than one epoch's worth of events (approximately). Capture this value now, before we send + // historical events to allow for a little bit of slack in the historical event sending. 
+ minBacklogHeight := a.chain.GetHeaviestTipSet().Height() - 1 + + go func() { + defer func() { + // tell the caller we're done + close(out) + fm.ClearSubChannel() + if err := a.eventFilterManager.Remove(ctx, fm.ID()); err != nil { + log.Warnf("failed to remove filter: %s", err) + } + }() + + // Handle any historical events that our filter may have picked up ----------------------------- + + evs := getCollected(ctx, fm) + if len(evs) > 0 { + // ensure we get all events out on the channel within one block's time (30s on mainnet) + timer := a.clock.Timer(a.blockDelay) + for _, ev := range evs { + select { + case out <- ev: + case <-timer.C: + log.Errorf("closing event subscription due to slow event sending rate") + timer.Stop() + return + case <-ctx.Done(): + timer.Stop() + return + } + } + timer.Stop() + } + + // for the case where we have a MaxHeight set, we don't get a signal from the filter when we + // reach that height, so we need to check it ourselves, do it now but also in the loop + if params.MaxHeight > 0 && minBacklogHeight+1 >= params.MaxHeight { + return + } + + // Handle ongoing events from the filter ------------------------------------------------------- + + in := make(chan interface{}, 256) + fm.SetSubChannel(in) + + var buffer []*types.ActorEvent + nextBacklogHeightUpdate := a.clock.Now().Add(a.blockDelay) + + collectEvent := func(ev interface{}) bool { + ce, ok := ev.(*filter.CollectedEvent) + if !ok { + log.Errorf("got unexpected value from event filter: %T", ev) + return false + } + + if ce.Height < minBacklogHeight { + // since we mostly care about buffer size, we only trigger a too-slow close when the buffer + // increases, i.e. 
we collect a new event + log.Errorf("closing event subscription due to slow event sending rate") + return false + } + + buffer = append(buffer, &types.ActorEvent{ + Entries: ce.Entries, + Emitter: ce.EmitterAddr, + Reverted: ce.Reverted, + Height: ce.Height, + TipSetKey: ce.TipSetKey, + MsgCid: ce.MsgCid, + }) + return true + } + + ticker := a.clock.Ticker(a.blockDelay) + defer ticker.Stop() + + for ctx.Err() == nil { + if len(buffer) > 0 { + select { + case ev, ok := <-in: // incoming event + if !ok || !collectEvent(ev) { + return + } + case out <- buffer[0]: // successful send + buffer[0] = nil + buffer = buffer[1:] + case <-ticker.C: + // check that our backlog isn't too big by looking at the oldest event + if buffer[0].Height < minBacklogHeight { + log.Errorf("closing event subscription due to slow event sending rate") + return + } + case <-ctx.Done(): + return + } + } else { + select { + case ev, ok := <-in: // incoming event + if !ok || !collectEvent(ev) { + return + } + case <-ctx.Done(): + return + case <-ticker.C: + currentHeight := a.chain.GetHeaviestTipSet().Height() + if params.MaxHeight > 0 && currentHeight > params.MaxHeight { + // we've reached the filter's MaxHeight, we're done so we can close the channel + return + } + } + } + + if a.clock.Now().After(nextBacklogHeightUpdate) { + minBacklogHeight = a.chain.GetHeaviestTipSet().Height() - 1 + nextBacklogHeightUpdate = a.clock.Now().Add(a.blockDelay) + } + } + }() + + return out, nil +} + +func getCollected(ctx context.Context, f filter.EventFilter) []*types.ActorEvent { + ces := f.TakeCollectedEvents(ctx) + + var out []*types.ActorEvent + + for _, e := range ces { + out = append(out, &types.ActorEvent{ + Entries: e.Entries, + Emitter: e.EmitterAddr, + Reverted: e.Reverted, + Height: e.Height, + TipSetKey: e.TipSetKey, + MsgCid: e.MsgCid, + }) + } + + return out +} diff --git a/node/impl/full/actor_events_test.go b/node/impl/full/actor_events_test.go new file mode 100644 index 000000000..b4c4e103c --- 
/dev/null +++ b/node/impl/full/actor_events_test.go @@ -0,0 +1,780 @@ +package full + +import ( + "context" + "fmt" + pseudo "math/rand" + "sync" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" + "github.com/raulk/clock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/types" +) + +var testCid = cid.MustParse("bafyreicmaj5hhoy5mgqvamfhgexxyergw7hdeshizghodwkjg6qmpoco7i") + +func TestParseHeightRange(t *testing.T) { + testCases := []struct { + name string + heaviest abi.ChainEpoch + from *abi.ChainEpoch + to *abi.ChainEpoch + maxRange abi.ChainEpoch + minOut abi.ChainEpoch + maxOut abi.ChainEpoch + errStr string + }{ + { + name: "fails when both are specified and range is greater than max allowed range", + heaviest: 100, + from: epochPtr(256), + to: epochPtr(512), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "too large", + }, + { + name: "fails when min is specified and range is greater than max allowed range", + heaviest: 500, + from: epochPtr(16), + to: nil, + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "'from' height is too far in the past", + }, + { + name: "fails when max is specified and range is greater than max allowed range", + heaviest: 500, + from: nil, + to: epochPtr(65536), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "'to' height is too far in the future", + }, + { + name: "fails when from is greater than to", + heaviest: 100, + from: epochPtr(512), + to: epochPtr(256), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "must be after", + }, + { + name: "works when range is valid (nil from)", + heaviest: 500, + from: nil, + to: epochPtr(48), + maxRange: 1000, + minOut: -1, + maxOut: 48, + }, + { + name: "works 
when range is valid (nil to)", + heaviest: 500, + from: epochPtr(0), + to: nil, + maxRange: 1000, + minOut: 0, + maxOut: -1, + }, + { + name: "works when range is valid (nil from and to)", + heaviest: 500, + from: nil, + to: nil, + maxRange: 1000, + minOut: -1, + maxOut: -1, + }, + { + name: "works when range is valid and specified", + heaviest: 500, + from: epochPtr(16), + to: epochPtr(48), + maxRange: 1000, + minOut: 16, + maxOut: 48, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + req := require.New(t) + min, max, err := parseHeightRange(tc.heaviest, tc.from, tc.to, tc.maxRange) + req.Equal(tc.minOut, min) + req.Equal(tc.maxOut, max) + if tc.errStr != "" { + t.Log(err) + req.Error(err) + req.Contains(err.Error(), tc.errStr) + } else { + req.NoError(err) + } + }) + } +} + +func TestGetActorEventsRaw(t *testing.T) { + ctx := context.Background() + req := require.New(t) + + const ( + seed = 984651320 + maxFilterHeightRange = 100 + ) + + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + + minerAddr, err := address.NewIDAddress(uint64(rng.Int63())) + req.NoError(err) + + testCases := []struct { + name string + filter *types.ActorEventFilter + currentHeight int64 + installMinHeight int64 + installMaxHeight int64 + installTipSetKey cid.Cid + installAddresses []address.Address + installKeysWithCodec map[string][]types.ActorEventBlock + installExcludeReverted bool + expectErr string + }{ + { + name: "nil filter", + filter: nil, + installMinHeight: -1, + installMaxHeight: -1, + }, + { + name: "empty filter", + filter: &types.ActorEventFilter{}, + installMinHeight: -1, + installMaxHeight: -1, + }, + { + name: "basic height range filter", + filter: &types.ActorEventFilter{ + FromHeight: epochPtr(0), + ToHeight: epochPtr(maxFilterHeightRange), + }, + installMinHeight: 0, + installMaxHeight: maxFilterHeightRange, + }, + { + name: "from, no to height", + filter: &types.ActorEventFilter{ + FromHeight: 
epochPtr(0), + }, + currentHeight: maxFilterHeightRange - 1, + installMinHeight: 0, + installMaxHeight: -1, + }, + { + name: "to, no from height", + filter: &types.ActorEventFilter{ + ToHeight: epochPtr(maxFilterHeightRange - 1), + }, + installMinHeight: -1, + installMaxHeight: maxFilterHeightRange - 1, + }, + { + name: "from, no to height, too far", + filter: &types.ActorEventFilter{ + FromHeight: epochPtr(0), + }, + currentHeight: maxFilterHeightRange + 1, + expectErr: "invalid epoch range: 'from' height is too far in the past", + }, + { + name: "to, no from height, too far", + filter: &types.ActorEventFilter{ + ToHeight: epochPtr(maxFilterHeightRange + 1), + }, + currentHeight: 0, + expectErr: "invalid epoch range: 'to' height is too far in the future", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + efm := newMockEventFilterManager(t) + collectedEvents := makeCollectedEvents(t, rng, 0, 1, 10) + filter := newMockFilter(ctx, t, rng, collectedEvents) + + if tc.expectErr == "" { + efm.expectInstall(abi.ChainEpoch(tc.installMinHeight), abi.ChainEpoch(tc.installMaxHeight), tc.installTipSetKey, tc.installAddresses, tc.installKeysWithCodec, tc.installExcludeReverted, filter) + } + + ts, err := types.NewTipSet([]*types.BlockHeader{newBlockHeader(minerAddr, tc.currentHeight)}) + req.NoError(err) + chain := newMockChainAccessor(t, ts) + + handler := NewActorEventHandler(chain, efm, 50*time.Millisecond, maxFilterHeightRange) + + gotEvents, err := handler.GetActorEventsRaw(ctx, tc.filter) + if tc.expectErr != "" { + req.Error(err) + req.Contains(err.Error(), tc.expectErr) + } else { + req.NoError(err) + expectedEvents := collectedToActorEvents(collectedEvents) + req.Equal(expectedEvents, gotEvents) + efm.requireRemoved(filter.ID()) + } + }) + } +} + +func TestSubscribeActorEventsRaw(t *testing.T) { + const ( + seed = 984651320 + maxFilterHeightRange = 100 + blockDelay = 30 * time.Second + filterStartHeight = 0 + currentHeight = 
10 + finishHeight = 20 + eventsPerEpoch = 2 + ) + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + mockClock := clock.NewMock() + + minerAddr, err := address.NewIDAddress(uint64(rng.Int63())) + require.NoError(t, err) + + for _, tc := range []struct { + name string + receiveSpeed time.Duration // how fast will we receive all events _per epoch_ + expectComplete bool // do we expect this to succeed? + endEpoch int // -1 for no end + }{ + {"fast", 0, true, -1}, + {"fast with end", 0, true, finishHeight}, + {"half block speed", blockDelay / 2, true, -1}, + {"half block speed with end", blockDelay / 2, true, finishHeight}, + // testing exactly blockDelay is a border case and will be flaky + {"1.5 block speed", blockDelay * 3 / 2, false, -1}, + {"twice block speed", blockDelay * 2, false, -1}, + } { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + tc := tc + t.Run(tc.name, func(t *testing.T) { + req := require.New(t) + + mockClock.Set(time.Now()) + mockFilterManager := newMockEventFilterManager(t) + allEvents := makeCollectedEvents(t, rng, filterStartHeight, eventsPerEpoch, finishHeight) + historicalEvents := allEvents[0 : (currentHeight-filterStartHeight)*eventsPerEpoch] + mockFilter := newMockFilter(ctx, t, rng, historicalEvents) + mockFilterManager.expectInstall(abi.ChainEpoch(0), abi.ChainEpoch(tc.endEpoch), cid.Undef, nil, nil, false, mockFilter) + + ts, err := types.NewTipSet([]*types.BlockHeader{newBlockHeader(minerAddr, currentHeight)}) + req.NoError(err) + mockChain := newMockChainAccessor(t, ts) + + handler := NewActorEventHandlerWithClock(mockChain, mockFilterManager, blockDelay, maxFilterHeightRange, mockClock) + + aef := &types.ActorEventFilter{FromHeight: epochPtr(0)} + if tc.endEpoch >= 0 { + aef.ToHeight = epochPtr(tc.endEpoch) + } + eventChan, err := handler.SubscribeActorEventsRaw(ctx, aef) + req.NoError(err) + + // assume we can cleanly pick up all historical events in one go + var 
gotEvents []*types.ActorEvent + for len(gotEvents) < len(historicalEvents) && ctx.Err() == nil { + select { + case e, ok := <-eventChan: + req.True(ok) + gotEvents = append(gotEvents, e) + case <-ctx.Done(): + t.Fatalf("timed out waiting for event") + } + } + req.Equal(collectedToActorEvents(historicalEvents), gotEvents) + + mockClock.Add(blockDelay) + nextReceiveTime := mockClock.Now() + + // Ticker to simulate both time and the chain advancing, including emitting events at + // the right time directly to the filter. + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for thisHeight := int64(currentHeight); ctx.Err() == nil; thisHeight++ { + ts, err := types.NewTipSet([]*types.BlockHeader{newBlockHeader(minerAddr, thisHeight)}) + req.NoError(err) + mockChain.setHeaviestTipSet(ts) + + var eventsThisEpoch []*filter.CollectedEvent + if thisHeight <= finishHeight { + eventsThisEpoch = allEvents[(thisHeight-filterStartHeight)*eventsPerEpoch : (thisHeight-filterStartHeight+2)*eventsPerEpoch] + } + for i := 0; i < eventsPerEpoch && ctx.Err() == nil; i++ { + if len(eventsThisEpoch) > 0 { + mockFilter.sendEventToChannel(eventsThisEpoch[0]) + eventsThisEpoch = eventsThisEpoch[1:] + } + select { + case <-time.After(2 * time.Millisecond): // allow everyone to catch a breath + mockClock.Add(blockDelay / eventsPerEpoch) + case <-ctx.Done(): + return + } + } + + if thisHeight == finishHeight+1 && tc.expectComplete && tc.endEpoch < 0 && ctx.Err() == nil { + // at finish+1, for the case where we expect clean completion and there is no ToEpoch + // set on the filter, if we send one more event at the next height so we end up with + // something uncollected in the buffer, causing a disconnect + evt := makeCollectedEvents(t, rng, finishHeight+1, 1, finishHeight+1)[0] + mockFilter.sendEventToChannel(evt) + } // else if endEpoch is set, we expect the chain advance to force closure + } + }() + + // Client collecting events off the channel + + var prematureEnd bool + 
for thisHeight := int64(currentHeight); thisHeight <= finishHeight && !prematureEnd && ctx.Err() == nil; thisHeight++ { + // delay to simulate latency + select { + case <-mockClock.After(nextReceiveTime.Sub(mockClock.Now())): + case <-ctx.Done(): + t.Fatalf("timed out simulating receive delay") + } + + // collect eventsPerEpoch more events + var newEvents []*types.ActorEvent + for len(newEvents) < eventsPerEpoch && !prematureEnd && ctx.Err() == nil { + select { + case e, ok := <-eventChan: // receive the events from the subscription + if ok { + newEvents = append(newEvents, e) + } else { + prematureEnd = true + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for event") + } + nextReceiveTime = nextReceiveTime.Add(tc.receiveSpeed) + } + + if tc.expectComplete || !prematureEnd { + // sanity check that we got what we expected this epoch + req.Len(newEvents, eventsPerEpoch) + epochEvents := allEvents[(thisHeight)*eventsPerEpoch : (thisHeight+1)*eventsPerEpoch] + req.Equal(collectedToActorEvents(epochEvents), newEvents) + gotEvents = append(gotEvents, newEvents...) 
+ } + } + + req.Equal(tc.expectComplete, !prematureEnd, "expected to complete") + if tc.expectComplete { + req.Len(gotEvents, len(allEvents)) + req.Equal(collectedToActorEvents(allEvents), gotEvents) + } else { + req.NotEqual(len(gotEvents), len(allEvents)) + } + + // cleanup + mockFilter.requireClearSubChannelCalledEventually(500 * time.Millisecond) + mockFilterManager.requireRemovedEventually(mockFilter.ID(), 500*time.Millisecond) + cancel() + wg.Wait() // wait for the chain to stop advancing + }) + } +} + +func TestSubscribeActorEventsRaw_OnlyHistorical(t *testing.T) { + // Similar to TestSubscribeActorEventsRaw but we set an explicit end that caps out at the current height + const ( + seed = 984651320 + maxFilterHeightRange = 100 + blockDelay = 30 * time.Second + filterStartHeight = 0 + currentHeight = 10 + eventsPerEpoch = 2 + ) + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + mockClock := clock.NewMock() + + minerAddr, err := address.NewIDAddress(uint64(rng.Int63())) + require.NoError(t, err) + + for _, tc := range []struct { + name string + blockTimeToComplete float64 // fraction of a block time that it takes to receive all events + expectComplete bool // do we expect this to succeed? 
+ }{ + {"fast", 0, true}, + {"half block speed", 0.5, true}, + {"1.5 block speed", 1.5, false}, + {"twice block speed", 2, false}, + } { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + tc := tc + t.Run(tc.name, func(t *testing.T) { + req := require.New(t) + + mockClock.Set(time.Now()) + mockFilterManager := newMockEventFilterManager(t) + allEvents := makeCollectedEvents(t, rng, filterStartHeight, eventsPerEpoch, currentHeight) + mockFilter := newMockFilter(ctx, t, rng, allEvents) + mockFilterManager.expectInstall(abi.ChainEpoch(0), abi.ChainEpoch(currentHeight), cid.Undef, nil, nil, false, mockFilter) + + ts, err := types.NewTipSet([]*types.BlockHeader{newBlockHeader(minerAddr, currentHeight)}) + req.NoError(err) + mockChain := newMockChainAccessor(t, ts) + + handler := NewActorEventHandlerWithClock(mockChain, mockFilterManager, blockDelay, maxFilterHeightRange, mockClock) + + aef := &types.ActorEventFilter{FromHeight: epochPtr(0), ToHeight: epochPtr(currentHeight)} + eventChan, err := handler.SubscribeActorEventsRaw(ctx, aef) + req.NoError(err) + + var gotEvents []*types.ActorEvent + + // assume we can cleanly pick up all historical events in one go + receiveLoop: + for ctx.Err() == nil { + select { + case e, ok := <-eventChan: + if ok { + gotEvents = append(gotEvents, e) + mockClock.Add(time.Duration(float64(blockDelay) * tc.blockTimeToComplete / float64(len(allEvents)))) + // no need to advance the chain, we're also testing that's not necessary + time.Sleep(2 * time.Millisecond) // catch a breath + } else { + break receiveLoop + } + case <-ctx.Done(): + t.Fatalf("timed out waiting for event, got %d/%d events", len(gotEvents), len(allEvents)) + } + } + if tc.expectComplete { + req.Equal(collectedToActorEvents(allEvents), gotEvents) + } else { + req.NotEqual(len(gotEvents), len(allEvents)) + } + // advance the chain and observe cleanup + ts, err = types.NewTipSet([]*types.BlockHeader{newBlockHeader(minerAddr, 
currentHeight+1)}) + req.NoError(err) + mockChain.setHeaviestTipSet(ts) + mockClock.Add(blockDelay) + mockFilterManager.requireRemovedEventually(mockFilter.ID(), 1*time.Second) + }) + } +} + +var ( + _ ChainAccessor = (*mockChainAccessor)(nil) + _ filter.EventFilter = (*mockFilter)(nil) + _ EventFilterManager = (*mockEventFilterManager)(nil) +) + +type mockChainAccessor struct { + t *testing.T + ts *types.TipSet + lk sync.Mutex +} + +func newMockChainAccessor(t *testing.T, ts *types.TipSet) *mockChainAccessor { + return &mockChainAccessor{t: t, ts: ts} +} + +func (m *mockChainAccessor) setHeaviestTipSet(ts *types.TipSet) { + m.lk.Lock() + defer m.lk.Unlock() + m.ts = ts +} + +func (m *mockChainAccessor) GetHeaviestTipSet() *types.TipSet { + m.lk.Lock() + defer m.lk.Unlock() + return m.ts +} + +type mockFilter struct { + t *testing.T + ctx context.Context + id types.FilterID + lastTaken time.Time + ch chan<- interface{} + historicalEvents []*filter.CollectedEvent + subChannelCalls int + clearSubChannelCalls int + lk sync.Mutex +} + +func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historicalEvents []*filter.CollectedEvent) *mockFilter { + t.Helper() + var id [32]byte + _, err := rng.Read(id[:]) + require.NoError(t, err) + return &mockFilter{ + t: t, + ctx: ctx, + id: id, + historicalEvents: historicalEvents, + } +} + +func (m *mockFilter) sendEventToChannel(e *filter.CollectedEvent) { + m.lk.Lock() + defer m.lk.Unlock() + if m.ch != nil { + select { + case m.ch <- e: + case <-m.ctx.Done(): + } + } +} + +func (m *mockFilter) requireClearSubChannelCalledEventually(timeout time.Duration) { + m.t.Helper() + require.Eventually(m.t, + func() bool { + m.lk.Lock() + c := m.clearSubChannelCalls + m.lk.Unlock() + switch c { + case 0: + return false + case 1: + return true + default: + m.t.Fatalf("ClearSubChannel called more than once: %d", c) + return false + } + }, timeout, 10*time.Millisecond, "ClearSubChannel is not called exactly once") +} + +func 
(m *mockFilter) ID() types.FilterID { + return m.id +} + +func (m *mockFilter) LastTaken() time.Time { + return m.lastTaken +} + +func (m *mockFilter) SetSubChannel(ch chan<- interface{}) { + m.t.Helper() + m.lk.Lock() + defer m.lk.Unlock() + m.subChannelCalls++ + m.ch = ch +} + +func (m *mockFilter) ClearSubChannel() { + m.t.Helper() + m.lk.Lock() + defer m.lk.Unlock() + m.clearSubChannelCalls++ + m.ch = nil +} + +func (m *mockFilter) TakeCollectedEvents(context.Context) []*filter.CollectedEvent { + e := m.historicalEvents + m.historicalEvents = nil + m.lastTaken = time.Now() + return e +} + +func (m *mockFilter) CollectEvents(context.Context, *filter.TipSetEvents, bool, filter.AddressResolver) error { + m.t.Fatalf("unexpected call to CollectEvents") + return nil +} + +type filterManagerExpectation struct { + minHeight, maxHeight abi.ChainEpoch + tipsetCid cid.Cid + addresses []address.Address + keysWithCodec map[string][]types.ActorEventBlock + excludeReverted bool + returnFilter filter.EventFilter +} + +type mockEventFilterManager struct { + t *testing.T + expectations []filterManagerExpectation + removed []types.FilterID + lk sync.Mutex +} + +func newMockEventFilterManager(t *testing.T) *mockEventFilterManager { + return &mockEventFilterManager{t: t} +} + +func (m *mockEventFilterManager) expectInstall( + minHeight, maxHeight abi.ChainEpoch, + tipsetCid cid.Cid, + addresses []address.Address, + keysWithCodec map[string][]types.ActorEventBlock, + excludeReverted bool, + returnFilter filter.EventFilter) { + + m.t.Helper() + m.expectations = append(m.expectations, filterManagerExpectation{ + minHeight: minHeight, + maxHeight: maxHeight, + tipsetCid: tipsetCid, + addresses: addresses, + keysWithCodec: keysWithCodec, + excludeReverted: excludeReverted, + returnFilter: returnFilter, + }) +} + +func (m *mockEventFilterManager) requireRemoved(id types.FilterID) { + m.t.Helper() + m.lk.Lock() + defer m.lk.Unlock() + require.Contains(m.t, m.removed, id) +} + +func (m 
*mockEventFilterManager) requireRemovedEventually(id types.FilterID, timeout time.Duration) { + m.t.Helper() + require.Eventuallyf(m.t, func() bool { + m.lk.Lock() + defer m.lk.Unlock() + if len(m.removed) == 0 { + return false + } + assert.Contains(m.t, m.removed, id) + return true + }, timeout, 10*time.Millisecond, "filter %x not removed", id) +} + +func (m *mockEventFilterManager) Install( + _ context.Context, + minHeight, maxHeight abi.ChainEpoch, + tipsetCid cid.Cid, + addresses []address.Address, + keysWithCodec map[string][]types.ActorEventBlock, + excludeReverted bool, +) (filter.EventFilter, error) { + + require.True(m.t, len(m.expectations) > 0, "unexpected call to Install") + exp := m.expectations[0] + m.expectations = m.expectations[1:] + // check the expectation matches the call then return the attached filter + require.Equal(m.t, exp.minHeight, minHeight) + require.Equal(m.t, exp.maxHeight, maxHeight) + require.Equal(m.t, exp.tipsetCid, tipsetCid) + require.Equal(m.t, exp.addresses, addresses) + require.Equal(m.t, exp.keysWithCodec, keysWithCodec) + require.Equal(m.t, exp.excludeReverted, excludeReverted) + return exp.returnFilter, nil +} + +func (m *mockEventFilterManager) Remove(_ context.Context, id types.FilterID) error { + m.lk.Lock() + defer m.lk.Unlock() + m.removed = append(m.removed, id) + return nil +} + +func newBlockHeader(minerAddr address.Address, height int64) *types.BlockHeader { + return &types.BlockHeader{ + Miner: minerAddr, + Ticket: &types.Ticket{ + VRFProof: []byte("vrf proof0000000vrf proof0000000"), + }, + ElectionProof: &types.ElectionProof{ + VRFProof: []byte("vrf proof0000000vrf proof0000000"), + }, + Parents: []cid.Cid{testCid, testCid}, + ParentMessageReceipts: testCid, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("sign me up")}, + ParentWeight: types.NewInt(123125126212), + Messages: testCid, + Height: abi.ChainEpoch(height), + ParentStateRoot: testCid, + BlockSig: &crypto.Signature{Type: 
crypto.SigTypeBLS, Data: []byte("sign me up")}, + ParentBaseFee: types.NewInt(3432432843291), + } +} + +func epochPtr(i int) *abi.ChainEpoch { + e := abi.ChainEpoch(i) + return &e +} + +func collectedToActorEvents(collected []*filter.CollectedEvent) []*types.ActorEvent { + var out []*types.ActorEvent + for _, c := range collected { + out = append(out, &types.ActorEvent{ + Entries: c.Entries, + Emitter: c.EmitterAddr, + Reverted: c.Reverted, + Height: c.Height, + TipSetKey: c.TipSetKey, + MsgCid: c.MsgCid, + }) + } + return out +} + +func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, eventsPerHeight, eventEndHeight int64) []*filter.CollectedEvent { + var out []*filter.CollectedEvent + for h := eventStartHeight; h <= eventEndHeight; h++ { + for i := int64(0); i < eventsPerHeight; i++ { + out = append(out, makeCollectedEvent(t, rng, types.NewTipSetKey(mkCid(t, fmt.Sprintf("h=%d", h))), abi.ChainEpoch(h))) + } + } + return out +} + +func makeCollectedEvent(t *testing.T, rng *pseudo.Rand, tsKey types.TipSetKey, height abi.ChainEpoch) *filter.CollectedEvent { + addr, err := address.NewIDAddress(uint64(rng.Int63())) + require.NoError(t, err) + + return &filter.CollectedEvent{ + Entries: []types.EventEntry{ + {Flags: 0x01, Key: "k1", Codec: cid.Raw, Value: []byte("v1")}, + {Flags: 0x01, Key: "k2", Codec: cid.Raw, Value: []byte("v2")}, + }, + EmitterAddr: addr, + EventIdx: 0, + Reverted: false, + Height: height, + TipSetKey: tsKey, + MsgIdx: 0, + MsgCid: testCid, + } +} + +func mkCid(t *testing.T, s string) cid.Cid { + h, err := multihash.Sum([]byte(s), multihash.SHA2_256, -1) + require.NoError(t, err) + return cid.NewCidV1(cid.Raw, h) +} diff --git a/node/impl/full/dummy.go b/node/impl/full/dummy.go index 11ff95a63..9685898c0 100644 --- a/node/impl/full/dummy.go +++ b/node/impl/full/dummy.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" + 
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" ) @@ -188,3 +189,17 @@ func (e *EthModuleDummy) EthTraceReplayBlockTransactions(ctx context.Context, bl var _ EthModuleAPI = &EthModuleDummy{} var _ EthEventAPI = &EthModuleDummy{} + +var ErrActorEventModuleDisabled = errors.New("module disabled, enable with Fevm.EnableActorEventsAPI") + +type ActorEventDummy struct{} + +func (a *ActorEventDummy) GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error) { + return nil, ErrActorEventModuleDisabled +} + +func (a *ActorEventDummy) SubscribeActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error) { + return nil, ErrActorEventModuleDisabled +} + +var _ ActorEventAPI = &ActorEventDummy{} diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index d4298492f..5c3fcac96 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -12,6 +12,7 @@ import ( "time" "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" cbg "github.com/whyrusleeping/cbor-gen" "go.uber.org/fx" "golang.org/x/xerrors" @@ -20,7 +21,6 @@ import ( "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" builtintypes "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/builtin/v10/evm" "github.com/filecoin-project/go-state-types/exitcode" @@ -42,6 +42,8 @@ import ( var ErrUnsupported = errors.New("unsupported method") +const maxEthFeeHistoryRewardPercentiles = 100 + type EthModuleAPI interface { EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error) EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) @@ -135,7 +137,7 @@ type EthModule struct { var _ EthModuleAPI = (*EthModule)(nil) -type EthEvent struct { +type EthEventHandler struct { Chain 
*store.ChainStore EventFilterManager *filter.EventFilterManager TipSetFilterManager *filter.TipSetFilterManager @@ -146,7 +148,7 @@ type EthEvent struct { SubscribtionCtx context.Context } -var _ EthEventAPI = (*EthEvent)(nil) +var _ EthEventAPI = (*EthEventHandler)(nil) type EthAPI struct { fx.In @@ -698,6 +700,9 @@ func (a *EthModule) EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (eth } rewardPercentiles := make([]float64, 0) if params.RewardPercentiles != nil { + if len(*params.RewardPercentiles) > maxEthFeeHistoryRewardPercentiles { + return ethtypes.EthFeeHistory{}, errors.New("length of the reward percentile array cannot be greater than 100") + } rewardPercentiles = append(rewardPercentiles, *params.RewardPercentiles...) } for i, rp := range rewardPercentiles { @@ -883,19 +888,21 @@ func (a *EthModule) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtyp return nil, xerrors.Errorf("failed to get transaction hash by cid: %w", err) } if txHash == nil { - log.Warnf("cannot find transaction hash for cid %s", ir.MsgCid) - continue + return nil, xerrors.Errorf("cannot find transaction hash for cid %s", ir.MsgCid) } - traces := []*ethtypes.EthTrace{} - err = buildTraces(&traces, nil, []int{}, ir.ExecutionTrace, int64(ts.Height()), st) + env, err := baseEnvironment(st, ir.Msg.From) if err != nil { - return nil, xerrors.Errorf("failed building traces: %w", err) + return nil, xerrors.Errorf("when processing message %s: %w", ir.MsgCid, err) } - traceBlocks := make([]*ethtypes.EthTraceBlock, 0, len(traces)) - for _, trace := range traces { - traceBlocks = append(traceBlocks, ðtypes.EthTraceBlock{ + err = buildTraces(env, []int{}, &ir.ExecutionTrace) + if err != nil { + return nil, xerrors.Errorf("failed building traces for msg %s: %w", ir.MsgCid, err) + } + + for _, trace := range env.traces { + allTraces = append(allTraces, ðtypes.EthTraceBlock{ EthTrace: trace, BlockHash: blkHash, BlockNumber: int64(ts.Height()), @@ -903,8 +910,6 @@ func (a 
*EthModule) EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtyp TransactionPosition: msgIdx, }) } - - allTraces = append(allTraces, traceBlocks...) } return allTraces, nil @@ -942,34 +947,36 @@ func (a *EthModule) EthTraceReplayBlockTransactions(ctx context.Context, blkNum return nil, xerrors.Errorf("failed to get transaction hash by cid: %w", err) } if txHash == nil { - log.Warnf("cannot find transaction hash for cid %s", ir.MsgCid) - continue + return nil, xerrors.Errorf("cannot find transaction hash for cid %s", ir.MsgCid) } - var output ethtypes.EthBytes - invokeCreateOnEAM := ir.Msg.To == builtin.EthereumAddressManagerActorAddr && (ir.Msg.Method == builtin.MethodsEAM.Create || ir.Msg.Method == builtin.MethodsEAM.Create2) - if ir.Msg.Method == builtin.MethodsEVM.InvokeContract || invokeCreateOnEAM { - output, err = decodePayload(ir.ExecutionTrace.MsgRct.Return, ir.ExecutionTrace.MsgRct.ReturnCodec) - if err != nil { - return nil, xerrors.Errorf("failed to decode payload: %w", err) + env, err := baseEnvironment(st, ir.Msg.From) + if err != nil { + return nil, xerrors.Errorf("when processing message %s: %w", ir.MsgCid, err) + } + + err = buildTraces(env, []int{}, &ir.ExecutionTrace) + if err != nil { + return nil, xerrors.Errorf("failed building traces for msg %s: %w", ir.MsgCid, err) + } + + var output []byte + if len(env.traces) > 0 { + switch r := env.traces[0].Result.(type) { + case *ethtypes.EthCallTraceResult: + output = r.Output + case *ethtypes.EthCreateTraceResult: + output = r.Code } - } else { - output = encodeFilecoinReturnAsABI(ir.ExecutionTrace.MsgRct.ExitCode, ir.ExecutionTrace.MsgRct.ReturnCodec, ir.ExecutionTrace.MsgRct.Return) } - t := ethtypes.EthTraceReplayBlockTransaction{ + allTraces = append(allTraces, ðtypes.EthTraceReplayBlockTransaction{ Output: output, TransactionHash: *txHash, + Trace: env.traces, StateDiff: nil, VmTrace: nil, - } - - err = buildTraces(&t.Trace, nil, []int{}, ir.ExecutionTrace, int64(ts.Height()), st) - if err 
!= nil { - return nil, xerrors.Errorf("failed building traces: %w", err) - } - - allTraces = append(allTraces, &t) + }) } return allTraces, nil @@ -1205,7 +1212,7 @@ func (a *EthModule) EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam e return ethtypes.EthBytes{}, nil } -func (e *EthEvent) EthGetLogs(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) { +func (e *EthEventHandler) EthGetLogs(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) { if e.EventFilterManager == nil { return nil, api.ErrNotSupported } @@ -1222,7 +1229,7 @@ func (e *EthEvent) EthGetLogs(ctx context.Context, filterSpec *ethtypes.EthFilte return ethFilterResultFromEvents(ctx, ces, e.SubManager.StateAPI) } -func (e *EthEvent) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { +func (e *EthEventHandler) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { if e.FilterStore == nil { return nil, api.ErrNotSupported } @@ -1244,7 +1251,7 @@ func (e *EthEvent) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilte return nil, xerrors.Errorf("unknown filter type") } -func (e *EthEvent) EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { +func (e *EthEventHandler) EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { if e.FilterStore == nil { return nil, api.ErrNotSupported } @@ -1262,7 +1269,65 @@ func (e *EthEvent) EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID return nil, xerrors.Errorf("wrong filter type") } -func (e *EthEvent) installEthFilterSpec(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (*filter.EventFilter, error) { +// parseBlockRange is similar to actor event's parseHeightRange but with slightly different semantics +// +// * "block" instead of "height" +// * strings that can 
have "latest" and "earliest" and nil +// * hex strings for actual heights +func parseBlockRange(heaviest abi.ChainEpoch, fromBlock, toBlock *string, maxRange abi.ChainEpoch) (minHeight abi.ChainEpoch, maxHeight abi.ChainEpoch, err error) { + if fromBlock == nil || *fromBlock == "latest" || len(*fromBlock) == 0 { + minHeight = heaviest + } else if *fromBlock == "earliest" { + minHeight = 0 + } else { + if !strings.HasPrefix(*fromBlock, "0x") { + return 0, 0, xerrors.Errorf("FromBlock is not a hex") + } + epoch, err := ethtypes.EthUint64FromHex(*fromBlock) + if err != nil { + return 0, 0, xerrors.Errorf("invalid epoch") + } + minHeight = abi.ChainEpoch(epoch) + } + + if toBlock == nil || *toBlock == "latest" || len(*toBlock) == 0 { + // here latest means the latest at the time + maxHeight = -1 + } else if *toBlock == "earliest" { + maxHeight = 0 + } else { + if !strings.HasPrefix(*toBlock, "0x") { + return 0, 0, xerrors.Errorf("ToBlock is not a hex") + } + epoch, err := ethtypes.EthUint64FromHex(*toBlock) + if err != nil { + return 0, 0, xerrors.Errorf("invalid epoch") + } + maxHeight = abi.ChainEpoch(epoch) + } + + // Validate height ranges are within limits set by node operator + if minHeight == -1 && maxHeight > 0 { + // Here the client is looking for events between the head and some future height + if maxHeight-heaviest > maxRange { + return 0, 0, xerrors.Errorf("invalid epoch range: to block is too far in the future (maximum: %d)", maxRange) + } + } else if minHeight >= 0 && maxHeight == -1 { + // Here the client is looking for events between some time in the past and the current head + if heaviest-minHeight > maxRange { + return 0, 0, xerrors.Errorf("invalid epoch range: from block is too far in the past (maximum: %d)", maxRange) + } + } else if minHeight >= 0 && maxHeight >= 0 { + if minHeight > maxHeight { + return 0, 0, xerrors.Errorf("invalid epoch range: to block (%d) must be after from block (%d)", minHeight, maxHeight) + } else if maxHeight-minHeight > 
maxRange { + return 0, 0, xerrors.Errorf("invalid epoch range: range between to and from blocks is too large (maximum: %d)", maxRange) + } + } + return minHeight, maxHeight, nil +} + +func (e *EthEventHandler) installEthFilterSpec(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (filter.EventFilter, error) { var ( minHeight abi.ChainEpoch maxHeight abi.ChainEpoch @@ -1278,64 +1343,11 @@ func (e *EthEvent) installEthFilterSpec(ctx context.Context, filterSpec *ethtype tipsetCid = filterSpec.BlockHash.ToCid() } else { - if filterSpec.FromBlock == nil || *filterSpec.FromBlock == "latest" { - ts := e.Chain.GetHeaviestTipSet() - minHeight = ts.Height() - } else if *filterSpec.FromBlock == "earliest" { - minHeight = 0 - } else if *filterSpec.FromBlock == "pending" { - return nil, api.ErrNotSupported - } else { - if !strings.HasPrefix(*filterSpec.FromBlock, "0x") { - return nil, xerrors.Errorf("FromBlock is not a hex") - } - epoch, err := ethtypes.EthUint64FromHex(*filterSpec.FromBlock) - if err != nil { - return nil, xerrors.Errorf("invalid epoch") - } - minHeight = abi.ChainEpoch(epoch) + var err error + minHeight, maxHeight, err = parseBlockRange(e.Chain.GetHeaviestTipSet().Height(), filterSpec.FromBlock, filterSpec.ToBlock, e.MaxFilterHeightRange) + if err != nil { + return nil, err } - - if filterSpec.ToBlock == nil || *filterSpec.ToBlock == "latest" { - // here latest means the latest at the time - maxHeight = -1 - } else if *filterSpec.ToBlock == "earliest" { - maxHeight = 0 - } else if *filterSpec.ToBlock == "pending" { - return nil, api.ErrNotSupported - } else { - if !strings.HasPrefix(*filterSpec.ToBlock, "0x") { - return nil, xerrors.Errorf("ToBlock is not a hex") - } - epoch, err := ethtypes.EthUint64FromHex(*filterSpec.ToBlock) - if err != nil { - return nil, xerrors.Errorf("invalid epoch") - } - maxHeight = abi.ChainEpoch(epoch) - } - - // Validate height ranges are within limits set by node operator - if minHeight == -1 && maxHeight > 0 { - // Here 
the client is looking for events between the head and some future height - ts := e.Chain.GetHeaviestTipSet() - if maxHeight-ts.Height() > e.MaxFilterHeightRange { - return nil, xerrors.Errorf("invalid epoch range: to block is too far in the future (maximum: %d)", e.MaxFilterHeightRange) - } - } else if minHeight >= 0 && maxHeight == -1 { - // Here the client is looking for events between some time in the past and the current head - ts := e.Chain.GetHeaviestTipSet() - if ts.Height()-minHeight > e.MaxFilterHeightRange { - return nil, xerrors.Errorf("invalid epoch range: from block is too far in the past (maximum: %d)", e.MaxFilterHeightRange) - } - - } else if minHeight >= 0 && maxHeight >= 0 { - if minHeight > maxHeight { - return nil, xerrors.Errorf("invalid epoch range: to block (%d) must be after from block (%d)", minHeight, maxHeight) - } else if maxHeight-minHeight > e.MaxFilterHeightRange { - return nil, xerrors.Errorf("invalid epoch range: range between to and from blocks is too large (maximum: %d)", e.MaxFilterHeightRange) - } - } - } // Convert all addresses to filecoin f4 addresses @@ -1352,10 +1364,23 @@ func (e *EthEvent) installEthFilterSpec(ctx context.Context, filterSpec *ethtype return nil, err } - return e.EventFilterManager.Install(ctx, minHeight, maxHeight, tipsetCid, addresses, keys) + return e.EventFilterManager.Install(ctx, minHeight, maxHeight, tipsetCid, addresses, keysToKeysWithCodec(keys), true) } -func (e *EthEvent) EthNewFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) { +func keysToKeysWithCodec(keys map[string][][]byte) map[string][]types.ActorEventBlock { + keysWithCodec := make(map[string][]types.ActorEventBlock) + for k, v := range keys { + for _, vv := range v { + keysWithCodec[k] = append(keysWithCodec[k], types.ActorEventBlock{ + Codec: uint64(multicodec.Raw), // FEVM smart contract events are always encoded with the `raw` Codec. 
+ Value: vv, + }) + } + } + return keysWithCodec +} + +func (e *EthEventHandler) EthNewFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) { if e.FilterStore == nil || e.EventFilterManager == nil { return ethtypes.EthFilterID{}, api.ErrNotSupported } @@ -1377,7 +1402,7 @@ func (e *EthEvent) EthNewFilter(ctx context.Context, filterSpec *ethtypes.EthFil return ethtypes.EthFilterID(f.ID()), nil } -func (e *EthEvent) EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) { +func (e *EthEventHandler) EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) { if e.FilterStore == nil || e.TipSetFilterManager == nil { return ethtypes.EthFilterID{}, api.ErrNotSupported } @@ -1400,7 +1425,7 @@ func (e *EthEvent) EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, return ethtypes.EthFilterID(f.ID()), nil } -func (e *EthEvent) EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) { +func (e *EthEventHandler) EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) { if e.FilterStore == nil || e.MemPoolFilterManager == nil { return ethtypes.EthFilterID{}, api.ErrNotSupported } @@ -1423,7 +1448,7 @@ func (e *EthEvent) EthNewPendingTransactionFilter(ctx context.Context) (ethtypes return ethtypes.EthFilterID(f.ID()), nil } -func (e *EthEvent) EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) { +func (e *EthEventHandler) EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) { if e.FilterStore == nil { return false, api.ErrNotSupported } @@ -1443,9 +1468,9 @@ func (e *EthEvent) EthUninstallFilter(ctx context.Context, id ethtypes.EthFilter return true, nil } -func (e *EthEvent) uninstallFilter(ctx context.Context, f filter.Filter) error { +func (e *EthEventHandler) uninstallFilter(ctx context.Context, f filter.Filter) error { switch f.(type) { - case *filter.EventFilter: + case filter.EventFilter: err 
:= e.EventFilterManager.Remove(ctx, f.ID()) if err != nil && !errors.Is(err, filter.ErrFilterNotFound) { return err @@ -1473,7 +1498,7 @@ const ( EthSubscribeEventTypePendingTransactions = "newPendingTransactions" ) -func (e *EthEvent) EthSubscribe(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) { +func (e *EthEventHandler) EthSubscribe(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) { params, err := jsonrpc.DecodeParams[ethtypes.EthSubscribeParams](p) if err != nil { return ethtypes.EthSubscriptionID{}, xerrors.Errorf("decoding params: %w", err) @@ -1526,7 +1551,7 @@ func (e *EthEvent) EthSubscribe(ctx context.Context, p jsonrpc.RawParams) (ethty } } - f, err := e.EventFilterManager.Install(ctx, -1, -1, cid.Undef, addresses, keys) + f, err := e.EventFilterManager.Install(ctx, -1, -1, cid.Undef, addresses, keysToKeysWithCodec(keys), true) if err != nil { // clean up any previous filters added and stop the sub _, _ = e.EthUnsubscribe(ctx, sub.id) @@ -1549,7 +1574,7 @@ func (e *EthEvent) EthSubscribe(ctx context.Context, p jsonrpc.RawParams) (ethty return sub.id, nil } -func (e *EthEvent) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) { +func (e *EthEventHandler) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) { if e.SubManager == nil { return false, api.ErrNotSupported } @@ -1563,7 +1588,7 @@ func (e *EthEvent) EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscripti } // GC runs a garbage collection loop, deleting filters that have not been used within the ttl window -func (e *EthEvent) GC(ctx context.Context, ttl time.Duration) { +func (e *EthEventHandler) GC(ctx context.Context, ttl time.Duration) { if e.FilterStore == nil { return } diff --git a/node/impl/full/eth_event.go b/node/impl/full/eth_events.go similarity index 98% rename from node/impl/full/eth_event.go rename to node/impl/full/eth_events.go index 54dd164ac..81ecef64b 
100644 --- a/node/impl/full/eth_event.go +++ b/node/impl/full/eth_events.go @@ -39,9 +39,9 @@ func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes // Topics must be non-nil, even if empty. So we might as well pre-allocate for 4 (the max). topics = make([]ethtypes.EthHash, 0, 4) for _, entry := range entries { - // Drop events with non-raw topics to avoid mistakes. + // Drop events with non-raw topics. Built-in actors emit CBOR, and anything else would be + // invalid anyway. if entry.Codec != cid.Raw { - log.Warnw("did not expect an event entry with a non-raw codec", "codec", entry.Codec, "key", entry.Key) return nil, nil, false } // Check if the key is t1..t4 diff --git a/node/impl/full/eth_test.go b/node/impl/full/eth_test.go index c364a4873..6f9d8f297 100644 --- a/node/impl/full/eth_test.go +++ b/node/impl/full/eth_test.go @@ -1,18 +1,97 @@ package full import ( + "bytes" "encoding/hex" + "fmt" "testing" "github.com/ipfs/go-cid" + "github.com/multiformats/go-multicodec" "github.com/stretchr/testify/require" + cbg "github.com/whyrusleeping/cbor-gen" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" ) +func TestParseBlockRange(t *testing.T) { + pstring := func(s string) *string { return &s } + + tcs := map[string]struct { + heaviest abi.ChainEpoch + from *string + to *string + maxRange abi.ChainEpoch + minOut abi.ChainEpoch + maxOut abi.ChainEpoch + errStr string + }{ + "fails when both are specified and range is greater than max allowed range": { + heaviest: 100, + from: pstring("0x100"), + to: pstring("0x200"), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "too large", + }, + "fails when min is specified and range is greater than max allowed range": { + heaviest: 500, + from: pstring("0x10"), + to: pstring("latest"), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "too far 
in the past", + }, + "fails when max is specified and range is greater than max allowed range": { + heaviest: 500, + from: pstring("earliest"), + to: pstring("0x10000"), + maxRange: 10, + minOut: 0, + maxOut: 0, + errStr: "too large", + }, + "works when range is valid": { + heaviest: 500, + from: pstring("earliest"), + to: pstring("latest"), + maxRange: 1000, + minOut: 0, + maxOut: -1, + }, + "works when range is valid and specified": { + heaviest: 500, + from: pstring("0x10"), + to: pstring("0x30"), + maxRange: 1000, + minOut: 16, + maxOut: 48, + }, + } + + for name, tc := range tcs { + tc2 := tc + t.Run(name, func(t *testing.T) { + min, max, err := parseBlockRange(tc2.heaviest, tc2.from, tc2.to, tc2.maxRange) + require.Equal(t, tc2.minOut, min) + require.Equal(t, tc2.maxOut, max) + if tc2.errStr != "" { + fmt.Println(err) + require.Error(t, err) + require.Contains(t, err.Error(), tc2.errStr) + } else { + require.NoError(t, err) + } + }) + } +} + func TestEthLogFromEvent(t *testing.T) { // basic empty data, topics, ok := ethLogFromEvent(nil) @@ -177,3 +256,40 @@ func TestABIEncoding(t *testing.T) { require.Equal(t, expectedBytes, encodeAsABIHelper(22, 81, dataBytes)) } + +func TestDecodePayload(t *testing.T) { + // "empty" + b, err := decodePayload(nil, 0) + require.NoError(t, err) + require.Empty(t, b) + + // raw empty + _, err = decodePayload(nil, uint64(multicodec.Raw)) + require.NoError(t, err) + require.Empty(t, b) + + // raw non-empty + b, err = decodePayload([]byte{1}, uint64(multicodec.Raw)) + require.NoError(t, err) + require.EqualValues(t, b, []byte{1}) + + // Invalid cbor bytes + _, err = decodePayload(nil, uint64(multicodec.DagCbor)) + require.Error(t, err) + + // valid cbor bytes + var w bytes.Buffer + require.NoError(t, cbg.WriteByteArray(&w, []byte{1})) + b, err = decodePayload(w.Bytes(), uint64(multicodec.DagCbor)) + require.NoError(t, err) + require.EqualValues(t, b, []byte{1}) + + // regular cbor also works. 
+ b, err = decodePayload(w.Bytes(), uint64(multicodec.Cbor)) + require.NoError(t, err) + require.EqualValues(t, b, []byte{1}) + + // random codec should fail + _, err = decodePayload(w.Bytes(), 42) + require.Error(t, err) +} diff --git a/node/impl/full/eth_trace.go b/node/impl/full/eth_trace.go index 9d24394d7..e4e5d794d 100644 --- a/node/impl/full/eth_trace.go +++ b/node/impl/full/eth_trace.go @@ -2,15 +2,22 @@ package full import ( "bytes" + "fmt" "github.com/multiformats/go-multicodec" cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/builtin" - "github.com/filecoin-project/go-state-types/builtin/v10/evm" + eam12 "github.com/filecoin-project/go-state-types/builtin/v12/eam" + evm12 "github.com/filecoin-project/go-state-types/builtin/v12/evm" + init12 "github.com/filecoin-project/go-state-types/builtin/v12/init" + "github.com/filecoin-project/go-state-types/exitcode" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/evm" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" @@ -18,10 +25,6 @@ import ( // decodePayload is a utility function which decodes the payload using the given codec func decodePayload(payload []byte, codec uint64) (ethtypes.EthBytes, error) { - if len(payload) == 0 { - return nil, nil - } - switch multicodec.Code(codec) { case multicodec.Identity: return nil, nil @@ -38,212 +41,565 @@ func decodePayload(payload []byte, codec uint64) (ethtypes.EthBytes, error) { return nil, xerrors.Errorf("decodePayload: unsupported codec: %d", codec) } +func decodeParams[P any, T interface { + *P + cbg.CBORUnmarshaler +}](msg *types.MessageTrace) (T, error) { + var params T = new(P) + switch msg.ParamsCodec { + case 
uint64(multicodec.DagCbor), uint64(multicodec.Cbor): + default: + return nil, xerrors.Errorf("Method called with unexpected codec %d", msg.ParamsCodec) + } + + if err := params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("failed to decode params: %w", err) + } + + return params, nil +} + +func decodeReturn[R any, T interface { + *R + cbg.CBORUnmarshaler +}](ret *types.ReturnTrace) (T, error) { + var retval T = new(R) + switch ret.ReturnCodec { + case uint64(multicodec.DagCbor), uint64(multicodec.Cbor): + default: + return nil, xerrors.Errorf("Method returned an unexpected codec %d", ret.ReturnCodec) + } + + if err := retval.UnmarshalCBOR(bytes.NewReader(ret.Return)); err != nil { + return nil, xerrors.Errorf("failed to decode return value: %w", err) + } + + return retval, nil +} + +func find[T any](values []T, cb func(t *T) *T) *T { + for i := range values { + if o := cb(&values[i]); o != nil { + return o + } + } + return nil +} + +type environment struct { + caller ethtypes.EthAddress + isEVM bool + subtraceCount int + traces []*ethtypes.EthTrace + lastByteCode *ethtypes.EthAddress +} + +func baseEnvironment(st *state.StateTree, from address.Address) (*environment, error) { + sender, err := lookupEthAddress(from, st) + if err != nil { + return nil, xerrors.Errorf("top-level message sender %s s could not be found: %w", from, err) + } + return &environment{caller: sender}, nil +} + +func traceToAddress(act *types.ActorTrace) ethtypes.EthAddress { + if act.State.Address != nil { + if addr, err := ethtypes.EthAddressFromFilecoinAddress(*act.State.Address); err == nil { + return addr + } + } + return ethtypes.EthAddressFromActorID(act.Id) +} + +// traceIsEVMOrEAM returns true if the trace is a call to an EVM or EAM actor. 
+func traceIsEVMOrEAM(et *types.ExecutionTrace) bool { + if et.InvokedActor == nil { + return false + } + return builtinactors.IsEvmActor(et.InvokedActor.State.Code) || + et.InvokedActor.Id != abi.ActorID(builtin.EthereumAddressManagerActorID) +} + +func traceErrMsg(et *types.ExecutionTrace) string { + code := et.MsgRct.ExitCode + + if code.IsSuccess() { + return "" + } + + // EVM tools often expect this literal string. + if code == exitcode.SysErrOutOfGas { + return "out of gas" + } + + // indicate when we have a "system" error. + if code < exitcode.FirstActorErrorCode { + return fmt.Sprintf("vm error: %s", code) + } + + // handle special exit codes from the EVM/EAM. + if traceIsEVMOrEAM(et) { + switch code { + case evm.ErrReverted: + return "Reverted" // capitalized for compatibility + case evm.ErrInvalidInstruction: + return "invalid instruction" + case evm.ErrUndefinedInstruction: + return "undefined instruction" + case evm.ErrStackUnderflow: + return "stack underflow" + case evm.ErrStackOverflow: + return "stack overflow" + case evm.ErrIllegalMemoryAccess: + return "illegal memory access" + case evm.ErrBadJumpdest: + return "invalid jump destination" + case evm.ErrSelfdestructFailed: + return "self destruct failed" + } + } + // everything else... + return fmt.Sprintf("actor error: %s", code.Error()) +} + // buildTraces recursively builds the traces for a given ExecutionTrace by walking the subcalls -func buildTraces(traces *[]*ethtypes.EthTrace, parent *ethtypes.EthTrace, addr []int, et types.ExecutionTrace, height int64, st *state.StateTree) error { - // lookup the eth address from the from/to addresses. Note that this may fail but to support - // this we need to include the ActorID in the trace. For now, just log a warning and skip - // this trace. 
- // - // TODO: Add ActorID in trace, see https://github.com/filecoin-project/lotus/pull/11100#discussion_r1302442288 - from, err := lookupEthAddress(et.Msg.From, st) +func buildTraces(env *environment, addr []int, et *types.ExecutionTrace) error { + trace, recurseInto, err := buildTrace(env, addr, et) if err != nil { - log.Warnf("buildTraces: failed to lookup from address %s: %v", et.Msg.From, err) - return nil + return xerrors.Errorf("at trace %v: %w", addr, err) } - to, err := lookupEthAddress(et.Msg.To, st) - if err != nil { - log.Warnf("buildTraces: failed to lookup to address %s: %w", et.Msg.To, err) + + if trace != nil { + env.traces = append(env.traces, trace) + env.subtraceCount++ + } + + // Skip if there's nothing more to do and/or `buildTrace` told us to skip this one. + if recurseInto == nil || recurseInto.InvokedActor == nil || len(recurseInto.Subcalls) == 0 { return nil } - trace := ðtypes.EthTrace{ - Action: ethtypes.EthTraceAction{ - From: from, - To: to, - Gas: ethtypes.EthUint64(et.Msg.GasLimit), - Input: nil, - Value: ethtypes.EthBigInt(et.Msg.Value), - - FilecoinFrom: et.Msg.From, - FilecoinTo: et.Msg.To, - FilecoinMethod: et.Msg.Method, - FilecoinCodeCid: et.Msg.CodeCid, - }, - Result: ethtypes.EthTraceResult{ - GasUsed: ethtypes.EthUint64(et.SumGas().TotalGas), - Output: nil, - }, - Subtraces: 0, // will be updated by the children once they are added to the trace - TraceAddress: addr, - - Parent: parent, - LastByteCode: nil, + subEnv := &environment{ + caller: traceToAddress(recurseInto.InvokedActor), + isEVM: builtinactors.IsEvmActor(recurseInto.InvokedActor.State.Code), + traces: env.traces, } - - trace.SetCallType("call") - - if et.Msg.Method == builtin.MethodsEVM.InvokeContract { - log.Debugf("COND1 found InvokeContract call at height: %d", height) - - // TODO: ignore return errors since actors can send gibberish and we don't want - // to fail the whole trace in that case - trace.Action.Input, err = decodePayload(et.Msg.Params, 
et.Msg.ParamsCodec) - if err != nil { - return xerrors.Errorf("buildTraces: %w", err) - } - trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec) - if err != nil { - return xerrors.Errorf("buildTraces: %w", err) - } - } else if et.Msg.To == builtin.EthereumAddressManagerActorAddr && - et.Msg.Method == builtin.MethodsEAM.CreateExternal { - log.Debugf("COND2 found CreateExternal call at height: %d", height) - trace.Action.Input, err = decodePayload(et.Msg.Params, et.Msg.ParamsCodec) - if err != nil { - return xerrors.Errorf("buildTraces: %w", err) - } - - if et.MsgRct.ExitCode.IsSuccess() { - // ignore return value - trace.Result.Output = nil - } else { - // return value is the error message - trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec) - if err != nil { - return xerrors.Errorf("buildTraces: %w", err) - } - } - - // treat this as a contract creation - trace.SetCallType("create") - } else { - // we are going to assume a native method, but we may change it in one of the edge cases below - // TODO: only do this if we know it's a native method (optimization) - trace.Action.Input = encodeFilecoinParamsAsABI(et.Msg.Method, et.Msg.ParamsCodec, et.Msg.Params) - trace.Result.Output = encodeFilecoinReturnAsABI(et.MsgRct.ExitCode, et.MsgRct.ReturnCodec, et.MsgRct.Return) - } - - // TODO: is it OK to check this here or is this only specific to certain edge case (evm to evm)? - if et.Msg.ReadOnly { - trace.SetCallType("staticcall") - } - - // there are several edge cases that require special handling when displaying the traces. 
Note that while iterating over - // the traces we update the trace backwards (through the parent pointer) - if parent != nil { - // Handle Native actor creation - // - // Actor A calls to the init actor on method 2 and The init actor creates the target actor B then calls it on method 1 - if parent.Action.FilecoinTo == builtin.InitActorAddr && - parent.Action.FilecoinMethod == builtin.MethodsInit.Exec && - et.Msg.Method == builtin.MethodConstructor { - log.Debugf("COND3 Native actor creation! method:%d, code:%s, height:%d", et.Msg.Method, et.Msg.CodeCid.String(), height) - parent.SetCallType("create") - parent.Action.To = to - parent.Action.Input = []byte{0xFE} - parent.Result.Output = nil - - // there should never be any subcalls when creating a native actor - // - // TODO: add support for native actors calling another when created - return nil - } - - // Handle EVM contract creation - // - // To detect EVM contract creation we need to check for the following sequence of events: - // - // 1) EVM contract A calls the EAM (Ethereum Address Manager) on method 2 (create) or 3 (create2). - // 2) The EAM calls the init actor on method 3 (Exec4). - // 3) The init actor creates the target actor B then calls it on method 1. 
- if parent.Parent != nil { - calledCreateOnEAM := parent.Parent.Action.FilecoinTo == builtin.EthereumAddressManagerActorAddr && - (parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create || parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create2) - eamCalledInitOnExec4 := parent.Action.FilecoinTo == builtin.InitActorAddr && - parent.Action.FilecoinMethod == builtin.MethodsInit.Exec4 - initCreatedActor := trace.Action.FilecoinMethod == builtin.MethodConstructor - - // TODO: We need to handle failures in contract creations and support resurrections on an existing but dead EVM actor) - if calledCreateOnEAM && eamCalledInitOnExec4 && initCreatedActor { - log.Debugf("COND4 EVM contract creation method:%d, code:%s, height:%d", et.Msg.Method, et.Msg.CodeCid.String(), height) - - if parent.Parent.Action.FilecoinMethod == builtin.MethodsEAM.Create { - parent.Parent.SetCallType("create") - } else { - parent.Parent.SetCallType("create2") - } - - // update the parent.parent to make this - parent.Parent.Action.To = trace.Action.To - parent.Parent.Subtraces = 0 - - // delete the parent (the EAM) and skip the current trace (init) - *traces = (*traces)[:len(*traces)-1] - - return nil - } - } - - if builtinactors.IsEvmActor(parent.Action.FilecoinCodeCid) { - // Handle delegate calls - // - // 1) Look for trace from an EVM actor to itself on InvokeContractDelegate, method 6. - // 2) Check that the previous trace calls another actor on method 3 (GetByteCode) and they are at the same level (same parent) - // 3) Treat this as a delegate call to actor A. 
- if parent.LastByteCode != nil && trace.Action.From == trace.Action.To && - trace.Action.FilecoinMethod == builtin.MethodsEVM.InvokeContractDelegate { - log.Debugf("COND7 found delegate call, height: %d", height) - prev := parent.LastByteCode - if prev.Action.From == trace.Action.From && prev.Action.FilecoinMethod == builtin.MethodsEVM.GetBytecode && prev.Parent == trace.Parent { - trace.SetCallType("delegatecall") - trace.Action.To = prev.Action.To - - var dp evm.DelegateCallParams - err := dp.UnmarshalCBOR(bytes.NewReader(et.Msg.Params)) - if err != nil { - return xerrors.Errorf("failed UnmarshalCBOR: %w", err) - } - trace.Action.Input = dp.Input - - trace.Result.Output, err = decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec) - if err != nil { - return xerrors.Errorf("failed decodePayload: %w", err) - } - } - } else { - // Handle EVM call special casing - // - // Any outbound call from an EVM actor on methods 1-1023 are side-effects from EVM instructions - // and should be dropped from the trace. - if et.Msg.Method > 0 && - et.Msg.Method <= 1023 { - log.Debugf("Infof found outbound call from an EVM actor on method 1-1023 method:%d, code:%s, height:%d", et.Msg.Method, parent.Action.FilecoinCodeCid.String(), height) - - if et.Msg.Method == builtin.MethodsEVM.GetBytecode { - // save the last bytecode trace to handle delegate calls - parent.LastByteCode = trace - } - - return nil - } - } - } - - } - - // we are adding trace to the traces so update the parent subtraces count as it was originally set to zero - if parent != nil { - parent.Subtraces++ - } - - *traces = append(*traces, trace) - - for i, call := range et.Subcalls { - err := buildTraces(traces, trace, append(addr, i), call, height, st) + // Set capacity to the length so each `append` below creates a new slice. Otherwise, we'll + // end up repeatedly mutating previous paths. 
+ addr = addr[:len(addr):len(addr)] + for i := range recurseInto.Subcalls { + err := buildTraces(subEnv, append(addr, subEnv.subtraceCount), &recurseInto.Subcalls[i]) if err != nil { return err } } + trace.Subtraces = subEnv.subtraceCount + env.traces = subEnv.traces return nil } + +// buildTrace processes the passed execution trace and updates the environment, if necessary. +// +// On success, it returns a trace to add (or nil to skip) and the trace recurse into (or nil to skip). +func buildTrace(env *environment, addr []int, et *types.ExecutionTrace) (*ethtypes.EthTrace, *types.ExecutionTrace, error) { + // This function first assumes that the call is a "native" call, then handles all the "not + // native" cases. If we get any unexpected results in any of these special cases, we just + // keep the "native" interpretation and move on. + // + // 1. If we're invoking a contract (even if the caller is a native account/actor), we + // attempt to decode the params/return value as a contract invocation. + // 2. If we're calling the EAM and/or init actor, we try to treat the call as a CREATE. + // 3. Finally, if the caller is an EVM smart contract and it's calling a "private" (1-1023) + // method, we know something special is going on. We look for calls related to + // DELEGATECALL and drop everything else (everything else includes calls triggered by, + // e.g., EXTCODEHASH). + + // If we don't have sufficient funds, or we have a fatal error, or we have some + // other syscall error: skip the entire trace to mimic Ethereum (Ethereum records + // traces _after_ checking things like this). + // + // NOTE: The FFI currently folds all unknown syscall errors into "sys assertion + // failed" which is turned into SysErrFatal. + if len(addr) > 0 { + switch et.MsgRct.ExitCode { + case exitcode.SysErrInsufficientFunds, exitcode.SysErrFatal: + return nil, nil, nil + } + } + + // We may fail before we can even invoke the actor. 
In that case, we have no 100% reliable + // way of getting its address (e.g., due to reverts) so we're just going to drop the entire + // trace. This is OK (ish) because the call never really "happened". + if et.InvokedActor == nil { + return nil, nil, nil + } + + // Step 2: Decode as a contract invocation + // + // Normal EVM calls. We don't care if the caller/receiver are actually EVM actors, we only + // care if the call _looks_ like an EVM call. If we fail to decode it as an EVM call, we + // fallback on interpreting it as a native call. + if et.Msg.Method == builtin.MethodsEVM.InvokeContract { + return traceEVMCall(env, addr, et) + } + + // Step 3: Decode as a contract deployment + switch et.Msg.To { + // NOTE: this will only catch _direct_ calls to the init actor. Calls through the EAM will + // be caught and _skipped_ below in the next case. + case builtin.InitActorAddr: + switch et.Msg.Method { + case builtin.MethodsInit.Exec, builtin.MethodsInit.Exec4: + return traceNativeCreate(env, addr, et) + } + case builtin.EthereumAddressManagerActorAddr: + switch et.Msg.Method { + case builtin.MethodsEAM.Create, builtin.MethodsEAM.Create2, builtin.MethodsEAM.CreateExternal: + return traceEthCreate(env, addr, et) + } + } + + // Step 4: Handle DELEGATECALL + // + // EVM contracts cannot call methods in the range 1-1023, only the EVM itself can. So, if we + // see a call in this range, we know it's an implementation detail of the EVM and not an + // explicit call by the user. + // + // While the EVM calls several methods in this range (some we've already handled above with + // respect to the EAM), we only care about the ones relevant DELEGATECALL and can _ignore_ + // all the others. + if env.isEVM && et.Msg.Method > 0 && et.Msg.Method < 1024 { + return traceEVMPrivate(env, addr, et) + } + + return traceNativeCall(env, addr, et), et, nil +} + +// Build an EthTrace for a "call" with the given input & output. 
+func traceCall(env *environment, addr []int, et *types.ExecutionTrace, input, output ethtypes.EthBytes) *ethtypes.EthTrace { + to := traceToAddress(et.InvokedActor) + callType := "call" + if et.Msg.ReadOnly { + callType = "staticcall" + } + return ðtypes.EthTrace{ + Type: "call", + Action: ðtypes.EthCallTraceAction{ + CallType: callType, + From: env.caller, + To: to, + Gas: ethtypes.EthUint64(et.Msg.GasLimit), + Value: ethtypes.EthBigInt(et.Msg.Value), + Input: input, + }, + Result: ðtypes.EthCallTraceResult{ + GasUsed: ethtypes.EthUint64(et.SumGas().TotalGas), + Output: output, + }, + TraceAddress: addr, + Error: traceErrMsg(et), + } +} + +// Build an EthTrace for a "call", parsing the inputs & outputs as a "native" FVM call. +func traceNativeCall(env *environment, addr []int, et *types.ExecutionTrace) *ethtypes.EthTrace { + return traceCall(env, addr, et, + encodeFilecoinParamsAsABI(et.Msg.Method, et.Msg.ParamsCodec, et.Msg.Params), + encodeFilecoinReturnAsABI(et.MsgRct.ExitCode, et.MsgRct.ReturnCodec, et.MsgRct.Return), + ) +} + +// Build an EthTrace for a "call", parsing the inputs & outputs as an EVM call (falling back on +// treating it as a native call). +func traceEVMCall(env *environment, addr []int, et *types.ExecutionTrace) (*ethtypes.EthTrace, *types.ExecutionTrace, error) { + input, err := decodePayload(et.Msg.Params, et.Msg.ParamsCodec) + if err != nil { + log.Debugf("failed to decode contract invocation payload: %w", err) + return traceNativeCall(env, addr, et), et, nil + } + output, err := decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec) + if err != nil { + log.Debugf("failed to decode contract invocation return: %w", err) + return traceNativeCall(env, addr, et), et, nil + } + return traceCall(env, addr, et, input, output), et, nil +} + +// Build an EthTrace for a native "create" operation. This should only be called with an +// ExecutionTrace is an Exec or Exec4 method invocation on the Init actor. 
+func traceNativeCreate(env *environment, addr []int, et *types.ExecutionTrace) (*ethtypes.EthTrace, *types.ExecutionTrace, error) { + if et.Msg.ReadOnly { + // "create" isn't valid in a staticcall, so we just skip this trace + // (couldn't have created an actor anyways). + // This mimic's the EVM: it doesn't trace CREATE calls when in + // read-only mode. + return nil, nil, nil + } + + subTrace := find(et.Subcalls, func(c *types.ExecutionTrace) *types.ExecutionTrace { + if c.Msg.Method == builtin.MethodConstructor { + return c + } + return nil + }) + if subTrace == nil { + // If we succeed in calling Exec/Exec4 but don't even try to construct + // something, we have a bug in our tracing logic or a mismatch between our + // tracing logic and the actors. + if et.MsgRct.ExitCode.IsSuccess() { + return nil, nil, xerrors.Errorf("successful Exec/Exec4 call failed to call a constructor") + } + // Otherwise, this can happen if creation fails early (bad params, + // out of gas, contract already exists, etc.). The EVM wouldn't + // trace such cases, so we don't either. + // + // NOTE: It's actually impossible to run out of gas before calling + // initcode in the EVM (without running out of gas in the calling + // contract), but this is an equivalent edge-case to InvokedActor + // being nil, so we treat it the same way and skip the entire + // operation. + return nil, nil, nil + } + + // Native actors that aren't the EAM can attempt to call Exec4, but such + // call should fail immediately without ever attempting to construct an + // actor. I'm catching this here because it likely means that there's a bug + // in our trace-conversion logic. + if et.Msg.Method == builtin.MethodsInit.Exec4 { + return nil, nil, xerrors.Errorf("direct call to Exec4 successfully called a constructor!") + } + + var output ethtypes.EthBytes + var createdAddr *ethtypes.EthAddress + if et.MsgRct.ExitCode.IsSuccess() { + // We're supposed to put the "installed bytecode" here. 
But this + // isn't an EVM actor, so we just put some invalid bytecode (this is + // the answer you'd get if you called EXTCODECOPY on a native + // non-account actor, anyways). + output = []byte{0xFE} + + // Extract the address of the created actor from the return value. + initReturn, err := decodeReturn[init12.ExecReturn](&et.MsgRct) + if err != nil { + return nil, nil, xerrors.Errorf("failed to decode init params after a successful Init.Exec call: %w", err) + } + actorId, err := address.IDFromAddress(initReturn.IDAddress) + if err != nil { + return nil, nil, xerrors.Errorf("failed to extract created actor ID from address: %w", err) + } + ethAddr := ethtypes.EthAddressFromActorID(abi.ActorID(actorId)) + createdAddr = ðAddr + } + + return ðtypes.EthTrace{ + Type: "create", + Action: ðtypes.EthCreateTraceAction{ + From: env.caller, + Gas: ethtypes.EthUint64(et.Msg.GasLimit), + Value: ethtypes.EthBigInt(et.Msg.Value), + // If we get here, this isn't a native EVM create. Those always go through + // the EAM. So we have no "real" initcode and must use the sentinel value + // for "invalid" initcode. + Init: []byte{0xFE}, + }, + Result: ðtypes.EthCreateTraceResult{ + GasUsed: ethtypes.EthUint64(et.SumGas().TotalGas), + Address: createdAddr, + Code: output, + }, + TraceAddress: addr, + Error: traceErrMsg(et), + }, subTrace, nil +} + +// Assert that these are all identical so we can simplify the below code and decode once. +var _ *eam12.Return = (*eam12.Return)((*eam12.CreateReturn)(nil)) +var _ *eam12.Return = (*eam12.Return)((*eam12.Create2Return)(nil)) +var _ *eam12.Return = (*eam12.Return)((*eam12.CreateExternalReturn)(nil)) + +// Decode the parameters and return value of an EVM smart contract creation through the EAM. This +// should only be called with an ExecutionTrace for a Create, Create2, or CreateExternal method +// invocation on the EAM. 
+func decodeCreateViaEAM(et *types.ExecutionTrace) (initcode []byte, addr *ethtypes.EthAddress, err error) { + switch et.Msg.Method { + case builtin.MethodsEAM.Create: + params, err := decodeParams[eam12.CreateParams](&et.Msg) + if err != nil { + return nil, nil, err + } + initcode = params.Initcode + case builtin.MethodsEAM.Create2: + params, err := decodeParams[eam12.Create2Params](&et.Msg) + if err != nil { + return nil, nil, err + } + initcode = params.Initcode + case builtin.MethodsEAM.CreateExternal: + input, err := decodePayload(et.Msg.Params, et.Msg.ParamsCodec) + if err != nil { + return nil, nil, err + } + initcode = input + default: + return nil, nil, xerrors.Errorf("unexpected CREATE method %d", et.Msg.Method) + } + ret, err := decodeReturn[eam12.CreateReturn](&et.MsgRct) + if err != nil { + return nil, (*ethtypes.EthAddress)(&ret.EthAddress), err + } + return initcode, (*ethtypes.EthAddress)(&ret.EthAddress), nil +} + +// Build an EthTrace for an EVM "create" operation. This should only be called with an +// ExecutionTrace for a Create, Create2, or CreateExternal method invocation on the EAM. +func traceEthCreate(env *environment, addr []int, et *types.ExecutionTrace) (*ethtypes.EthTrace, *types.ExecutionTrace, error) { + // Same as the Init actor case above, see the comment there. + if et.Msg.ReadOnly { + return nil, nil, nil + } + + // Look for a call to either a constructor or the EVM's resurrect method. + subTrace := find(et.Subcalls, func(et *types.ExecutionTrace) *types.ExecutionTrace { + if et.Msg.To == builtinactors.InitActorAddr { + return find(et.Subcalls, func(et *types.ExecutionTrace) *types.ExecutionTrace { + if et.Msg.Method == builtinactors.MethodConstructor { + return et + } + return nil + }) + } + if et.Msg.Method == builtin.MethodsEVM.Resurrect { + return et + } + return nil + }) + + // Same as the Init actor case above, see the comment there. 
+ if subTrace == nil { + if et.MsgRct.ExitCode.IsSuccess() { + return nil, nil, xerrors.Errorf("successful Create/Create2 call failed to call a constructor") + } + return nil, nil, nil + } + + // Decode inputs & determine create type. + initcode, createdAddr, err := decodeCreateViaEAM(et) + if err != nil { + return nil, nil, xerrors.Errorf("EAM called with invalid params or returned an invalid result, but it still tried to construct the contract: %w", err) + } + + var output ethtypes.EthBytes + // Handle the output. + switch et.MsgRct.ExitCode { + case 0: // success + // We're _supposed_ to include the contracts bytecode here, but we + // can't do that reliably (e.g., if some part of the trace reverts). + // So we don't try and include a sentinel "impossible bytecode" + // value (the value specified by EIP-3541). + output = []byte{0xFE} + case 33: // Reverted, parse the revert message. + // If we managed to call the constructor, parse/return its revert message. If we + // fail, we just return no output. + output, _ = decodePayload(subTrace.MsgRct.Return, subTrace.MsgRct.ReturnCodec) + } + + return ðtypes.EthTrace{ + Type: "create", + Action: ðtypes.EthCreateTraceAction{ + From: env.caller, + Gas: ethtypes.EthUint64(et.Msg.GasLimit), + Value: ethtypes.EthBigInt(et.Msg.Value), + Init: initcode, + }, + Result: ðtypes.EthCreateTraceResult{ + GasUsed: ethtypes.EthUint64(et.SumGas().TotalGas), + Address: createdAddr, + Code: output, + }, + TraceAddress: addr, + Error: traceErrMsg(et), + }, subTrace, nil +} + +// Build an EthTrace for a "private" method invocation from the EVM. This should only be called with +// an ExecutionTrace from an EVM instance and on a method between 1 and 1023 inclusive. +func traceEVMPrivate(env *environment, addr []int, et *types.ExecutionTrace) (*ethtypes.EthTrace, *types.ExecutionTrace, error) { + // The EVM actor implements DELEGATECALL by: + // + // 1. Asking the callee for its bytecode by calling it on the GetBytecode method. + // 2. 
Recursively invoking the currently executing contract on the + // InvokeContractDelegate method. + // + // The code below "reconstructs" that delegate call by: + // + // 1. Remembering the last contract on which we called GetBytecode. + // 2. Treating the contract invoked in step 1 as the DELEGATECALL receiver. + // + // Note, however: GetBytecode will be called, e.g., if the user invokes the + // EXTCODECOPY instruction. It's not an error to see multiple GetBytecode calls + // before we see an InvokeContractDelegate. + switch et.Msg.Method { + case builtin.MethodsEVM.GetBytecode: + // NOTE: I'm not checking anything about the receiver here. The EVM won't + // DELEGATECALL any non-EVM actor, but there's no need to encode that fact + // here in case we decide to loosen this up in the future. + if et.MsgRct.ExitCode.IsSuccess() { + to := traceToAddress(et.InvokedActor) + env.lastByteCode = &to + } else { + env.lastByteCode = nil + } + return nil, nil, nil + case builtin.MethodsEVM.InvokeContractDelegate: + // NOTE: We return errors in all the failure cases below instead of trying + // to continue because the caller is an EVM actor. If something goes wrong + // here, there's a bug in our EVM implementation. + + // Handle delegate calls + // + // 1) Look for trace from an EVM actor to itself on InvokeContractDelegate, + // method 6. + // 2) Check that the previous trace calls another actor on method 3 + // (GetByteCode) and they are at the same level (same parent) + // 3) Treat this as a delegate call to actor A. 
+ if env.lastByteCode == nil { + return nil, nil, xerrors.Errorf("unknown bytecode for delegate call") + } + + if to := traceToAddress(et.InvokedActor); env.caller != to { + return nil, nil, xerrors.Errorf("delegate-call not from & to self: %s != %s", env.caller, to) + } + + dp, err := decodeParams[evm12.DelegateCallParams](&et.Msg) + if err != nil { + return nil, nil, xerrors.Errorf("failed to decode delegate-call params: %w", err) + } + + output, err := decodePayload(et.MsgRct.Return, et.MsgRct.ReturnCodec) + if err != nil { + return nil, nil, xerrors.Errorf("failed to decode delegate-call return: %w", err) + } + + return ðtypes.EthTrace{ + Type: "call", + Action: ðtypes.EthCallTraceAction{ + CallType: "delegatecall", + From: env.caller, + To: *env.lastByteCode, + Gas: ethtypes.EthUint64(et.Msg.GasLimit), + Value: ethtypes.EthBigInt(et.Msg.Value), + Input: dp.Input, + }, + Result: ðtypes.EthCallTraceResult{ + GasUsed: ethtypes.EthUint64(et.SumGas().TotalGas), + Output: output, + }, + TraceAddress: addr, + Error: traceErrMsg(et), + }, et, nil + } + // We drop all other "private" calls from FEVM. We _forbid_ explicit calls between 0 and + // 1024 (exclusive), so any calls in this range must be implementation details. + return nil, nil, nil +} diff --git a/node/impl/full/eth_utils.go b/node/impl/full/eth_utils.go index 5f22cea82..04e8e4970 100644 --- a/node/impl/full/eth_utils.go +++ b/node/impl/full/eth_utils.go @@ -22,7 +22,6 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -382,34 +381,35 @@ func parseEthRevert(ret []byte) string { // 3. Otherwise, we fall back to returning a masked ID Ethereum address. 
If the supplied address is an f0 address, we // use that ID to form the masked ID address. // 4. Otherwise, we fetch the actor's ID from the state tree and form the masked ID with it. +// +// If the actor doesn't exist in the state-tree but we have its ID, we use a masked ID address. It could have been deleted. func lookupEthAddress(addr address.Address, st *state.StateTree) (ethtypes.EthAddress, error) { - // BLOCK A: We are trying to get an actual Ethereum address from an f410 address. // Attempt to convert directly, if it's an f4 address. ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(addr) if err == nil && !ethAddr.IsMaskedID() { return ethAddr, nil } - // Lookup on the target actor and try to get an f410 address. - if actor, err := st.GetActor(addr); err != nil { - return ethtypes.EthAddress{}, err - } else if actor.Address != nil { - if ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(*actor.Address); err == nil && !ethAddr.IsMaskedID() { - return ethAddr, nil - } - } - - // BLOCK B: We gave up on getting an actual Ethereum address and are falling back to a Masked ID address. - // Check if we already have an ID addr, and use it if possible. - if err == nil && ethAddr.IsMaskedID() { - return ethAddr, nil - } - // Otherwise, resolve the ID addr. idAddr, err := st.LookupID(addr) if err != nil { return ethtypes.EthAddress{}, err } + + // Lookup on the target actor and try to get an f410 address. + if actor, err := st.GetActor(idAddr); errors.Is(err, types.ErrActorNotFound) { + // Not found -> use a masked ID address + } else if err != nil { + // Any other error -> fail. + return ethtypes.EthAddress{}, err + } else if actor.Address == nil { + // No delegated address -> use masked ID address. + } else if ethAddr, err := ethtypes.EthAddressFromFilecoinAddress(*actor.Address); err == nil && !ethAddr.IsMaskedID() { + // Conversable into an eth address, use it. + return ethAddr, nil + } + + // Otherwise, use the masked address. 
return ethtypes.EthAddressFromFilecoinAddress(idAddr) } @@ -525,6 +525,9 @@ func ethTxFromNativeMessage(msg *types.Message, st *state.StateTree) (ethtypes.E } to = revertedEthAddress } + toPtr := &to + + // Finally, convert the input parameters to "solidity ABI". // For empty, we use "0" as the codec. Otherwise, we use CBOR for message // parameters. @@ -533,11 +536,31 @@ func ethTxFromNativeMessage(msg *types.Message, st *state.StateTree) (ethtypes.E codec = uint64(multicodec.Cbor) } - // We decode as a native call first. - ethTx := ethtypes.EthTx{ - To: &to, + // We try to decode the input as an EVM method invocation and/or a contract creation. If + // that fails, we encode the "native" parameters as Solidity ABI. + var input []byte + switch msg.Method { + case builtintypes.MethodsEVM.InvokeContract, builtintypes.MethodsEAM.CreateExternal: + inp, err := decodePayload(msg.Params, codec) + if err == nil { + // If this is a valid "create external", unset the "to" address. + if msg.Method == builtintypes.MethodsEAM.CreateExternal { + toPtr = nil + } + input = []byte(inp) + break + } + // Yeah, we're going to ignore errors here because the user can send whatever they + // want and may send garbage. + fallthrough + default: + input = encodeFilecoinParamsAsABI(msg.Method, codec, msg.Params) + } + + return ethtypes.EthTx{ + To: toPtr, From: from, - Input: encodeFilecoinParamsAsABI(msg.Method, codec, msg.Params), + Input: input, Nonce: ethtypes.EthUint64(msg.Nonce), ChainID: ethtypes.EthUint64(build.Eip155ChainId), Value: ethtypes.EthBigInt(msg.Value), @@ -546,25 +569,7 @@ func ethTxFromNativeMessage(msg *types.Message, st *state.StateTree) (ethtypes.E MaxFeePerGas: ethtypes.EthBigInt(msg.GasFeeCap), MaxPriorityFeePerGas: ethtypes.EthBigInt(msg.GasPremium), AccessList: []ethtypes.EthHash{}, - } - - // Then we try to see if it's "special". If we fail, we ignore the error and keep treating - // it as a native message. 
Unfortunately, the user is free to send garbage that may not - // properly decode. - if msg.Method == builtintypes.MethodsEVM.InvokeContract { - // try to decode it as a contract invocation first. - if inp, err := decodePayload(msg.Params, codec); err == nil { - ethTx.Input = []byte(inp) - } - } else if msg.To == builtin.EthereumAddressManagerActorAddr && msg.Method == builtintypes.MethodsEAM.CreateExternal { - // Then, try to decode it as a contract deployment from an EOA. - if inp, err := decodePayload(msg.Params, codec); err == nil { - ethTx.Input = []byte(inp) - ethTx.To = nil - } - } - - return ethTx, nil + }, nil } func getSignedMessage(ctx context.Context, cs *store.ChainStore, msgCid cid.Cid) (*types.SignedMessage, error) { diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index fac48a350..addcc41be 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -44,6 +44,8 @@ type MpoolAPI struct { WalletAPI GasAPI + RaftAPI + MessageSigner messagesigner.MsgSigner PushLocks *dtypes.MpoolLocker @@ -143,6 +145,20 @@ func (a *MpoolAPI) MpoolPushMessage(ctx context.Context, msg *types.Message, spe msg = &cp inMsg := *msg + // Redirect to leader if current node is not leader. 
A single non raft based node is always the leader + if !a.RaftAPI.IsLeader(ctx) { + var signedMsg types.SignedMessage + redirected, err := a.RaftAPI.RedirectToLeader(ctx, "MpoolPushMessage", api.MpoolMessageWhole{Msg: msg, Spec: spec}, &signedMsg) + if err != nil { + return nil, err + } + // It's possible that the current node became the leader between the check and the redirect + // In that case, continue with rest of execution and only return signedMsg if something was redirected + if redirected { + return &signedMsg, nil + } + } + // Generate spec and uuid if not available in the message if spec == nil { spec = &api.MessageSendSpec{ diff --git a/node/impl/full/raft.go b/node/impl/full/raft.go new file mode 100644 index 000000000..8d665ddd5 --- /dev/null +++ b/node/impl/full/raft.go @@ -0,0 +1,50 @@ +package full + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/peer" + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/messagesigner" +) + +type RaftAPI struct { + fx.In + + MessageSigner *messagesigner.MessageSignerConsensus `optional:"true"` +} + +func (r *RaftAPI) GetRaftState(ctx context.Context) (*api.RaftStateData, error) { + if r.MessageSigner == nil { + return nil, xerrors.Errorf("raft consensus not enabled. Please check your configuration") + } + raftState, err := r.MessageSigner.GetRaftState(ctx) + if err != nil { + return nil, err + } + return &api.RaftStateData{NonceMap: raftState.NonceMap, MsgUuids: raftState.MsgUuids}, nil +} + +func (r *RaftAPI) Leader(ctx context.Context) (peer.ID, error) { + if r.MessageSigner == nil { + return "", xerrors.Errorf("raft consensus not enabled. 
Please check your configuration") + } + return r.MessageSigner.Leader(ctx) +} + +func (r *RaftAPI) IsLeader(ctx context.Context) bool { + if r.MessageSigner == nil { + return true + } + return r.MessageSigner.IsLeader(ctx) +} + +func (r *RaftAPI) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) { + if r.MessageSigner == nil { + return false, xerrors.Errorf("raft consensus not enabled. Please check your configuration") + } + return r.MessageSigner.RedirectToLeader(ctx, method, arg, ret) +} diff --git a/node/impl/full/state.go b/node/impl/full/state.go index 0e92c8e5b..dda889832 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -762,7 +762,7 @@ func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (m } out[strconv.FormatInt(int64(dealID), 10)] = &api.MarketDeal{ Proposal: d, - State: *s, + State: api.MakeDealState(s), } return nil }); err != nil { @@ -779,18 +779,27 @@ func (m *StateModule) StateMarketStorageDeal(ctx context.Context, dealId abi.Dea return stmgr.GetStorageDeal(ctx, m.StateManager, dealId, ts) } -func (a *StateAPI) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifreg.Allocation, error) { +func (a *StateAPI) StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) { ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { - return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + return verifreg.NoAllocationID, xerrors.Errorf("loading tipset %s: %w", tsk, err) } st, err := a.StateManager.GetMarketState(ctx, ts) if err != nil { - return nil, err + return verifreg.NoAllocationID, err } allocationId, err := st.GetAllocationIdForPendingDeal(dealId) + if err != nil { + return verifreg.NoAllocationID, err + } + + return allocationId, nil +} + +func (a *StateAPI) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, 
tsk types.TipSetKey) (*verifreg.Allocation, error) { + allocationId, err := a.StateGetAllocationIdForPendingDeal(ctx, dealId, tsk) if err != nil { return nil, err } @@ -857,6 +866,25 @@ func (a *StateAPI) StateGetAllocations(ctx context.Context, clientAddr address.A return allocations, nil } +func (a *StateAPI) StateGetAllAllocations(ctx context.Context, tsk types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) { + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) + if err != nil { + return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + } + + st, err := a.StateManager.GetVerifregState(ctx, ts) + if err != nil { + return nil, xerrors.Errorf("loading verifreg state: %w", err) + } + + allocations, err := st.GetAllAllocations() + if err != nil { + return nil, xerrors.Errorf("getting all allocations: %w", err) + } + + return allocations, nil +} + func (a *StateAPI) StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifreg.ClaimId, tsk types.TipSetKey) (*verifreg.Claim, error) { idAddr, err := a.StateLookupID(ctx, providerAddr, tsk) if err != nil { @@ -908,6 +936,25 @@ func (a *StateAPI) StateGetClaims(ctx context.Context, providerAddr address.Addr return claims, nil } +func (a *StateAPI) StateGetAllClaims(ctx context.Context, tsk types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) { + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) + if err != nil { + return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) + } + + st, err := a.StateManager.GetVerifregState(ctx, ts) + if err != nil { + return nil, xerrors.Errorf("loading verifreg state: %w", err) + } + + claims, err := st.GetAllClaims() + if err != nil { + return nil, xerrors.Errorf("getting all claims: %w", err) + } + + return claims, nil +} + func (a *StateAPI) StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) { nv, err := a.StateNetworkVersion(ctx, 
tsk) if err != nil { @@ -1914,6 +1961,8 @@ func (a *StateAPI) StateGetNetworkParams(ctx context.Context) (*api.NetworkParam UpgradeLightningHeight: build.UpgradeLightningHeight, UpgradeThunderHeight: build.UpgradeThunderHeight, UpgradeWatermelonHeight: build.UpgradeWatermelonHeight, + UpgradeDragonHeight: build.UpgradeDragonHeight, + UpgradePhoenixHeight: build.UpgradePhoenixHeight, }, }, nil } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 2ce42c327..90248a355 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -54,6 +54,7 @@ import ( "github.com/filecoin-project/lotus/storage/ctladdr" "github.com/filecoin-project/lotus/storage/paths" sealing "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/fsutil" @@ -243,7 +244,7 @@ func (sm *StorageMinerAPI) SectorsStatus(ctx context.Context, sid abi.SectorNumb return sInfo, nil } -func (sm *StorageMinerAPI) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) { +func (sm *StorageMinerAPI) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d piece.PieceDealInfo) (api.SectorOffset, error) { so, err := sm.Miner.SectorAddPieceToAny(ctx, size, r, d) if err != nil { // jsonrpc doesn't support returning values with errors, make sure we never do that @@ -506,7 +507,7 @@ func (sm *StorageMinerAPI) ComputeWindowPoSt(ctx context.Context, dlIdx uint64, } func (sm *StorageMinerAPI) ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) { - return sm.StorageMgr.DataCid(ctx, pieceSize, pieceData) + return sm.IStorageMgr.DataCid(ctx, pieceSize, pieceData) } func (sm *StorageMinerAPI) WorkerConnect(ctx 
context.Context, url string) error { diff --git a/node/modules/actorevent.go b/node/modules/actorevent.go index 4ce04cefd..d92da1940 100644 --- a/node/modules/actorevent.go +++ b/node/modules/actorevent.go @@ -5,13 +5,12 @@ import ( "path/filepath" "time" - "github.com/multiformats/go-varint" "go.uber.org/fx" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - builtintypes "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/filter" "github.com/filecoin-project/lotus/chain/messagepool" @@ -24,26 +23,26 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) -type EventAPI struct { +type EventHelperAPI struct { fx.In full.ChainAPI full.StateAPI } -var _ events.EventAPI = &EventAPI{} +var _ events.EventHelperAPI = &EventHelperAPI{} -func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.EthEvent, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI) (*full.EthEvent, error) { +func EthEventHandler(cfg config.EventsConfig, enableEthRPC bool) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *filter.EventFilterManager, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.EthEventHandler, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, fm *filter.EventFilterManager, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI) (*full.EthEventHandler, error) { ctx := 
helpers.LifecycleCtx(mctx, lc) - ee := &full.EthEvent{ + ee := &full.EthEventHandler{ Chain: cs, - MaxFilterHeightRange: abi.ChainEpoch(cfg.Events.MaxFilterHeightRange), + MaxFilterHeightRange: abi.ChainEpoch(cfg.MaxFilterHeightRange), SubscribtionCtx: ctx, } - if !cfg.EnableEthRPC || cfg.Events.DisableRealTimeFilterAPI { + if !enableEthRPC || cfg.DisableRealTimeFilterAPI { // all event functionality is disabled // the historic filter API relies on the real time one return ee, nil @@ -54,77 +53,23 @@ func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo StateAPI: stateapi, ChainAPI: chainapi, } - ee.FilterStore = filter.NewMemFilterStore(cfg.Events.MaxFilters) + ee.FilterStore = filter.NewMemFilterStore(cfg.MaxFilters) // Start garbage collection for filters lc.Append(fx.Hook{ OnStart: func(context.Context) error { - go ee.GC(ctx, time.Duration(cfg.Events.FilterTTL)) + go ee.GC(ctx, time.Duration(cfg.FilterTTL)) return nil }, }) - // Enable indexing of actor events - var eventIndex *filter.EventIndex - if !cfg.Events.DisableHistoricFilterAPI { - var dbPath string - if cfg.Events.DatabasePath == "" { - sqlitePath, err := r.SqlitePath() - if err != nil { - return nil, err - } - dbPath = filepath.Join(sqlitePath, "events.db") - } else { - dbPath = cfg.Events.DatabasePath - } - - var err error - eventIndex, err = filter.NewEventIndex(ctx, dbPath, chainapi.Chain) - if err != nil { - return nil, err - } - - lc.Append(fx.Hook{ - OnStop: func(context.Context) error { - return eventIndex.Close() - }, - }) - } - - ee.EventFilterManager = &filter.EventFilterManager{ - ChainStore: cs, - EventIndex: eventIndex, // will be nil unless EnableHistoricFilterAPI is true - AddressResolver: func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { - // we only want to match using f4 addresses - idAddr, err := address.NewIDAddress(uint64(emitter)) - if err != nil { - return address.Undef, false - } - - actor, err := 
sm.LoadActor(ctx, idAddr, ts) - if err != nil || actor.Address == nil { - return address.Undef, false - } - - // if robust address is not f4 then we won't match against it so bail early - if actor.Address.Protocol() != address.Delegated { - return address.Undef, false - } - // we have an f4 address, make sure it's assigned by the EAM - if namespace, _, err := varint.FromUvarint(actor.Address.Payload()); err != nil || namespace != builtintypes.EthereumAddressManagerActorID { - return address.Undef, false - } - return *actor.Address, true - }, - - MaxFilterResults: cfg.Events.MaxFilterResults, - } ee.TipSetFilterManager = &filter.TipSetFilterManager{ - MaxFilterResults: cfg.Events.MaxFilterResults, + MaxFilterResults: cfg.MaxFilterResults, } ee.MemPoolFilterManager = &filter.MemPoolFilterManager{ - MaxFilterResults: cfg.Events.MaxFilterResults, + MaxFilterResults: cfg.MaxFilterResults, } + ee.EventFilterManager = fm lc.Append(fx.Hook{ OnStart: func(context.Context) error { @@ -133,7 +78,6 @@ func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo return err } // ignore returned tipsets - _ = ev.Observe(ee.EventFilterManager) _ = ev.Observe(ee.TipSetFilterManager) ch, err := mp.Updates(ctx) @@ -149,3 +93,91 @@ func EthEventAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo return ee, nil } } + +func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, full.ChainAPI) (*filter.EventFilterManager, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, chainapi full.ChainAPI) (*filter.EventFilterManager, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + + // Enable indexing of actor events + var eventIndex *filter.EventIndex + if !cfg.DisableHistoricFilterAPI { + var dbPath string + if cfg.DatabasePath == "" { + sqlitePath, err := 
r.SqlitePath() + if err != nil { + return nil, err + } + dbPath = filepath.Join(sqlitePath, "events.db") + } else { + dbPath = cfg.DatabasePath + } + + var err error + eventIndex, err = filter.NewEventIndex(ctx, dbPath, chainapi.Chain) + if err != nil { + return nil, err + } + + lc.Append(fx.Hook{ + OnStop: func(context.Context) error { + return eventIndex.Close() + }, + }) + } + + fm := &filter.EventFilterManager{ + ChainStore: cs, + EventIndex: eventIndex, // will be nil unless EnableHistoricFilterAPI is true + // TODO: + // We don't need this address resolution anymore once https://github.com/filecoin-project/lotus/issues/11594 lands + AddressResolver: func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + + actor, err := sm.LoadActor(ctx, idAddr, ts) + if err != nil || actor.Address == nil { + return idAddr, true + } + + return *actor.Address, true + }, + + MaxFilterResults: cfg.MaxFilterResults, + } + + lc.Append(fx.Hook{ + OnStart: func(context.Context) error { + ev, err := events.NewEvents(ctx, &evapi) + if err != nil { + return err + } + _ = ev.Observe(fm) + return nil + }, + }) + + return fm, nil + } +} + +func ActorEventHandler(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *filter.EventFilterManager, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.ActorEventHandler, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, fm *filter.EventFilterManager, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI) (*full.ActorEventHandler, error) { + if !cfg.EnableActorEventsAPI || cfg.DisableRealTimeFilterAPI { + return full.NewActorEventHandler( + cs, + nil, // no EventFilterManager disables API 
calls + time.Duration(build.BlockDelaySecs)*time.Second, + abi.ChainEpoch(cfg.MaxFilterHeightRange), + ), nil + } + + return full.NewActorEventHandler( + cs, + fm, + time.Duration(build.BlockDelaySecs)*time.Second, + abi.ChainEpoch(cfg.MaxFilterHeightRange), + ), nil + } +} diff --git a/node/modules/dtypes/beacon.go b/node/modules/dtypes/beacon.go index 28bbdf281..91dd5cf57 100644 --- a/node/modules/dtypes/beacon.go +++ b/node/modules/dtypes/beacon.go @@ -13,4 +13,5 @@ type DrandConfig struct { Servers []string Relays []string ChainInfoJSON string + IsChained bool // Prior to Drand quicknet, beacons form a chain, post quicknet they do not (FIP-0063) } diff --git a/node/modules/ethmodule.go b/node/modules/ethmodule.go index 0255b6198..b36416e4e 100644 --- a/node/modules/ethmodule.go +++ b/node/modules/ethmodule.go @@ -21,8 +21,8 @@ import ( "github.com/filecoin-project/lotus/node/repo" ) -func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI) (*full.EthModule, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI) (*full.EthModule, error) { +func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI) (*full.EthModule, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI) (*full.EthModule, 
error) { sqlitePath, err := r.SqlitePath() if err != nil { return nil, err diff --git a/node/modules/rpc.go b/node/modules/rpc.go new file mode 100644 index 000000000..d76949737 --- /dev/null +++ b/node/modules/rpc.go @@ -0,0 +1,55 @@ +package modules + +import ( + "context" + + rpc "github.com/libp2p/go-libp2p-gorpc" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + consensus "github.com/filecoin-project/lotus/lib/consensus/raft" + "github.com/filecoin-project/lotus/node/impl/full" +) + +type RPCHandler struct { + mpoolAPI full.MpoolAPI + cons *consensus.Consensus +} + +func NewRPCHandler(mpoolAPI full.MpoolAPI, cons *consensus.Consensus) *RPCHandler { + return &RPCHandler{mpoolAPI, cons} +} + +func (h *RPCHandler) MpoolPushMessage(ctx context.Context, msgWhole *api.MpoolMessageWhole, ret *types.SignedMessage) error { + signedMsg, err := h.mpoolAPI.MpoolPushMessage(ctx, msgWhole.Msg, msgWhole.Spec) + if err != nil { + return err + } + *ret = *signedMsg + return nil +} + +func (h *RPCHandler) AddPeer(ctx context.Context, pid peer.ID, ret *struct{}) error { + return h.cons.AddPeer(ctx, pid) +} + +// Add other consensus RPC calls here + +func NewRPCClient(host host.Host) *rpc.Client { + protocolID := protocol.ID("/rpc/lotus-chain/v0") + return rpc.NewClient(host, protocolID) +} + +func NewRPCServer(ctx context.Context, host host.Host, rpcHandler *RPCHandler) error { + + authF := func(pid peer.ID, svc, method string) bool { + return rpcHandler.cons.IsTrustedPeer(ctx, pid) + } + + protocolID := protocol.ID("/rpc/lotus-chain/v0") + rpcServer := rpc.NewServer(host, protocolID, rpc.WithAuthorizeFunc(authF)) + return rpcServer.RegisterName("Consensus", rpcHandler) +} diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 0680029bf..e27a497bb 100644 --- 
a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -1017,6 +1017,11 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error TerminateBatchWait: config.Duration(cfg.TerminateBatchWait), MaxSectorProveCommitsSubmittedPerEpoch: cfg.MaxSectorProveCommitsSubmittedPerEpoch, UseSyntheticPoRep: cfg.UseSyntheticPoRep, + + RequireActivationSuccess: cfg.RequireActivationSuccess, + RequireActivationSuccessUpdate: cfg.RequireActivationSuccessUpdate, + RequireNotificationSuccess: cfg.RequireNotificationSuccess, + RequireNotificationSuccessUpdate: cfg.RequireNotificationSuccessUpdate, } c.SetSealingConfig(newCfg) }) @@ -1062,6 +1067,11 @@ func ToSealingConfig(dealmakingCfg config.DealmakingConfig, sealingCfg config.Se TerminateBatchMin: sealingCfg.TerminateBatchMin, TerminateBatchWait: time.Duration(sealingCfg.TerminateBatchWait), UseSyntheticPoRep: sealingCfg.UseSyntheticPoRep, + + RequireActivationSuccess: sealingCfg.RequireActivationSuccess, + RequireActivationSuccessUpdate: sealingCfg.RequireActivationSuccessUpdate, + RequireNotificationSuccess: sealingCfg.RequireNotificationSuccess, + RequireNotificationSuccessUpdate: sealingCfg.RequireNotificationSuccessUpdate, } } diff --git a/paychmgr/cbor_gen.go b/paychmgr/cbor_gen.go index f97c176a3..3f8aaa7b5 100644 --- a/paychmgr/cbor_gen.go +++ b/paychmgr/cbor_gen.go @@ -34,7 +34,7 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { } // t.Proof ([]uint8) (slice) - if len("Proof") > cbg.MaxLength { + if len("Proof") > 8192 { return xerrors.Errorf("Value in field \"Proof\" was too long") } @@ -45,7 +45,7 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Proof) > cbg.ByteArrayMaxLen { + if len(t.Proof) > 2097152 { return xerrors.Errorf("Byte array in field t.Proof was too long") } @@ -53,12 +53,12 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Proof[:]); err != nil { + if _, err := cw.Write(t.Proof); err 
!= nil { return err } // t.Voucher (paych.SignedVoucher) (struct) - if len("Voucher") > cbg.MaxLength { + if len("Voucher") > 8192 { return xerrors.Errorf("Value in field \"Voucher\" was too long") } @@ -74,7 +74,7 @@ func (t *VoucherInfo) MarshalCBOR(w io.Writer) error { } // t.Submitted (bool) (bool) - if len("Submitted") > cbg.MaxLength { + if len("Submitted") > 8192 { return xerrors.Errorf("Value in field \"Submitted\" was too long") } @@ -120,7 +120,7 @@ func (t *VoucherInfo) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -137,7 +137,7 @@ func (t *VoucherInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Proof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -148,9 +148,10 @@ func (t *VoucherInfo) UnmarshalCBOR(r io.Reader) (err error) { t.Proof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Proof[:]); err != nil { + if _, err := io.ReadFull(cr, t.Proof); err != nil { return err } + // t.Voucher (paych.SignedVoucher) (struct) case "Voucher": @@ -211,7 +212,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Amount (big.Int) (struct) - if len("Amount") > cbg.MaxLength { + if len("Amount") > 8192 { return xerrors.Errorf("Value in field \"Amount\" was too long") } @@ -227,7 +228,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Target (address.Address) (struct) - if len("Target") > cbg.MaxLength { + if len("Target") > 8192 { return xerrors.Errorf("Value in field \"Target\" was too long") } @@ -243,7 +244,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Channel (address.Address) (struct) - if len("Channel") > cbg.MaxLength { + if len("Channel") > 8192 { return xerrors.Errorf("Value in field \"Channel\" was too long") } @@ -259,7 +260,7 @@ func (t 
*ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Control (address.Address) (struct) - if len("Control") > cbg.MaxLength { + if len("Control") > 8192 { return xerrors.Errorf("Value in field \"Control\" was too long") } @@ -275,7 +276,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.NextLane (uint64) (uint64) - if len("NextLane") > cbg.MaxLength { + if len("NextLane") > 8192 { return xerrors.Errorf("Value in field \"NextLane\" was too long") } @@ -291,7 +292,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Settling (bool) (bool) - if len("Settling") > cbg.MaxLength { + if len("Settling") > 8192 { return xerrors.Errorf("Value in field \"Settling\" was too long") } @@ -307,7 +308,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Vouchers ([]*paychmgr.VoucherInfo) (slice) - if len("Vouchers") > cbg.MaxLength { + if len("Vouchers") > 8192 { return xerrors.Errorf("Value in field \"Vouchers\" was too long") } @@ -318,7 +319,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Vouchers) > cbg.MaxLength { + if len(t.Vouchers) > 8192 { return xerrors.Errorf("Slice value in field t.Vouchers was too long") } @@ -329,10 +330,11 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.ChannelID (string) (string) - if len("ChannelID") > cbg.MaxLength { + if len("ChannelID") > 8192 { return xerrors.Errorf("Value in field \"ChannelID\" was too long") } @@ -343,7 +345,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.ChannelID) > cbg.MaxLength { + if len(t.ChannelID) > 8192 { return xerrors.Errorf("Value in field t.ChannelID was too long") } @@ -355,7 +357,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.CreateMsg (cid.Cid) (struct) - if len("CreateMsg") > cbg.MaxLength { + if len("CreateMsg") > 8192 { return xerrors.Errorf("Value in field \"CreateMsg\" was too long") } @@ -377,7 
+379,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.Direction (uint64) (uint64) - if len("Direction") > cbg.MaxLength { + if len("Direction") > 8192 { return xerrors.Errorf("Value in field \"Direction\" was too long") } @@ -393,7 +395,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.AddFundsMsg (cid.Cid) (struct) - if len("AddFundsMsg") > cbg.MaxLength { + if len("AddFundsMsg") > 8192 { return xerrors.Errorf("Value in field \"AddFundsMsg\" was too long") } @@ -415,7 +417,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.PendingAmount (big.Int) (struct) - if len("PendingAmount") > cbg.MaxLength { + if len("PendingAmount") > 8192 { return xerrors.Errorf("Value in field \"PendingAmount\" was too long") } @@ -431,7 +433,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.AvailableAmount (big.Int) (struct) - if len("AvailableAmount") > cbg.MaxLength { + if len("AvailableAmount") > 8192 { return xerrors.Errorf("Value in field \"AvailableAmount\" was too long") } @@ -447,7 +449,7 @@ func (t *ChannelInfo) MarshalCBOR(w io.Writer) error { } // t.PendingAvailableAmount (big.Int) (struct) - if len("PendingAvailableAmount") > cbg.MaxLength { + if len("PendingAvailableAmount") > 8192 { return xerrors.Errorf("Value in field \"PendingAvailableAmount\" was too long") } @@ -493,7 +495,7 @@ func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -593,7 +595,7 @@ func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Vouchers: array too large (%d)", extra) } @@ -631,14 +633,14 @@ func (t *ChannelInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.ChannelID (string) (string) case "ChannelID": { - sval, err := cbg.ReadString(cr) + sval, err := 
cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -758,7 +760,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { } // t.Err (string) (string) - if len("Err") > cbg.MaxLength { + if len("Err") > 8192 { return xerrors.Errorf("Value in field \"Err\" was too long") } @@ -769,7 +771,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Err) > cbg.MaxLength { + if len(t.Err) > 8192 { return xerrors.Errorf("Value in field t.Err was too long") } @@ -781,7 +783,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { } // t.MsgCid (cid.Cid) (struct) - if len("MsgCid") > cbg.MaxLength { + if len("MsgCid") > 8192 { return xerrors.Errorf("Value in field \"MsgCid\" was too long") } @@ -797,7 +799,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { } // t.Received (bool) (bool) - if len("Received") > cbg.MaxLength { + if len("Received") > 8192 { return xerrors.Errorf("Value in field \"Received\" was too long") } @@ -813,7 +815,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { } // t.ChannelID (string) (string) - if len("ChannelID") > cbg.MaxLength { + if len("ChannelID") > 8192 { return xerrors.Errorf("Value in field \"ChannelID\" was too long") } @@ -824,7 +826,7 @@ func (t *MsgInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.ChannelID) > cbg.MaxLength { + if len(t.ChannelID) > 8192 { return xerrors.Errorf("Value in field t.ChannelID was too long") } @@ -866,7 +868,7 @@ func (t *MsgInfo) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -879,7 +881,7 @@ func (t *MsgInfo) UnmarshalCBOR(r io.Reader) (err error) { case "Err": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -921,7 +923,7 @@ func (t *MsgInfo) UnmarshalCBOR(r io.Reader) (err error) { case "ChannelID": { - sval, err := cbg.ReadString(cr) + sval, err 
:= cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } diff --git a/provider/lpweb/api/debug/debug.go b/provider/lpweb/api/debug/debug.go deleted file mode 100644 index 845684519..000000000 --- a/provider/lpweb/api/debug/debug.go +++ /dev/null @@ -1,229 +0,0 @@ -// Package debug provides the API for various debug endpoints in lotus-provider. -package debug - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "sort" - "sync" - "time" - - "github.com/BurntSushi/toml" - "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/build" - cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" -) - -var log = logging.Logger("lp/web/debug") - -type debug struct { - *deps.Deps -} - -func Routes(r *mux.Router, deps *deps.Deps) { - d := debug{deps} - r.HandleFunc("/chain-state-sse", d.chainStateSSE) -} - -type rpcInfo struct { - Address string - CLayers []string - Reachable bool - SyncState string - Version string -} - -func (d *debug) chainStateSSE(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Headers", "Content-Type") - w.Header().Set("Content-Type", "text/event-stream") - w.Header().Set("Cache-Control", "no-cache") - w.Header().Set("Connection", "keep-alive") - - ctx := r.Context() - - for { - - type minimalApiInfo struct { - Apis struct { - ChainApiInfo []string - } - } - - rpcInfos := map[string]minimalApiInfo{} // config name -> api info - confNameToAddr := map[string]string{} // config name -> api address - - err := forEachConfig[minimalApiInfo](d, func(name string, info minimalApiInfo) error { - if len(info.Apis.ChainApiInfo) == 0 { - return nil - } - - rpcInfos[name] = info - - for _, addr := range info.Apis.ChainApiInfo { - ai := cliutil.ParseApiInfo(addr) - confNameToAddr[name] = 
ai.Addr - } - - return nil - }) - if err != nil { - log.Errorw("getting api info", "error", err) - return - } - - dedup := map[string]bool{} // for dedup by address - - infos := map[string]rpcInfo{} // api address -> rpc info - var infosLk sync.Mutex - - var wg sync.WaitGroup - for _, info := range rpcInfos { - ai := cliutil.ParseApiInfo(info.Apis.ChainApiInfo[0]) - if dedup[ai.Addr] { - continue - } - dedup[ai.Addr] = true - wg.Add(1) - go func() { - defer wg.Done() - var clayers []string - for layer, a := range confNameToAddr { - if a == ai.Addr { - clayers = append(clayers, layer) - } - } - - myinfo := rpcInfo{ - Address: ai.Addr, - Reachable: false, - CLayers: clayers, - } - defer func() { - infosLk.Lock() - defer infosLk.Unlock() - infos[ai.Addr] = myinfo - }() - da, err := ai.DialArgs("v1") - if err != nil { - log.Warnw("DialArgs", "error", err) - return - } - - ah := ai.AuthHeader() - - v1api, closer, err := client.NewFullNodeRPCV1(ctx, da, ah) - if err != nil { - log.Warnf("Not able to establish connection to node with addr: %s", ai.Addr) - return - } - defer closer() - - ver, err := v1api.Version(ctx) - if err != nil { - log.Warnw("Version", "error", err) - return - } - - head, err := v1api.ChainHead(ctx) - if err != nil { - log.Warnw("ChainHead", "error", err) - return - } - - var syncState string - switch { - case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*3/2): // within 1.5 epochs - syncState = "ok" - case time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs*5): // within 5 epochs - syncState = fmt.Sprintf("slow (%s behind)", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)) - default: - syncState = fmt.Sprintf("behind (%s behind)", time.Since(time.Unix(int64(head.MinTimestamp()), 0)).Truncate(time.Second)) - } - - myinfo = rpcInfo{ - Address: ai.Addr, - CLayers: clayers, - Reachable: true, - Version: ver.Version, - SyncState: syncState, - } - }() - } - wg.Wait() - - var 
infoList []rpcInfo - for _, i := range infos { - infoList = append(infoList, i) - } - sort.Slice(infoList, func(i, j int) bool { - return infoList[i].Address < infoList[j].Address - }) - - fmt.Fprintf(w, "data: ") - err = json.NewEncoder(w).Encode(&infoList) - if err != nil { - log.Warnw("json encode", "error", err) - return - } - fmt.Fprintf(w, "\n\n") - if f, ok := w.(http.Flusher); ok { - f.Flush() - } - - time.Sleep(time.Duration(build.BlockDelaySecs) * time.Second) - - select { // stop running if there is reader. - case <-ctx.Done(): - return - default: - } - } -} - -func forEachConfig[T any](a *debug, cb func(name string, v T) error) error { - confs, err := a.loadConfigs(context.Background()) - if err != nil { - return err - } - - for name, tomlStr := range confs { // todo for-each-config - var info T - if err := toml.Unmarshal([]byte(tomlStr), &info); err != nil { - return xerrors.Errorf("unmarshaling %s config: %w", name, err) - } - - if err := cb(name, info); err != nil { - return xerrors.Errorf("cb: %w", err) - } - } - - return nil -} - -func (d *debug) loadConfigs(ctx context.Context) (map[string]string, error) { - //err := db.QueryRow(cctx.Context, `SELECT config FROM harmony_config WHERE title=$1`, layer).Scan(&text) - - rows, err := d.DB.Query(ctx, `SELECT title, config FROM harmony_config`) - if err != nil { - return nil, xerrors.Errorf("getting db configs: %w", err) - } - - configs := make(map[string]string) - for rows.Next() { - var title, config string - if err := rows.Scan(&title, &config); err != nil { - return nil, xerrors.Errorf("scanning db configs: %w", err) - } - configs[title] = config - } - - return configs, nil -} diff --git a/provider/lpweb/api/routes.go b/provider/lpweb/api/routes.go deleted file mode 100644 index 85b17486f..000000000 --- a/provider/lpweb/api/routes.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package api provides the HTTP API for the lotus provider web gui. 
-package api - -import ( - "github.com/gorilla/mux" - - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" - "github.com/filecoin-project/lotus/provider/lpweb/api/debug" -) - -func Routes(r *mux.Router, deps *deps.Deps) { - debug.Routes(r.PathPrefix("/debug").Subrouter(), deps) -} diff --git a/provider/lpweb/hapi/routes.go b/provider/lpweb/hapi/routes.go deleted file mode 100644 index b07ab60a5..000000000 --- a/provider/lpweb/hapi/routes.go +++ /dev/null @@ -1,35 +0,0 @@ -package hapi - -import ( - "embed" - "html/template" - - "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" -) - -//go:embed web/* -var templateFS embed.FS - -func Routes(r *mux.Router, deps *deps.Deps) error { - t, err := template.ParseFS(templateFS, "web/*") - if err != nil { - return xerrors.Errorf("parse templates: %w", err) - } - - a := &app{ - db: deps.DB, - t: t, - } - - r.HandleFunc("/simpleinfo/actorsummary", a.actorSummary) - r.HandleFunc("/simpleinfo/machines", a.indexMachines) - r.HandleFunc("/simpleinfo/tasks", a.indexTasks) - r.HandleFunc("/simpleinfo/taskhistory", a.indexTasksHistory) - return nil -} - -var log = logging.Logger("lpweb") diff --git a/provider/lpweb/hapi/simpleinfo.go b/provider/lpweb/hapi/simpleinfo.go deleted file mode 100644 index ee36a1e17..000000000 --- a/provider/lpweb/hapi/simpleinfo.go +++ /dev/null @@ -1,187 +0,0 @@ -package hapi - -import ( - "context" - "html/template" - "net/http" - "os" - "sync" - "time" - - "github.com/filecoin-project/lotus/lib/harmony/harmonydb" -) - -type app struct { - db *harmonydb.DB - t *template.Template - - actorInfoLk sync.Mutex - actorInfos []actorInfo -} - -type actorInfo struct { - Address string - CLayers []string - - QualityAdjustedPower string - RawBytePower string - - Deadlines []actorDeadline -} - -type actorDeadline struct { - Empty bool - Current bool - Proven bool - PartFaulty bool - Faulty bool -} - -func (a 
*app) actorSummary(w http.ResponseWriter, r *http.Request) { - a.actorInfoLk.Lock() - defer a.actorInfoLk.Unlock() - - a.executeTemplate(w, "actor_summary", a.actorInfos) -} - -func (a *app) indexMachines(w http.ResponseWriter, r *http.Request) { - s, err := a.clusterMachineSummary(r.Context()) - if err != nil { - log.Errorf("cluster machine summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - a.executeTemplate(w, "cluster_machines", s) -} - -func (a *app) indexTasks(w http.ResponseWriter, r *http.Request) { - s, err := a.clusterTaskSummary(r.Context()) - if err != nil { - log.Errorf("cluster task summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - a.executeTemplate(w, "cluster_tasks", s) -} - -func (a *app) indexTasksHistory(w http.ResponseWriter, r *http.Request) { - s, err := a.clusterTaskHistorySummary(r.Context()) - if err != nil { - log.Errorf("cluster task history summary: %v", err) - http.Error(w, "internal server error", http.StatusInternalServerError) - return - } - - a.executeTemplate(w, "cluster_task_history", s) -} - -var templateDev = os.Getenv("LOTUS_WEB_DEV") == "1" - -func (a *app) executeTemplate(w http.ResponseWriter, name string, data interface{}) { - if templateDev { - fs := os.DirFS("./cmd/lotus-provider/web/hapi/web") - a.t = template.Must(template.ParseFS(fs, "*")) - } - if err := a.t.ExecuteTemplate(w, name, data); err != nil { - log.Errorf("execute template %s: %v", name, err) - http.Error(w, "internal server error", http.StatusInternalServerError) - } -} - -type machineSummary struct { - Address string - ID int64 - SinceContact string -} - -type taskSummary struct { - Name string - SincePosted string - Owner *string - ID int64 -} - -type taskHistorySummary struct { - Name string - TaskID int64 - - Posted, Start, End string - - Result bool - Err string - - CompletedBy string -} - -func (a *app) clusterMachineSummary(ctx 
context.Context) ([]machineSummary, error) { - rows, err := a.db.Query(ctx, "SELECT id, host_and_port, last_contact FROM harmony_machines") - if err != nil { - return nil, err // Handle error - } - defer rows.Close() - - var summaries []machineSummary - for rows.Next() { - var m machineSummary - var lastContact time.Time - - if err := rows.Scan(&m.ID, &m.Address, &lastContact); err != nil { - return nil, err // Handle error - } - - m.SinceContact = time.Since(lastContact).Round(time.Second).String() - - summaries = append(summaries, m) - } - return summaries, nil -} - -func (a *app) clusterTaskSummary(ctx context.Context) ([]taskSummary, error) { - rows, err := a.db.Query(ctx, "SELECT id, name, update_time, owner_id FROM harmony_task") - if err != nil { - return nil, err // Handle error - } - defer rows.Close() - - var summaries []taskSummary - for rows.Next() { - var t taskSummary - var posted time.Time - - if err := rows.Scan(&t.ID, &t.Name, &posted, &t.Owner); err != nil { - return nil, err // Handle error - } - - t.SincePosted = time.Since(posted).Round(time.Second).String() - - summaries = append(summaries, t) - } - return summaries, nil -} - -func (a *app) clusterTaskHistorySummary(ctx context.Context) ([]taskHistorySummary, error) { - rows, err := a.db.Query(ctx, "SELECT id, name, task_id, posted, work_start, work_end, result, err, completed_by_host_and_port FROM harmony_task_history ORDER BY work_end DESC LIMIT 15") - if err != nil { - return nil, err // Handle error - } - defer rows.Close() - - var summaries []taskHistorySummary - for rows.Next() { - var t taskHistorySummary - var posted, start, end time.Time - - if err := rows.Scan(&t.TaskID, &t.Name, &t.TaskID, &posted, &start, &end, &t.Result, &t.Err, &t.CompletedBy); err != nil { - return nil, err // Handle error - } - - t.Posted = posted.Round(time.Second).Format("02 Jan 06 15:04") - t.Start = start.Round(time.Second).Format("02 Jan 06 15:04") - t.End = end.Round(time.Second).Format("02 Jan 06 15:04") 
- - summaries = append(summaries, t) - } - return summaries, nil -} diff --git a/provider/lpweb/hapi/web/actor_summary.gohtml b/provider/lpweb/hapi/web/actor_summary.gohtml deleted file mode 100644 index 31992fb23..000000000 --- a/provider/lpweb/hapi/web/actor_summary.gohtml +++ /dev/null @@ -1,20 +0,0 @@ -{{define "actor_summary"}} -{{range .}} - - {{.Address}} - - {{range .CLayers}} - {{.}} - {{end}} - - {{.QualityAdjustedPower}} - -
- {{range .Deadlines}} -
- {{end}} -
- - -{{end}} -{{end}} \ No newline at end of file diff --git a/provider/lpweb/hapi/web/chain_rpcs.gohtml b/provider/lpweb/hapi/web/chain_rpcs.gohtml deleted file mode 100644 index 5705da395..000000000 --- a/provider/lpweb/hapi/web/chain_rpcs.gohtml +++ /dev/null @@ -1,15 +0,0 @@ -{{define "chain_rpcs"}} -{{range .}} - - {{.Address}} - - {{range .CLayers}} - {{.}} - {{end}} - - {{if .Reachable}}ok{{else}}FAIL{{end}} - {{if eq "ok" .SyncState}}ok{{else}}{{.SyncState}}{{end}} - {{.Version}} - -{{end}} -{{end}} diff --git a/provider/lpweb/hapi/web/cluster_machines.gohtml b/provider/lpweb/hapi/web/cluster_machines.gohtml deleted file mode 100644 index f94f53bf8..000000000 --- a/provider/lpweb/hapi/web/cluster_machines.gohtml +++ /dev/null @@ -1,10 +0,0 @@ -{{define "cluster_machines"}} -{{range .}} - - {{.Address}} - {{.ID}} - todo - {{.SinceContact}} - -{{end}} -{{end}} diff --git a/provider/lpweb/hapi/web/cluster_task_history.gohtml b/provider/lpweb/hapi/web/cluster_task_history.gohtml deleted file mode 100644 index 8f04ef5c5..000000000 --- a/provider/lpweb/hapi/web/cluster_task_history.gohtml +++ /dev/null @@ -1,14 +0,0 @@ -{{define "cluster_task_history"}} - {{range .}} - - {{.Name}} - {{.TaskID}} - {{.CompletedBy}} - {{.Posted}} - {{.Start}} - {{.End}} - {{if .Result}}success{{else}}error{{end}} - {{.Err}} - - {{end}} -{{end}} diff --git a/provider/lpweb/hapi/web/cluster_tasks.gohtml b/provider/lpweb/hapi/web/cluster_tasks.gohtml deleted file mode 100644 index 690ab8cff..000000000 --- a/provider/lpweb/hapi/web/cluster_tasks.gohtml +++ /dev/null @@ -1,10 +0,0 @@ -{{define "cluster_tasks"}} - {{range .}} - - {{.Name}} - {{.ID}} - {{.SincePosted}} - {{.Owner}} - - {{end}} -{{end}} diff --git a/provider/lpweb/srv.go b/provider/lpweb/srv.go deleted file mode 100644 index f6bcfcf85..000000000 --- a/provider/lpweb/srv.go +++ /dev/null @@ -1,84 +0,0 @@ -// Package lpweb defines the HTTP web server for static files and endpoints. 
-package lpweb - -import ( - "context" - "embed" - "io" - "io/fs" - "net" - "net/http" - "os" - "path" - "strings" - "time" - - "github.com/gorilla/mux" - "go.opencensus.io/tag" - - "github.com/filecoin-project/lotus/cmd/lotus-provider/deps" - "github.com/filecoin-project/lotus/metrics" - "github.com/filecoin-project/lotus/provider/lpweb/api" - "github.com/filecoin-project/lotus/provider/lpweb/hapi" -) - -//go:embed static -var static embed.FS - -var basePath = "/static/" - -// An dev mode hack for no-restart changes to static and templates. -// You still need to recomplie the binary for changes to go code. -var webDev = os.Getenv("LOTUS_WEB_DEV") == "1" - -func GetSrv(ctx context.Context, deps *deps.Deps) (*http.Server, error) { - mx := mux.NewRouter() - err := hapi.Routes(mx.PathPrefix("/hapi").Subrouter(), deps) - if err != nil { - return nil, err - } - api.Routes(mx.PathPrefix("/api").Subrouter(), deps) - - basePath := basePath - - var static fs.FS = static - if webDev { - basePath = "cmd/lotus-provider/web/static" - static = os.DirFS(basePath) - } - - mx.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // If the request is for a directory, redirect to the index file. 
- if strings.HasSuffix(r.URL.Path, "/") { - r.URL.Path += "index.html" - } - - file, err := static.Open(path.Join(basePath, r.URL.Path)[1:]) - if err != nil { - w.WriteHeader(http.StatusNotFound) - _, _ = w.Write([]byte("404 Not Found")) - return - } - defer func() { _ = file.Close() }() - - fileInfo, err := file.Stat() - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - _, _ = w.Write([]byte("500 Internal Server Error")) - return - } - - http.ServeContent(w, r, fileInfo.Name(), fileInfo.ModTime(), file.(io.ReadSeeker)) - }) - - return &http.Server{ - Handler: http.HandlerFunc(mx.ServeHTTP), - BaseContext: func(listener net.Listener) context.Context { - ctx, _ := tag.New(context.Background(), tag.Upsert(metrics.APIInterface, "lotus-provider")) - return ctx - }, - Addr: deps.Cfg.Subsystems.GuiAddress, - ReadTimeout: time.Minute * 3, - ReadHeaderTimeout: time.Minute * 3, // lint - }, nil -} diff --git a/provider/lpweb/static/chain-connectivity.js b/provider/lpweb/static/chain-connectivity.js deleted file mode 100644 index ea7349c41..000000000 --- a/provider/lpweb/static/chain-connectivity.js +++ /dev/null @@ -1,73 +0,0 @@ -import { LitElement, html, css } from 'https://cdn.jsdelivr.net/gh/lit/dist@3/all/lit-all.min.js'; -window.customElements.define('chain-connectivity', class MyElement extends LitElement { - constructor() { - super(); - this.data = []; - this.loadData(); - } - loadData() { - const eventSource = new EventSource('/api/debug/chain-state-sse'); - eventSource.onmessage = (event) => { - this.data = JSON.parse(event.data); - super.requestUpdate(); - }; - eventSource.onerror = (error) => { - console.error('Error:', error); - loadData(); - }; - }; - - static get styles() { - return [css` - :host { - box-sizing: border-box; /* Don't forgert this to include padding/border inside width calculation */ - } - table { - border-collapse: collapse; - } - - table td, table th { - border-left: 1px solid #f0f0f0; - padding: 1px 5px; - } - - table tr 
td:first-child, table tr th:first-child { - border-left: none; - } - - .success { - color: green; - } - .warning { - color: yellow; - } - .error { - color: red; - } - `]; - } - render = () => html` - - - - - - - - - - - ${this.data.map(item => html` - - - - - - - `)} - - - - -
RPC AddressReachabilitySync StatusVersion
${item.Address}${item.Reachable ? html`ok` : html`FAIL`}${item.SyncState === "ok" ? html`ok` : html`${item.SyncState}`}${item.Version}
Data incoming...
` -}); diff --git a/provider/lpweb/static/index.html b/provider/lpweb/static/index.html deleted file mode 100644 index 98f7336ad..000000000 --- a/provider/lpweb/static/index.html +++ /dev/null @@ -1,193 +0,0 @@ - - - Lotus Provider Cluster Overview - - - - - -
-
-

Lotus Provider Cluster

-
-
- version [todo] -
-
-
-
-
-

Chain Connectivity

- -
-
-
-

Actor Summary

- - - - - - - - - - - -
AddressConfig LayersQaPDeadlines
-
-
-
-

Cluster Machines

- - - - - - - - - - - -
HostIDConfig LayersLast Contact
-
-
-
-

Recently Finished Tasks

- - - - - - - - - - - - - - - -
NameIDExecutorPostedStartEndOutcomeMessage
-
-
-
-

Cluster Tasks

- - - - - - - - - - - -
TaskIDPostedOwner
-
-
- - \ No newline at end of file diff --git a/provider/lpwinning/winning_task.go b/provider/lpwinning/winning_task.go index bf4f2fe71..907b594fd 100644 --- a/provider/lpwinning/winning_task.go +++ b/provider/lpwinning/winning_task.go @@ -156,18 +156,13 @@ func (t *WinPostTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (don ComputeTime: details.CompTime, } - persistNoWin := func() (bool, error) { - n, err := t.db.Exec(ctx, `UPDATE mining_base_block SET no_win = true WHERE task_id = $1`, taskID) + persistNoWin := func() error { + _, err := t.db.Exec(ctx, `UPDATE mining_base_block SET no_win = true WHERE task_id = $1`, taskID) if err != nil { - return false, xerrors.Errorf("marking base as not-won: %w", err) - } - log.Debugw("persisted no-win", "rows", n) - - if n == 0 { - return false, xerrors.Errorf("persist no win: no rows updated") + return xerrors.Errorf("marking base as not-won: %w", err) } - return true, nil + return nil } // ensure we have a beacon entry for the epoch we're mining on @@ -187,13 +182,13 @@ func (t *WinPostTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (don if mbi == nil { // not eligible to mine on this base, we're done here log.Debugw("WinPoSt not eligible to mine on this base", "tipset", types.LogCids(base.TipSet.Cids())) - return persistNoWin() + return true, persistNoWin() } if !mbi.EligibleForMining { // slashed or just have no power yet, we're done here log.Debugw("WinPoSt not eligible for mining", "tipset", types.LogCids(base.TipSet.Cids())) - return persistNoWin() + return true, persistNoWin() } if len(mbi.Sectors) == 0 { @@ -222,7 +217,7 @@ func (t *WinPostTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (don if eproof == nil { // not a winner, we're done here log.Debugw("WinPoSt not a winner", "tipset", types.LogCids(base.TipSet.Cids())) - return persistNoWin() + return true, persistNoWin() } } diff --git a/storage/paths/db_index.go b/storage/paths/db_index.go index e6bf3e5da..1e4abfab1 100644 --- 
a/storage/paths/db_index.go +++ b/storage/paths/db_index.go @@ -180,13 +180,12 @@ func (dbi *DBIndex) StorageAttach(ctx context.Context, si storiface.StorageInfo, } } - retryWait := time.Millisecond * 100 -retryAttachStorage: // Single transaction to attach storage which is not present in the DB _, err := dbi.harmonyDB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { + var urls sql.NullString var storageId sql.NullString - err = tx.QueryRow( + err = dbi.harmonyDB.QueryRow(ctx, "Select storage_id, urls FROM storage_path WHERE storage_id = $1", string(si.ID)).Scan(&storageId, &urls) if err != nil && !strings.Contains(err.Error(), "no rows in result set") { return false, xerrors.Errorf("storage attach select fails: %v", err) @@ -201,7 +200,7 @@ retryAttachStorage: } currUrls = union(currUrls, si.URLs) - _, err = tx.Exec( + _, err = dbi.harmonyDB.Exec(ctx, "UPDATE storage_path set urls=$1, weight=$2, max_storage=$3, can_seal=$4, can_store=$5, groups=$6, allow_to=$7, allow_types=$8, deny_types=$9 WHERE storage_id=$10", strings.Join(currUrls, ","), si.Weight, @@ -221,7 +220,7 @@ retryAttachStorage: } // Insert storage id - _, err = tx.Exec( + _, err = dbi.harmonyDB.Exec(ctx, "INSERT INTO storage_path "+ "Values($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16)", si.ID, @@ -246,11 +245,6 @@ retryAttachStorage: return true, nil }) if err != nil { - if harmonydb.IsErrSerialization(err) { - time.Sleep(retryWait) - retryWait *= 2 - goto retryAttachStorage - } return err } @@ -290,29 +284,22 @@ func (dbi *DBIndex) StorageDetach(ctx context.Context, id storiface.ID, url stri log.Warnw("Dropping sector path endpoint", "path", id, "url", url) } else { - retryWait := time.Millisecond * 100 - retryDropPath: // Single transaction to drop storage path and sector decls which have this as a storage path _, err := dbi.harmonyDB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { // Drop storage path completely - _, err = 
tx.Exec("DELETE FROM storage_path WHERE storage_id=$1", id) + _, err = dbi.harmonyDB.Exec(ctx, "DELETE FROM storage_path WHERE storage_id=$1", id) if err != nil { return false, err } // Drop all sectors entries which use this storage path - _, err = tx.Exec("DELETE FROM sector_location WHERE storage_id=$1", id) + _, err = dbi.harmonyDB.Exec(ctx, "DELETE FROM sector_location WHERE storage_id=$1", id) if err != nil { return false, err } return true, nil }) if err != nil { - if harmonydb.IsErrSerialization(err) { - time.Sleep(retryWait) - retryWait *= 2 - goto retryDropPath - } return err } log.Warnw("Dropping sector storage", "path", id) @@ -386,11 +373,9 @@ func (dbi *DBIndex) StorageDeclareSector(ctx context.Context, storageID storifac return xerrors.Errorf("invalid filetype") } - retryWait := time.Millisecond * 100 -retryStorageDeclareSector: _, err := dbi.harmonyDB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { var currPrimary sql.NullBool - err = tx.QueryRow( + err = dbi.harmonyDB.QueryRow(ctx, "SELECT is_primary FROM sector_location WHERE miner_id=$1 and sector_num=$2 and sector_filetype=$3 and storage_id=$4", uint64(s.Miner), uint64(s.Number), int(ft), string(storageID)).Scan(&currPrimary) if err != nil && !strings.Contains(err.Error(), "no rows in result set") { @@ -400,7 +385,7 @@ retryStorageDeclareSector: // If storage id already exists for this sector, update primary if need be if currPrimary.Valid { if !currPrimary.Bool && primary { - _, err = tx.Exec( + _, err = dbi.harmonyDB.Exec(ctx, "UPDATE sector_location set is_primary = TRUE WHERE miner_id=$1 and sector_num=$2 and sector_filetype=$3 and storage_id=$4", s.Miner, s.Number, ft, storageID) if err != nil { @@ -410,7 +395,7 @@ retryStorageDeclareSector: log.Warnf("sector %v redeclared in %s", s, storageID) } } else { - _, err = tx.Exec( + _, err = dbi.harmonyDB.Exec(ctx, "INSERT INTO sector_location "+ "values($1, $2, $3, $4, $5)", s.Miner, s.Number, ft, storageID, primary) @@ 
-422,11 +407,6 @@ retryStorageDeclareSector: return true, nil }) if err != nil { - if harmonydb.IsErrSerialization(err) { - time.Sleep(retryWait) - retryWait *= 2 - goto retryStorageDeclareSector - } return err } @@ -770,7 +750,7 @@ func (dbi *DBIndex) lock(ctx context.Context, sector abi.SectorID, read storifac _, err := dbi.harmonyDB.BeginTransaction(ctx, func(tx *harmonydb.Tx) (commit bool, err error) { fts := (read | write).AllSet() - err = tx.Select(&rows, + err = dbi.harmonyDB.Select(ctx, &rows, `SELECT sector_filetype, read_ts, read_refs, write_ts FROM sector_location WHERE miner_id=$1 @@ -812,7 +792,7 @@ func (dbi *DBIndex) lock(ctx context.Context, sector abi.SectorID, read storifac } // Acquire write locks - _, err = tx.Exec( + _, err = dbi.harmonyDB.Exec(ctx, `UPDATE sector_location SET write_ts = NOW(), write_lock_owner = $1 WHERE miner_id=$2 @@ -827,7 +807,7 @@ func (dbi *DBIndex) lock(ctx context.Context, sector abi.SectorID, read storifac } // Acquire read locks - _, err = tx.Exec( + _, err = dbi.harmonyDB.Exec(ctx, `UPDATE sector_location SET read_ts = NOW(), read_refs = read_refs + 1 WHERE miner_id=$1 diff --git a/storage/pipeline/cbor_gen.go b/storage/pipeline/cbor_gen.go index c832f8a14..f32c48b4d 100644 --- a/storage/pipeline/cbor_gen.go +++ b/storage/pipeline/cbor_gen.go @@ -14,7 +14,6 @@ import ( abi "github.com/filecoin-project/go-state-types/abi" - api "github.com/filecoin-project/lotus/api" storiface "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -36,7 +35,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.Log ([]sealing.Log) (slice) - if len("Log") > cbg.MaxLength { + if len("Log") > 8192 { return xerrors.Errorf("Value in field \"Log\" was too long") } @@ -47,7 +46,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Log) > cbg.MaxLength { + if len(t.Log) > 8192 { return xerrors.Errorf("Slice value in field t.Log was too long") } @@ -58,10 +57,11 @@ func (t *SectorInfo) 
MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.CommD (cid.Cid) (struct) - if len("CommD") > cbg.MaxLength { + if len("CommD") > 8192 { return xerrors.Errorf("Value in field \"CommD\" was too long") } @@ -83,7 +83,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.CommR (cid.Cid) (struct) - if len("CommR") > cbg.MaxLength { + if len("CommR") > 8192 { return xerrors.Errorf("Value in field \"CommR\" was too long") } @@ -105,7 +105,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.Proof ([]uint8) (slice) - if len("Proof") > cbg.MaxLength { + if len("Proof") > 8192 { return xerrors.Errorf("Value in field \"Proof\" was too long") } @@ -116,7 +116,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Proof) > cbg.ByteArrayMaxLen { + if len(t.Proof) > 2097152 { return xerrors.Errorf("Byte array in field t.Proof was too long") } @@ -124,12 +124,12 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.Proof[:]); err != nil { + if _, err := cw.Write(t.Proof); err != nil { return err } // t.State (sealing.SectorState) (string) - if len("State") > cbg.MaxLength { + if len("State") > 8192 { return xerrors.Errorf("Value in field \"State\" was too long") } @@ -140,7 +140,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.State) > cbg.MaxLength { + if len(t.State) > 8192 { return xerrors.Errorf("Value in field t.State was too long") } @@ -151,8 +151,8 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - // t.Pieces ([]api.SectorPiece) (slice) - if len("Pieces") > cbg.MaxLength { + // t.Pieces ([]sealing.SafeSectorPiece) (slice) + if len("Pieces") > 8192 { return xerrors.Errorf("Value in field \"Pieces\" was too long") } @@ -163,7 +163,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Pieces) > cbg.MaxLength { + if len(t.Pieces) > 8192 { return 
xerrors.Errorf("Slice value in field t.Pieces was too long") } @@ -174,10 +174,11 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.Return (sealing.ReturnState) (string) - if len("Return") > cbg.MaxLength { + if len("Return") > 8192 { return xerrors.Errorf("Value in field \"Return\" was too long") } @@ -188,7 +189,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.Return) > cbg.MaxLength { + if len(t.Return) > 8192 { return xerrors.Errorf("Value in field t.Return was too long") } @@ -200,7 +201,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.LastErr (string) (string) - if len("LastErr") > cbg.MaxLength { + if len("LastErr") > 8192 { return xerrors.Errorf("Value in field \"LastErr\" was too long") } @@ -211,7 +212,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.LastErr) > cbg.MaxLength { + if len(t.LastErr) > 8192 { return xerrors.Errorf("Value in field t.LastErr was too long") } @@ -222,8 +223,8 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - // t.CCPieces ([]api.SectorPiece) (slice) - if len("CCPieces") > cbg.MaxLength { + // t.CCPieces ([]sealing.SafeSectorPiece) (slice) + if len("CCPieces") > 8192 { return xerrors.Errorf("Value in field \"CCPieces\" was too long") } @@ -234,7 +235,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.CCPieces) > cbg.MaxLength { + if len(t.CCPieces) > 8192 { return xerrors.Errorf("Slice value in field t.CCPieces was too long") } @@ -245,10 +246,11 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } // t.CCUpdate (bool) (bool) - if len("CCUpdate") > cbg.MaxLength { + if len("CCUpdate") > 8192 { return xerrors.Errorf("Value in field \"CCUpdate\" was too long") } @@ -264,7 +266,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.SeedEpoch 
(abi.ChainEpoch) (int64) - if len("SeedEpoch") > cbg.MaxLength { + if len("SeedEpoch") > 8192 { return xerrors.Errorf("Value in field \"SeedEpoch\" was too long") } @@ -286,7 +288,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.SeedValue (abi.InteractiveSealRandomness) (slice) - if len("SeedValue") > cbg.MaxLength { + if len("SeedValue") > 8192 { return xerrors.Errorf("Value in field \"SeedValue\" was too long") } @@ -297,7 +299,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.SeedValue) > cbg.ByteArrayMaxLen { + if len(t.SeedValue) > 2097152 { return xerrors.Errorf("Byte array in field t.SeedValue was too long") } @@ -305,12 +307,12 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.SeedValue[:]); err != nil { + if _, err := cw.Write(t.SeedValue); err != nil { return err } // t.SectorType (abi.RegisteredSealProof) (int64) - if len("SectorType") > cbg.MaxLength { + if len("SectorType") > 8192 { return xerrors.Errorf("Value in field \"SectorType\" was too long") } @@ -332,7 +334,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.TicketEpoch (abi.ChainEpoch) (int64) - if len("TicketEpoch") > cbg.MaxLength { + if len("TicketEpoch") > 8192 { return xerrors.Errorf("Value in field \"TicketEpoch\" was too long") } @@ -354,7 +356,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.TicketValue (abi.SealRandomness) (slice) - if len("TicketValue") > cbg.MaxLength { + if len("TicketValue") > 8192 { return xerrors.Errorf("Value in field \"TicketValue\" was too long") } @@ -365,7 +367,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.TicketValue) > cbg.ByteArrayMaxLen { + if len(t.TicketValue) > 2097152 { return xerrors.Errorf("Byte array in field t.TicketValue was too long") } @@ -373,12 +375,12 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.TicketValue[:]); err != nil { + 
if _, err := cw.Write(t.TicketValue); err != nil { return err } // t.CreationTime (int64) (int64) - if len("CreationTime") > cbg.MaxLength { + if len("CreationTime") > 8192 { return xerrors.Errorf("Value in field \"CreationTime\" was too long") } @@ -400,7 +402,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.SectorNumber (abi.SectorNumber) (uint64) - if len("SectorNumber") > cbg.MaxLength { + if len("SectorNumber") > 8192 { return xerrors.Errorf("Value in field \"SectorNumber\" was too long") } @@ -416,7 +418,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.TerminatedAt (abi.ChainEpoch) (int64) - if len("TerminatedAt") > cbg.MaxLength { + if len("TerminatedAt") > 8192 { return xerrors.Errorf("Value in field \"TerminatedAt\" was too long") } @@ -438,7 +440,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.UpdateSealed (cid.Cid) (struct) - if len("UpdateSealed") > cbg.MaxLength { + if len("UpdateSealed") > 8192 { return xerrors.Errorf("Value in field \"UpdateSealed\" was too long") } @@ -460,7 +462,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.CommitMessage (cid.Cid) (struct) - if len("CommitMessage") > cbg.MaxLength { + if len("CommitMessage") > 8192 { return xerrors.Errorf("Value in field \"CommitMessage\" was too long") } @@ -482,7 +484,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.InvalidProofs (uint64) (uint64) - if len("InvalidProofs") > cbg.MaxLength { + if len("InvalidProofs") > 8192 { return xerrors.Errorf("Value in field \"InvalidProofs\" was too long") } @@ -498,7 +500,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommit1Out (storiface.PreCommit1Out) (slice) - if len("PreCommit1Out") > cbg.MaxLength { + if len("PreCommit1Out") > 8192 { return xerrors.Errorf("Value in field \"PreCommit1Out\" was too long") } @@ -509,7 +511,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.PreCommit1Out) > cbg.ByteArrayMaxLen { + 
if len(t.PreCommit1Out) > 2097152 { return xerrors.Errorf("Byte array in field t.PreCommit1Out was too long") } @@ -517,12 +519,12 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if _, err := cw.Write(t.PreCommit1Out[:]); err != nil { + if _, err := cw.Write(t.PreCommit1Out); err != nil { return err } // t.FaultReportMsg (cid.Cid) (struct) - if len("FaultReportMsg") > cbg.MaxLength { + if len("FaultReportMsg") > 8192 { return xerrors.Errorf("Value in field \"FaultReportMsg\" was too long") } @@ -544,7 +546,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.UpdateUnsealed (cid.Cid) (struct) - if len("UpdateUnsealed") > cbg.MaxLength { + if len("UpdateUnsealed") > 8192 { return xerrors.Errorf("Value in field \"UpdateUnsealed\" was too long") } @@ -566,7 +568,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommit1Fails (uint64) (uint64) - if len("PreCommit1Fails") > cbg.MaxLength { + if len("PreCommit1Fails") > 8192 { return xerrors.Errorf("Value in field \"PreCommit1Fails\" was too long") } @@ -582,7 +584,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommit2Fails (uint64) (uint64) - if len("PreCommit2Fails") > cbg.MaxLength { + if len("PreCommit2Fails") > 8192 { return xerrors.Errorf("Value in field \"PreCommit2Fails\" was too long") } @@ -598,7 +600,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommitTipSet (types.TipSetKey) (struct) - if len("PreCommitTipSet") > cbg.MaxLength { + if len("PreCommitTipSet") > 8192 { return xerrors.Errorf("Value in field \"PreCommitTipSet\" was too long") } @@ -614,7 +616,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteDataCache (storiface.SectorLocation) (struct) - if len("RemoteDataCache") > cbg.MaxLength { + if len("RemoteDataCache") > 8192 { return xerrors.Errorf("Value in field \"RemoteDataCache\" was too long") } @@ -630,7 +632,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // 
t.PreCommitDeposit (big.Int) (struct) - if len("PreCommitDeposit") > cbg.MaxLength { + if len("PreCommitDeposit") > 8192 { return xerrors.Errorf("Value in field \"PreCommitDeposit\" was too long") } @@ -646,7 +648,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.PreCommitMessage (cid.Cid) (struct) - if len("PreCommitMessage") > cbg.MaxLength { + if len("PreCommitMessage") > 8192 { return xerrors.Errorf("Value in field \"PreCommitMessage\" was too long") } @@ -668,7 +670,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteDataSealed (storiface.SectorLocation) (struct) - if len("RemoteDataSealed") > cbg.MaxLength { + if len("RemoteDataSealed") > 8192 { return xerrors.Errorf("Value in field \"RemoteDataSealed\" was too long") } @@ -684,7 +686,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.TerminateMessage (cid.Cid) (struct) - if len("TerminateMessage") > cbg.MaxLength { + if len("TerminateMessage") > 8192 { return xerrors.Errorf("Value in field \"TerminateMessage\" was too long") } @@ -706,7 +708,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteDataUnsealed (storiface.SectorLocation) (struct) - if len("RemoteDataUnsealed") > cbg.MaxLength { + if len("RemoteDataUnsealed") > 8192 { return xerrors.Errorf("Value in field \"RemoteDataUnsealed\" was too long") } @@ -722,7 +724,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.ReplicaUpdateProof (storiface.ReplicaUpdateProof) (slice) - if len("ReplicaUpdateProof") > cbg.MaxLength { + if len("ReplicaUpdateProof") > 8192 { return xerrors.Errorf("Value in field \"ReplicaUpdateProof\" was too long") } @@ -733,7 +735,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.ReplicaUpdateProof) > cbg.ByteArrayMaxLen { + if len(t.ReplicaUpdateProof) > 2097152 { return xerrors.Errorf("Byte array in field t.ReplicaUpdateProof was too long") } @@ -741,12 +743,12 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error 
{ return err } - if _, err := cw.Write(t.ReplicaUpdateProof[:]); err != nil { + if _, err := cw.Write(t.ReplicaUpdateProof); err != nil { return err } // t.RemoteDataFinalized (bool) (bool) - if len("RemoteDataFinalized") > cbg.MaxLength { + if len("RemoteDataFinalized") > 8192 { return xerrors.Errorf("Value in field \"RemoteDataFinalized\" was too long") } @@ -762,7 +764,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.ReplicaUpdateMessage (cid.Cid) (struct) - if len("ReplicaUpdateMessage") > cbg.MaxLength { + if len("ReplicaUpdateMessage") > 8192 { return xerrors.Errorf("Value in field \"ReplicaUpdateMessage\" was too long") } @@ -784,7 +786,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteCommit1Endpoint (string) (string) - if len("RemoteCommit1Endpoint") > cbg.MaxLength { + if len("RemoteCommit1Endpoint") > 8192 { return xerrors.Errorf("Value in field \"RemoteCommit1Endpoint\" was too long") } @@ -795,7 +797,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.RemoteCommit1Endpoint) > cbg.MaxLength { + if len(t.RemoteCommit1Endpoint) > 8192 { return xerrors.Errorf("Value in field t.RemoteCommit1Endpoint was too long") } @@ -807,7 +809,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteCommit2Endpoint (string) (string) - if len("RemoteCommit2Endpoint") > cbg.MaxLength { + if len("RemoteCommit2Endpoint") > 8192 { return xerrors.Errorf("Value in field \"RemoteCommit2Endpoint\" was too long") } @@ -818,7 +820,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.RemoteCommit2Endpoint) > cbg.MaxLength { + if len(t.RemoteCommit2Endpoint) > 8192 { return xerrors.Errorf("Value in field t.RemoteCommit2Endpoint was too long") } @@ -830,7 +832,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { } // t.RemoteSealingDoneEndpoint (string) (string) - if len("RemoteSealingDoneEndpoint") > cbg.MaxLength { + if len("RemoteSealingDoneEndpoint") > 8192 { 
return xerrors.Errorf("Value in field \"RemoteSealingDoneEndpoint\" was too long") } @@ -841,7 +843,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } - if len(t.RemoteSealingDoneEndpoint) > cbg.MaxLength { + if len(t.RemoteSealingDoneEndpoint) > 8192 { return xerrors.Errorf("Value in field t.RemoteSealingDoneEndpoint was too long") } @@ -883,7 +885,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -900,7 +902,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Log: array too large (%d)", extra) } @@ -928,9 +930,9 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.CommD (cid.Cid) (struct) case "CommD": @@ -985,7 +987,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.Proof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -996,21 +998,22 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.Proof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.Proof[:]); err != nil { + if _, err := io.ReadFull(cr, t.Proof); err != nil { return err } + // t.State (sealing.SectorState) (string) case "State": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } t.State = SectorState(sval) } - // t.Pieces ([]api.SectorPiece) (slice) + // t.Pieces ([]sealing.SafeSectorPiece) (slice) case "Pieces": maj, extra, err = cr.ReadHeader() @@ -1018,7 +1021,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Pieces: array too large (%d)", extra) } @@ -1027,7 +1030,7 @@ func (t 
*SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } if extra > 0 { - t.Pieces = make([]api.SectorPiece, extra) + t.Pieces = make([]SafeSectorPiece, extra) } for i := 0; i < int(extra); i++ { @@ -1046,14 +1049,14 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.Return (sealing.ReturnState) (string) case "Return": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1064,14 +1067,14 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "LastErr": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } t.LastErr = string(sval) } - // t.CCPieces ([]api.SectorPiece) (slice) + // t.CCPieces ([]sealing.SafeSectorPiece) (slice) case "CCPieces": maj, extra, err = cr.ReadHeader() @@ -1079,7 +1082,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.CCPieces: array too large (%d)", extra) } @@ -1088,7 +1091,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } if extra > 0 { - t.CCPieces = make([]api.SectorPiece, extra) + t.CCPieces = make([]SafeSectorPiece, extra) } for i := 0; i < int(extra); i++ { @@ -1107,9 +1110,9 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { } } + } } - // t.CCUpdate (bool) (bool) case "CCUpdate": @@ -1132,10 +1135,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "SeedEpoch": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1162,7 +1165,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.SeedValue: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -1173,17 +1176,18 @@ func (t *SectorInfo) 
UnmarshalCBOR(r io.Reader) (err error) { t.SeedValue = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.SeedValue[:]); err != nil { + if _, err := io.ReadFull(cr, t.SeedValue); err != nil { return err } + // t.SectorType (abi.RegisteredSealProof) (int64) case "SectorType": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1206,10 +1210,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "TicketEpoch": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1236,7 +1240,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.TicketValue: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -1247,17 +1251,18 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.TicketValue = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.TicketValue[:]); err != nil { + if _, err := io.ReadFull(cr, t.TicketValue); err != nil { return err } + // t.CreationTime (int64) (int64) case "CreationTime": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1295,10 +1300,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "TerminatedAt": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -1386,7 +1391,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.PreCommit1Out: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -1397,9 
+1402,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.PreCommit1Out = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.PreCommit1Out[:]); err != nil { + if _, err := io.ReadFull(cr, t.PreCommit1Out); err != nil { return err } + // t.FaultReportMsg (cid.Cid) (struct) case "FaultReportMsg": @@ -1610,7 +1616,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.ReplicaUpdateProof: byte array too large (%d)", extra) } if maj != cbg.MajByteString { @@ -1621,9 +1627,10 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { t.ReplicaUpdateProof = make([]uint8, extra) } - if _, err := io.ReadFull(cr, t.ReplicaUpdateProof[:]); err != nil { + if _, err := io.ReadFull(cr, t.ReplicaUpdateProof); err != nil { return err } + // t.RemoteDataFinalized (bool) (bool) case "RemoteDataFinalized": @@ -1669,7 +1676,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "RemoteCommit1Endpoint": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1680,7 +1687,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "RemoteCommit2Endpoint": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1691,7 +1698,7 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) (err error) { case "RemoteSealingDoneEndpoint": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1720,7 +1727,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { } // t.Kind (string) (string) - if len("Kind") > cbg.MaxLength { + if len("Kind") > 8192 { return xerrors.Errorf("Value in field \"Kind\" was too long") } @@ -1731,7 +1738,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { return err } - if len(t.Kind) > cbg.MaxLength { + if len(t.Kind) > 8192 { return 
xerrors.Errorf("Value in field t.Kind was too long") } @@ -1743,7 +1750,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { } // t.Trace (string) (string) - if len("Trace") > cbg.MaxLength { + if len("Trace") > 8192 { return xerrors.Errorf("Value in field \"Trace\" was too long") } @@ -1754,7 +1761,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { return err } - if len(t.Trace) > cbg.MaxLength { + if len(t.Trace) > 8192 { return xerrors.Errorf("Value in field t.Trace was too long") } @@ -1766,7 +1773,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { } // t.Message (string) (string) - if len("Message") > cbg.MaxLength { + if len("Message") > 8192 { return xerrors.Errorf("Value in field \"Message\" was too long") } @@ -1777,7 +1784,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { return err } - if len(t.Message) > cbg.MaxLength { + if len(t.Message) > 8192 { return xerrors.Errorf("Value in field t.Message was too long") } @@ -1789,7 +1796,7 @@ func (t *Log) MarshalCBOR(w io.Writer) error { } // t.Timestamp (uint64) (uint64) - if len("Timestamp") > cbg.MaxLength { + if len("Timestamp") > 8192 { return xerrors.Errorf("Value in field \"Timestamp\" was too long") } @@ -1836,7 +1843,7 @@ func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1849,7 +1856,7 @@ func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { case "Kind": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1860,7 +1867,7 @@ func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { case "Trace": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -1871,7 +1878,7 @@ func (t *Log) UnmarshalCBOR(r io.Reader) (err error) { case "Message": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != 
nil { return err } diff --git a/storage/pipeline/checks.go b/storage/pipeline/checks.go index ecd160231..1f21b9c63 100644 --- a/storage/pipeline/checks.go +++ b/storage/pipeline/checks.go @@ -4,6 +4,7 @@ import ( "bytes" "context" + "github.com/ipfs/go-cid" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -12,9 +13,9 @@ import ( "github.com/filecoin-project/go-state-types/crypto" prooftypes "github.com/filecoin-project/go-state-types/proof" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" ) // TODO: For now we handle this by halting state execution, when we get jsonrpc reconnecting @@ -41,7 +42,7 @@ type ErrCommitWaitFailed struct{ error } type ErrBadRU struct{ error } type ErrBadPR struct{ error } -func checkPieces(ctx context.Context, maddr address.Address, sn abi.SectorNumber, pieces []api.SectorPiece, api SealingAPI, mustHaveDeals bool) error { +func checkPieces(ctx context.Context, maddr address.Address, sn abi.SectorNumber, pieces []SafeSectorPiece, api SealingAPI, mustHaveDeals bool) error { ts, err := api.ChainHead(ctx) if err != nil { return &ErrApi{xerrors.Errorf("getting chain head: %w", err)} @@ -51,43 +52,84 @@ func checkPieces(ctx context.Context, maddr address.Address, sn abi.SectorNumber var offset abi.PaddedPieceSize for i, p := range pieces { + p, i := p, i + // check that the piece is correctly aligned - if offset%p.Piece.Size != 0 { - return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d is not aligned: size=%xh offset=%xh off-by=%xh", sn, i, p.Piece.Size, offset, offset%p.Piece.Size)} + if offset%p.Piece().Size != 0 { + return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d is not aligned: size=%xh offset=%xh off-by=%xh", sn, i, p.Piece().Size, offset, offset%p.Piece().Size)} } - offset += p.Piece.Size + offset += p.Piece().Size - // if no deal is associated 
with the piece, ensure that we added it as - // filler (i.e. ensure that it has a zero PieceCID) - if p.DealInfo == nil { - exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded()) - if !p.Piece.PieceCID.Equals(exp) { - return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sn, i, p.Piece.PieceCID)} - } - continue - } + err := p.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(pi UniversalPieceInfo) error { + // if no deal is associated with the piece, ensure that we added it as + // filler (i.e. ensure that it has a zero PieceCID) - dealCount++ + exp := zerocomm.ZeroPieceCommitment(p.Piece().Size.Unpadded()) + if !p.Piece().PieceCID.Equals(exp) { + return &ErrInvalidPiece{xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sn, i, p.Piece().PieceCID)} + } - deal, err := api.StateMarketStorageDeal(ctx, p.DealInfo.DealID, ts.Key()) + return nil + }, + BuiltinMarketHandler: func(pi UniversalPieceInfo) error { + dealCount++ + + deal, err := api.StateMarketStorageDeal(ctx, p.Impl().DealID, ts.Key()) + if err != nil { + return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.Impl().DealID, i, err)} + } + + if deal.Proposal.Provider != maddr { + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(pieces), sn, p.Impl().DealID, deal.Proposal.Provider, maddr)} + } + + if deal.Proposal.PieceCID != p.Piece().PieceCID { + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(pieces), sn, p.Impl().DealID, p.Impl().DealProposal.PieceCID, deal.Proposal.PieceCID)} + } + + if p.Piece().Size != deal.Proposal.PieceSize { + return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(pieces), sn, p.Impl().DealID, p.Piece().Size, deal.Proposal.PieceSize)} + } + + if ts.Height() >= deal.Proposal.StartEpoch { + 
return &ErrExpiredDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(pieces), sn, p.Impl().DealID, deal.Proposal.StartEpoch, ts.Height())} + } + + return nil + }, + DDOHandler: func(pi UniversalPieceInfo) error { + dealCount++ + + // try to get allocation to see if that still works + all, err := pi.GetAllocation(ctx, api, ts.Key()) + if err != nil { + return xerrors.Errorf("getting deal %d allocation: %w", p.Impl().DealID, err) + } + if all != nil { + mid, err := address.IDFromAddress(maddr) + if err != nil { + return xerrors.Errorf("getting miner id: %w", err) + } + + if all.Provider != abi.ActorID(mid) { + return xerrors.Errorf("allocation provider doesn't match miner") + } + + if ts.Height() >= all.Expiration { + return &ErrExpiredDeals{xerrors.Errorf("piece allocation %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(pieces), sn, p.Impl().DealID, all.Expiration, ts.Height())} + } + + if all.Size < p.Piece().Size { + return &ErrInvalidDeals{xerrors.Errorf("piece allocation %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(pieces), sn, p.Impl().DealID, p.Piece().Size, all.Size)} + } + } + + return nil + }, + }) if err != nil { - return &ErrInvalidDeals{xerrors.Errorf("getting deal %d for piece %d: %w", p.DealInfo.DealID, i, err)} - } - - if deal.Proposal.Provider != maddr { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(pieces), sn, p.DealInfo.DealID, deal.Proposal.Provider, maddr)} - } - - if deal.Proposal.PieceCID != p.Piece.PieceCID { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(pieces), sn, p.DealInfo.DealID, p.Piece.PieceCID, deal.Proposal.PieceCID)} - } - - if p.Piece.Size != deal.Proposal.PieceSize { - return &ErrInvalidDeals{xerrors.Errorf("piece %d (of %d) of sector %d 
refers deal %d with different size: %d != %d", i, len(pieces), sn, p.DealInfo.DealID, p.Piece.Size, deal.Proposal.PieceSize)} - } - - if ts.Height() >= deal.Proposal.StartEpoch { - return &ErrExpiredDeals{xerrors.Errorf("piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(pieces), sn, p.DealInfo.DealID, deal.Proposal.StartEpoch, ts.Height())} + return err } } @@ -106,8 +148,8 @@ func checkPrecommit(ctx context.Context, maddr address.Address, si SectorInfo, t return err } - if si.hasDeals() { - commD, err := api.StateComputeDataCID(ctx, maddr, si.SectorType, si.dealIDs(), tsk) + if si.hasData() { + commD, err := computeUnsealedCIDFromPieces(si) if err != nil { return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)} } @@ -223,8 +265,7 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, } // check that sector info is good after running a replica update -func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tsk types.TipSetKey, api SealingAPI) error { - +func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error { if err := checkPieces(ctx, maddr, si.SectorNumber, si.Pieces, api, true); err != nil { return err } @@ -232,9 +273,9 @@ func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInf return xerrors.Errorf("replica update on sector not marked for update") } - commD, err := api.StateComputeDataCID(ctx, maddr, si.SectorType, si.dealIDs(), tsk) + commD, err := computeUnsealedCIDFromPieces(si) if err != nil { - return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)} + return xerrors.Errorf("computing unsealed CID from pieces: %w", err) } if si.UpdateUnsealed == nil { @@ -253,5 +294,9 @@ func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInf } return nil - +} + +func computeUnsealedCIDFromPieces(si SectorInfo) (cid.Cid, error) { + pcs 
:= si.pieceInfos() + return ffiwrapper.GenerateUnsealedCID(si.SectorType, pcs) } diff --git a/storage/pipeline/commit_batch.go b/storage/pipeline/commit_batch.go index 754f31763..d702d3078 100644 --- a/storage/pipeline/commit_batch.go +++ b/storage/pipeline/commit_batch.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -57,6 +58,9 @@ type AggregateInput struct { Spt abi.RegisteredSealProof Info proof.AggregateSealVerifyInfo Proof []byte + + ActivationManifest miner.SectorActivationManifest + DealIDPrecommit bool } type CommitBatcher struct { @@ -205,17 +209,22 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, return nil, xerrors.Errorf("getting config: %w", err) } - if notif && total < cfg.MaxCommitBatch { + if notif && total < cfg.MaxCommitBatch && cfg.AggregateCommits { return nil, nil } - var res []sealiface.CommitBatchRes + var res, resV1 []sealiface.CommitBatchRes ts, err := b.api.ChainHead(b.mctx) if err != nil { return nil, err } + nv, err := b.api.StateNetworkVersion(b.mctx, ts.Key()) + if err != nil { + return nil, xerrors.Errorf("getting network version: %s", err) + } + blackedOut := func() bool { const nv16BlackoutWindow = abi.ChainEpoch(20) // a magik number if ts.Height() <= build.UpgradeSkyrHeight && build.UpgradeSkyrHeight-ts.Height() < nv16BlackoutWindow { @@ -224,7 +233,7 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, return false } - individual := (total < cfg.MinCommitBatch) || (total < miner.MinAggregatedSectors) || blackedOut() + individual := (total < cfg.MinCommitBatch) || (total < miner.MinAggregatedSectors) || blackedOut() || 
!cfg.AggregateCommits if !individual && !cfg.AggregateAboveBaseFee.Equals(big.Zero()) { if ts.MinTicketBlock().ParentBaseFee.LessThan(cfg.AggregateAboveBaseFee) { @@ -232,25 +241,67 @@ func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, } } - if individual { - res, err = b.processIndividually(cfg) - } else { + if nv >= MinDDONetworkVersion { + // After nv21, we have a new ProveCommitSectors2 method, which supports + // batching without aggregation, but it doesn't support onboarding + // sectors which were precommitted with DealIDs in the precommit message. + // We prefer it for all other sectors, so first we use the new processBatchV2 + var sectors []abi.SectorNumber for sn := range b.todo { sectors = append(sectors, sn) } - res, err = b.processBatch(cfg, sectors) + res, err = b.processBatchV2(cfg, sectors, nv, !individual) + if err != nil { + err = xerrors.Errorf("processBatchV2: %w", err) + } + + // Mark sectors as done + for _, r := range res { + if err != nil { + r.Error = err.Error() + } + + for _, sn := range r.Sectors { + for _, ch := range b.waiting[sn] { + ch <- r // buffered + } + + delete(b.waiting, sn) + delete(b.todo, sn) + delete(b.cutoffs, sn) + } + } } if err != nil { - log.Warnf("CommitBatcher maybeStartBatch individual:%v processBatch %v", individual, err) + log.Warnf("CommitBatcher maybeStartBatch processBatch-ddo %v", err) } if err != nil && len(res) == 0 { return nil, err } - for _, r := range res { + if individual { + resV1, err = b.processIndividually(cfg) + } else { + var sectors []abi.SectorNumber + for sn := range b.todo { + sectors = append(sectors, sn) + } + resV1, err = b.processBatchV1(cfg, sectors, nv) + } + + if err != nil { + log.Warnf("CommitBatcher maybeStartBatch individual:%v processBatch %v", individual, err) + } + + if err != nil && len(resV1) == 0 { + return nil, err + } + + // Mark the rest as processed + for _, r := range resV1 { if err != nil { r.Error = err.Error() } @@ -266,10 +317,169 @@ 
func (b *CommitBatcher) maybeStartBatch(notif bool) ([]sealiface.CommitBatchRes, } } + res = append(res, resV1...) + return res, nil } -func (b *CommitBatcher) processBatch(cfg sealiface.Config, sectors []abi.SectorNumber) ([]sealiface.CommitBatchRes, error) { +// processBatchV2 processes a batch of sectors after nv22. It will always send +// ProveCommitSectors3Params which may contain either individual proofs or an +// aggregate proof depending on SP condition and network conditions. +func (b *CommitBatcher) processBatchV2(cfg sealiface.Config, sectors []abi.SectorNumber, nv network.Version, aggregate bool) ([]sealiface.CommitBatchRes, error) { + ts, err := b.api.ChainHead(b.mctx) + if err != nil { + return nil, err + } + + // sort sectors by number + sort.Slice(sectors, func(i, j int) bool { return sectors[i] < sectors[j] }) + + total := len(sectors) + + res := sealiface.CommitBatchRes{ + FailedSectors: map[abi.SectorNumber]string{}, + } + + params := miner.ProveCommitSectors3Params{ + RequireActivationSuccess: cfg.RequireActivationSuccess, + RequireNotificationSuccess: cfg.RequireNotificationSuccess, + } + + infos := make([]proof.AggregateSealVerifyInfo, 0, total) + collateral := big.Zero() + + for _, sector := range sectors { + if b.todo[sector].DealIDPrecommit { + // can't process sectors precommitted with deal IDs with ProveCommitSectors2 + continue + } + + res.Sectors = append(res.Sectors, sector) + + sc, err := b.getSectorCollateral(sector, ts.Key()) + if err != nil { + res.FailedSectors[sector] = err.Error() + continue + } + + collateral = big.Add(collateral, sc) + + params.SectorActivations = append(params.SectorActivations, b.todo[sector].ActivationManifest) + params.SectorProofs = append(params.SectorProofs, b.todo[sector].Proof) + + infos = append(infos, b.todo[sector].Info) + } + + if len(infos) == 0 { + return nil, nil + } + + proofs := make([][]byte, 0, total) + for _, info := range infos { + proofs = append(proofs, b.todo[info.Number].Proof) + } + 
+ needFunds := collateral + + if aggregate { + params.SectorProofs = nil // can't be set when aggregating + arp, err := b.aggregateProofType(nv) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting aggregate proof type: %w", err) + } + params.AggregateProofType = &arp + + mid, err := address.IDFromAddress(b.maddr) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting miner id: %w", err) + } + + params.AggregateProof, err = b.prover.AggregateSealProofs(proof.AggregateSealVerifyProofAndInfos{ + Miner: abi.ActorID(mid), + SealProof: b.todo[infos[0].Number].Spt, + AggregateProof: arp, + Infos: infos, + }, proofs) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("aggregating proofs: %w", err) + } + + aggFeeRaw, err := policy.AggregateProveCommitNetworkFee(nv, len(infos), ts.MinTicketBlock().ParentBaseFee) + if err != nil { + res.Error = err.Error() + log.Errorf("getting aggregate commit network fee: %s", err) + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting aggregate commit network fee: %s", err) + } + + aggFee := big.Div(big.Mul(aggFeeRaw, aggFeeNum), aggFeeDen) + + needFunds = big.Add(collateral, aggFee) + } + + needFunds, err = collateralSendAmount(b.mctx, b.api, b.maddr, cfg, needFunds) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, err + } + + maxFee := b.feeCfg.MaxCommitBatchGasFee.FeeForSectors(len(infos)) + goodFunds := big.Add(maxFee, needFunds) + + mi, err := b.api.StateMinerInfo(b.mctx, b.maddr, types.EmptyTSK) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't get miner info: %w", err) + } + + from, _, err := b.addrSel.AddressFor(b.mctx, b.api, mi, api.CommitAddr, goodFunds, needFunds) + if err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, 
xerrors.Errorf("no good address found: %w", err) + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("couldn't serialize ProveCommitSectors3Params: %w", err) + } + + _, err = simulateMsgGas(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.ProveCommitSectors3, needFunds, maxFee, enc.Bytes()) + + if err != nil && (!api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) || len(sectors) < miner.MinAggregatedSectors*2) { + log.Errorf("simulating CommitBatch message failed (%x): %s", enc.Bytes(), err) + res.Error = err.Error() + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("simulating CommitBatch message failed: %w", err) + } + + msgTooLarge := len(enc.Bytes()) > (messagepool.MaxMessageSize - 128) + + // If we're out of gas, split the batch in half and evaluate again + if api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) || msgTooLarge { + log.Warnf("CommitAggregate message ran out of gas or is too large, splitting batch in half and trying again (sectors: %d, params: %d)", len(sectors), len(enc.Bytes())) + mid := len(sectors) / 2 + ret0, _ := b.processBatchV2(cfg, sectors[:mid], nv, aggregate) + ret1, _ := b.processBatchV2(cfg, sectors[mid:], nv, aggregate) + + return append(ret0, ret1...), nil + } + + mcid, err := sendMsg(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.ProveCommitSectors3, needFunds, maxFee, enc.Bytes()) + if err != nil { + return []sealiface.CommitBatchRes{res}, xerrors.Errorf("sending message failed (params size: %d, sectors: %d, agg: %t): %w", len(enc.Bytes()), len(sectors), aggregate, err) + } + + res.Msg = &mcid + + log.Infow("Sent ProveCommitSectors3 message", "cid", mcid, "from", from, "todo", total, "sectors", len(infos)) + + return []sealiface.CommitBatchRes{res}, nil +} + +// processBatchV1 processes a batch of sectors before nv22. It always sends out an aggregate message. 
+func (b *CommitBatcher) processBatchV1(cfg sealiface.Config, sectors []abi.SectorNumber, nv network.Version) ([]sealiface.CommitBatchRes, error) { ts, err := b.api.ChainHead(b.mctx) if err != nil { return nil, err @@ -322,13 +532,6 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config, sectors []abi.SectorN return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting miner id: %w", err) } - nv, err := b.api.StateNetworkVersion(b.mctx, ts.Key()) - if err != nil { - res.Error = err.Error() - log.Errorf("getting network version: %s", err) - return []sealiface.CommitBatchRes{res}, xerrors.Errorf("getting network version: %s", err) - } - arp, err := b.aggregateProofType(nv) if err != nil { res.Error = err.Error() @@ -387,7 +590,7 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config, sectors []abi.SectorN _, err = simulateMsgGas(b.mctx, b.api, from, b.maddr, builtin.MethodsMiner.ProveCommitAggregate, needFunds, maxFee, enc.Bytes()) if err != nil && (!api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) || len(sectors) < miner.MinAggregatedSectors*2) { - log.Errorf("simulating CommitBatch message failed: %s", err) + log.Errorf("simulating CommitBatch message failed (%x): %s", enc.Bytes(), err) res.Error = err.Error() return []sealiface.CommitBatchRes{res}, xerrors.Errorf("simulating CommitBatch message failed: %w", err) } @@ -396,8 +599,8 @@ func (b *CommitBatcher) processBatch(cfg sealiface.Config, sectors []abi.SectorN if api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}) { log.Warnf("CommitAggregate message ran out of gas, splitting batch in half and trying again (sectors: %d)", len(sectors)) mid := len(sectors) / 2 - ret0, _ := b.processBatch(cfg, sectors[:mid]) - ret1, _ := b.processBatch(cfg, sectors[mid:]) + ret0, _ := b.processBatchV1(cfg, sectors[:mid], nv) + ret1, _ := b.processBatchV1(cfg, sectors[mid:], nv) return append(ret0, ret1...), nil } @@ -484,6 +687,10 @@ func (b *CommitBatcher) processIndividually(cfg sealiface.Config) ([]sealiface.C } func 
(b *CommitBatcher) processSingle(cfg sealiface.Config, mi api.MinerInfo, avail *abi.TokenAmount, sn abi.SectorNumber, info AggregateInput, tsk types.TipSetKey) (cid.Cid, error) { + return b.processSingleV1(cfg, mi, avail, sn, info, tsk) +} + +func (b *CommitBatcher) processSingleV1(cfg sealiface.Config, mi api.MinerInfo, avail *abi.TokenAmount, sn abi.SectorNumber, info AggregateInput, tsk types.TipSetKey) (cid.Cid, error) { enc := new(bytes.Buffer) params := &miner.ProveCommitSectorParams{ SectorNumber: sn, @@ -646,11 +853,15 @@ func (b *CommitBatcher) getCommitCutoff(si SectorInfo) (time.Time, error) { cutoffEpoch := pci.PreCommitEpoch + mpcd for _, p := range si.Pieces { - if p.DealInfo == nil { + if !p.HasDealInfo() { continue } - startEpoch := p.DealInfo.DealSchedule.StartEpoch + startEpoch, err := p.StartEpoch() + if err != nil { + log.Errorf("getting deal start epoch: %s", err) + return time.Now(), err + } if startEpoch < cutoffEpoch { cutoffEpoch = startEpoch } diff --git a/storage/pipeline/commit_batch_test.go b/storage/pipeline/commit_batch_test.go deleted file mode 100644 index 5ae2f171a..000000000 --- a/storage/pipeline/commit_batch_test.go +++ /dev/null @@ -1,498 +0,0 @@ -// stm: #unit -package sealing_test - -import ( - "bytes" - "context" - "sort" - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/network" - prooftypes "github.com/filecoin-project/go-state-types/proof" - miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - 
"github.com/filecoin-project/lotus/storage/ctladdr" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" - "github.com/filecoin-project/lotus/storage/pipeline/mocks" - "github.com/filecoin-project/lotus/storage/pipeline/sealiface" - "github.com/filecoin-project/lotus/storage/sealer/storiface" -) - -func TestCommitBatcher(t *testing.T) { - //stm: @CHAIN_STATE_MINER_PRE_COM_INFO_001, @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - ctx := context.Background() - - as := asel(func(ctx context.Context, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - return t0123, big.Zero(), nil - }) - - maxBatch := miner5.MaxAggregatedSectors - minBatch := miner5.MinAggregatedSectors - - cfg := func() (sealiface.Config, error) { - return sealiface.Config{ - MaxWaitDealsSectors: 2, - MaxSealingSectors: 0, - MaxSealingSectorsForDeals: 0, - WaitDealsDelay: time.Hour * 6, - AlwaysKeepUnsealedCopy: true, - - MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, - PreCommitBatchWait: 24 * time.Hour, - PreCommitBatchSlack: 3 * time.Hour, - - AggregateCommits: true, - MinCommitBatch: minBatch, - MaxCommitBatch: maxBatch, - CommitBatchWait: 24 * time.Hour, - CommitBatchSlack: 1 * time.Hour, - - AggregateAboveBaseFee: types.BigMul(types.PicoFil, types.NewInt(150)), // 0.15 nFIL - BatchPreCommitAboveBaseFee: types.BigMul(types.PicoFil, types.NewInt(150)), // 0.15 nFIL - - TerminateBatchMin: 1, - TerminateBatchMax: 100, - TerminateBatchWait: 5 * time.Minute, - }, nil - } - - type promise func(t *testing.T) - type action func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise - - actions := func(as ...action) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - var ps []promise - for _, a := range as { - p := a(t, s, pcb) - if p != nil { - ps = 
append(ps, p) - } - } - - if len(ps) > 0 { - return func(t *testing.T) { - for _, p := range ps { - p(t) - } - } - } - return nil - } - } - - addSector := func(sn abi.SectorNumber, aboveBalancer bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - var pcres sealiface.CommitBatchRes - var pcerr error - done := sync.Mutex{} - done.Lock() - - si := pipeline.SectorInfo{ - SectorNumber: sn, - } - - basefee := types.PicoFil - if aboveBalancer { - basefee = types.NanoFil - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil) - s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&minertypes.SectorPreCommitOnChainInfo{ - PreCommitDeposit: big.Zero(), - }, nil) - - go func() { - defer done.Unlock() - pcres, pcerr = pcb.AddCommit(ctx, si, pipeline.AggregateInput{ - Info: prooftypes.AggregateSealVerifyInfo{ - Number: sn, - }, - }) - }() - - return func(t *testing.T) { - done.Lock() - require.NoError(t, pcerr) - require.Empty(t, pcres.Error) - require.Contains(t, pcres.Sectors, si.SectorNumber) - } - } - } - - addSectors := func(sectors []abi.SectorNumber, aboveBalancer bool) action { - as := make([]action, len(sectors)) - for i, sector := range sectors { - as[i] = addSector(sector, aboveBalancer) - } - return actions(as...) 
- } - - waitPending := func(n int) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - require.Eventually(t, func() bool { - p, err := pcb.Pending(ctx) - require.NoError(t, err) - return len(p) == n - }, time.Second*5, 10*time.Millisecond) - - return nil - } - } - - //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001, @CHAIN_STATE_MINER_GET_COLLATERAL_001 - expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil) - - ti := len(expect) - batch := false - if ti >= minBatch { - batch = true - ti = 1 - } - - basefee := types.PicoFil - if aboveBalancer { - basefee = types.NanoFil - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil) - /*if batch { - s.EXPECT().ChainBaseFee(gomock.Any(), gomock.Any()).Return(basefee, nil) - }*/ - - if !aboveBalancer { - batch = false - ti = len(expect) - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil) - - pciC := len(expect) - if failOnePCI { - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), abi.SectorNumber(1), gomock.Any()).Return(nil, nil).Times(1) // not found - pciC = len(expect) - 1 - if !batch { - ti-- - } - } - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&minertypes.SectorPreCommitOnChainInfo{ - PreCommitDeposit: big.Zero(), - }, nil).Times(pciC) - s.EXPECT().StateMinerInitialPledgeCollateral(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(big.Zero(), nil).Times(pciC) - - if batch { - s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version13, nil) - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), 
gomock.Any()).Return(&types.Message{GasLimit: 100000}, nil) - } - - s.EXPECT().MpoolPushMessage(gomock.Any(), funMatcher(func(i interface{}) bool { - b := i.(*types.Message) - if batch { - var params miner5.ProveCommitAggregateParams - require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params))) - for _, number := range expect { - set, err := params.SectorNumbers.IsSet(uint64(number)) - require.NoError(t, err) - require.True(t, set) - } - } else { - var params miner5.ProveCommitSectorParams - require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params))) - } - return true - }), gomock.Any()).Return(dummySmsg, nil).Times(ti) - return nil - } - } - - expectProcessBatch := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool, gasOverLimit bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil) - - ti := len(expect) - batch := false - if ti >= minBatch { - batch = true - ti = 1 - } - - if !aboveBalancer { - batch = false - ti = len(expect) - } - - pciC := len(expect) - if failOnePCI { - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), abi.SectorNumber(1), gomock.Any()).Return(nil, nil).Times(1) // not found - pciC = len(expect) - 1 - if !batch { - ti-- - } - } - s.EXPECT().StateSectorPreCommitInfo(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&minertypes.SectorPreCommitOnChainInfo{ - PreCommitDeposit: big.Zero(), - }, nil).Times(pciC) - s.EXPECT().StateMinerInitialPledgeCollateral(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(big.Zero(), nil).Times(pciC) - - if batch { - s.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version18, nil) - if gasOverLimit { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, &api.ErrOutOfGas{}) - } else { - 
s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.Message{GasLimit: 100000}, nil) - } - - } - return nil - } - } - - flush := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { - return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *pipeline.CommitBatcher) promise { - _ = expectSend(expect, aboveBalancer, failOnePCI)(t, s, pcb) - - batch := len(expect) >= minBatch && aboveBalancer - - r, err := pcb.Flush(ctx) - require.NoError(t, err) - if batch { - require.Len(t, r, 1) - require.Empty(t, r[0].Error) - sort.Slice(r[0].Sectors, func(i, j int) bool { - return r[0].Sectors[i] < r[0].Sectors[j] - }) - require.Equal(t, expect, r[0].Sectors) - if !failOnePCI { - require.Len(t, r[0].FailedSectors, 0) - } else { - require.Len(t, r[0].FailedSectors, 1) - _, found := r[0].FailedSectors[1] - require.True(t, found) - } - } else { - require.Len(t, r, len(expect)) - for _, res := range r { - require.Len(t, res.Sectors, 1) - require.Empty(t, res.Error) - } - sort.Slice(r, func(i, j int) bool { - return r[i].Sectors[0] < r[j].Sectors[0] - }) - for i, res := range r { - require.Equal(t, abi.SectorNumber(i), res.Sectors[0]) - if failOnePCI && res.Sectors[0] == 1 { - require.Len(t, res.FailedSectors, 1) - _, found := res.FailedSectors[1] - require.True(t, found) - } else { - require.Empty(t, res.FailedSectors) - } - } - } - - return nil - } - } - - getSectors := func(n int) []abi.SectorNumber { - out := make([]abi.SectorNumber, n) - for i := range out { - out[i] = abi.SectorNumber(i) - } - return out - } - - tcs := map[string]struct { - actions []action - }{ - "addSingle-aboveBalancer": { - actions: []action{ - addSector(0, true), - waitPending(1), - flush([]abi.SectorNumber{0}, true, false), - }, - }, - "addTwo-aboveBalancer": { - actions: []action{ - addSectors(getSectors(2), true), - waitPending(2), - flush(getSectors(2), true, false), - }, - }, - "addAte-aboveBalancer": { - actions: []action{ - 
addSectors(getSectors(8), true), - waitPending(8), - flush(getSectors(8), true, false), - }, - }, - "addMax-aboveBalancer": { - actions: []action{ - expectSend(getSectors(maxBatch), true, false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addMax-aboveBalancer-gasAboveLimit": { - actions: []action{ - expectProcessBatch(getSectors(maxBatch), true, false, true), - expectSend(getSectors(maxBatch)[:maxBatch/2], true, false), - expectSend(getSectors(maxBatch)[maxBatch/2:], true, false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addSingle-belowBalancer": { - actions: []action{ - addSector(0, false), - waitPending(1), - flush([]abi.SectorNumber{0}, false, false), - }, - }, - "addTwo-belowBalancer": { - actions: []action{ - addSectors(getSectors(2), false), - waitPending(2), - flush(getSectors(2), false, false), - }, - }, - "addAte-belowBalancer": { - actions: []action{ - addSectors(getSectors(8), false), - waitPending(8), - flush(getSectors(8), false, false), - }, - }, - "addMax-belowBalancer": { - actions: []action{ - expectSend(getSectors(maxBatch), false, false), - addSectors(getSectors(maxBatch), false), - }, - }, - - "addAte-aboveBalancer-failOne": { - actions: []action{ - addSectors(getSectors(8), true), - waitPending(8), - flush(getSectors(8), true, true), - }, - }, - "addAte-belowBalancer-failOne": { - actions: []action{ - addSectors(getSectors(8), false), - waitPending(8), - flush(getSectors(8), false, true), - }, - }, - } - - for name, tc := range tcs { - tc := tc - - t.Run(name, func(t *testing.T) { - // create go mock controller here - mockCtrl := gomock.NewController(t) - // when test is done, assert expectations on all mock objects. 
- defer mockCtrl.Finish() - - // create them mocks - pcapi := mocks.NewMockCommitBatcherApi(mockCtrl) - - pcb := pipeline.NewCommitBatcher(ctx, t0123, pcapi, as, fc, cfg, &fakeProver{}) - - var promises []promise - - for _, a := range tc.actions { - p := a(t, pcapi, pcb) - if p != nil { - promises = append(promises, p) - } - } - - for _, p := range promises { - p(t) - } - - err := pcb.Stop(ctx) - require.NoError(t, err) - }) - } -} - -type fakeProver struct{} - -func (f fakeProver) AggregateSealProofs(aggregateInfo prooftypes.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { - return []byte("Trust me, I'm a proof"), nil -} - -var _ storiface.Prover = &fakeProver{} - -var dummyAddr = func() address.Address { - a, _ := address.NewFromString("t00") - return a -}() - -func makeBFTs(t *testing.T, basefee abi.TokenAmount, h abi.ChainEpoch) *types.TipSet { - dummyCid, _ := cid.Parse("bafkqaaa") - - var ts, err = types.NewTipSet([]*types.BlockHeader{ - { - Height: h, - Miner: dummyAddr, - - Parents: []cid.Cid{}, - - Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, - - ParentStateRoot: dummyCid, - Messages: dummyCid, - ParentMessageReceipts: dummyCid, - - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - - ParentBaseFee: basefee, - }, - }) - if t != nil { - require.NoError(t, err) - } - - return ts -} - -func makeTs(t *testing.T, h abi.ChainEpoch) *types.TipSet { - return makeBFTs(t, big.NewInt(0), h) -} - -var dummySmsg = &types.SignedMessage{ - Message: types.Message{ - From: dummyAddr, - To: dummyAddr, - }, - Signature: crypto.Signature{Type: crypto.SigTypeBLS}, -} - -type asel func(ctx context.Context, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) - -func (s asel) AddressFor(ctx context.Context, _ ctladdr.NodeApi, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, 
abi.TokenAmount, error) { - return s(ctx, mi, use, goodFunds, minFunds) -} - -var _ pipeline.AddressSelector = asel(nil) diff --git a/storage/pipeline/currentdealinfo_test.go b/storage/pipeline/currentdealinfo_test.go index 21141a35d..1ea05dc35 100644 --- a/storage/pipeline/currentdealinfo_test.go +++ b/storage/pipeline/currentdealinfo_test.go @@ -80,21 +80,21 @@ func TestGetCurrentDealInfo(t *testing.T) { } successDeal := &api.MarketDeal{ Proposal: proposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } earlierDeal := &api.MarketDeal{ Proposal: otherProposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, } anotherDeal := &api.MarketDeal{ Proposal: anotherProposal, - State: market.DealState{ + State: api.MarketDealState{ SectorStartEpoch: 1, LastUpdatedEpoch: 2, }, diff --git a/storage/pipeline/fsm.go b/storage/pipeline/fsm.go index ac3dafa86..ced6867d1 100644 --- a/storage/pipeline/fsm.go +++ b/storage/pipeline/fsm.go @@ -11,6 +11,7 @@ import ( "net/http" "os" "reflect" + "runtime" "time" "golang.org/x/xerrors" @@ -39,8 +40,27 @@ func (m *Sealing) Plan(events []statemachine.Event, user interface{}) (interface return nil, processed, nil } - return func(ctx statemachine.Context, si SectorInfo) error { - err := next(ctx, si) + return func(ctx statemachine.Context, si SectorInfo) (err error) { + // handle panics + defer func() { + if r := recover(); r != nil { + buf := make([]byte, 1<<16) + n := runtime.Stack(buf, false) + buf = buf[:n] + + l := Log{ + Timestamp: uint64(time.Now().Unix()), + Message: fmt.Sprintf("panic: %v\n%s", r, buf), + Kind: "panic", + } + si.logAppend(l) + + err = fmt.Errorf("panic: %v\n%s", r, buf) + } + }() + + // execute the next state + err = next(ctx, si) if err != nil { log.Errorf("unhandled sector error (%d): %+v", si.SectorNumber, err) return nil @@ -127,8 +147,8 @@ var fsmPlanners = map[SectorState]func(events 
[]statemachine.Event, state *Secto ), Committing: planCommitting, CommitFinalize: planOne( - on(SectorFinalized{}, SubmitCommit), - on(SectorFinalizedAvailable{}, SubmitCommit), + on(SectorFinalized{}, SubmitCommitAggregate), + on(SectorFinalizedAvailable{}, SubmitCommitAggregate), on(SectorFinalizeFailed{}, CommitFinalizeFailed), ), SubmitCommit: planOne( @@ -674,7 +694,7 @@ func planCommitting(events []statemachine.Event, state *SectorInfo) (uint64, err } case SectorCommitted: // the normal case e.apply(state) - state.State = SubmitCommit + state.State = SubmitCommitAggregate case SectorProofReady: // early finalize e.apply(state) state.State = CommitFinalize diff --git a/storage/pipeline/fsm_events.go b/storage/pipeline/fsm_events.go index a798a884b..94cd53e82 100644 --- a/storage/pipeline/fsm_events.go +++ b/storage/pipeline/fsm_events.go @@ -88,7 +88,7 @@ func (evt SectorAddPiece) apply(state *SectorInfo) { } type SectorPieceAdded struct { - NewPieces []api.SectorPiece + NewPieces []SafeSectorPiece } func (evt SectorPieceAdded) apply(state *SectorInfo) { @@ -114,9 +114,11 @@ type SectorPacked struct{ FillerPieces []abi.PieceInfo } func (evt SectorPacked) apply(state *SectorInfo) { for idx := range evt.FillerPieces { - state.Pieces = append(state.Pieces, api.SectorPiece{ - Piece: evt.FillerPieces[idx], - DealInfo: nil, // filler pieces don't have deals associated with them + state.Pieces = append(state.Pieces, SafeSectorPiece{ + real: api.SectorPiece{ + Piece: evt.FillerPieces[idx], + DealInfo: nil, // filler pieces don't have deals associated with them + }, }) } } @@ -419,7 +421,8 @@ type SectorUpdateDealIDs struct { func (evt SectorUpdateDealIDs) apply(state *SectorInfo) { for i, id := range evt.Updates { - state.Pieces[i].DealInfo.DealID = id + // NOTE: all update deals are builtin-market deals + state.Pieces[i].real.DealInfo.DealID = id } } diff --git a/storage/pipeline/fsm_test.go b/storage/pipeline/fsm_test.go index 7d7201953..c403fb129 100644 --- 
a/storage/pipeline/fsm_test.go +++ b/storage/pipeline/fsm_test.go @@ -70,10 +70,10 @@ func TestHappyPath(t *testing.T) { require.Equal(m.t, m.state.State, Committing) m.planSingle(SectorCommitted{}) - require.Equal(m.t, m.state.State, SubmitCommit) + require.Equal(m.t, m.state.State, SubmitCommitAggregate) - m.planSingle(SectorCommitSubmitted{}) - require.Equal(m.t, m.state.State, CommitWait) + m.planSingle(SectorCommitAggregateSent{}) + require.Equal(m.t, m.state.State, CommitAggregateWait) m.planSingle(SectorProving{}) require.Equal(m.t, m.state.State, FinalizeSector) @@ -81,7 +81,7 @@ func TestHappyPath(t *testing.T) { m.planSingle(SectorFinalized{}) require.Equal(m.t, m.state.State, Proving) - expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, SubmitCommit, CommitWait, FinalizeSector, Proving} + expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector, Proving} for i, n := range notif { if n.before.State != expected[i] { t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) @@ -135,9 +135,6 @@ func TestHappyPathFinalizeEarly(t *testing.T) { require.Equal(m.t, m.state.State, CommitFinalize) m.planSingle(SectorFinalized{}) - require.Equal(m.t, m.state.State, SubmitCommit) - - m.planSingle(SectorSubmitCommitAggregate{}) require.Equal(m.t, m.state.State, SubmitCommitAggregate) m.planSingle(SectorCommitAggregateSent{}) @@ -149,7 +146,7 @@ func TestHappyPathFinalizeEarly(t *testing.T) { m.planSingle(SectorFinalized{}) require.Equal(m.t, m.state.State, Proving) - expected := []SectorState{Packing, GetTicket, PreCommit1, PreCommit2, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, SubmitCommit, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector, Proving} + expected := []SectorState{Packing, 
GetTicket, PreCommit1, PreCommit2, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, SubmitCommitAggregate, CommitAggregateWait, FinalizeSector, Proving} for i, n := range notif { if n.before.State != expected[i] { t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) @@ -188,9 +185,9 @@ func TestCommitFinalizeFailed(t *testing.T) { require.Equal(m.t, m.state.State, CommitFinalize) m.planSingle(SectorFinalized{}) - require.Equal(m.t, m.state.State, SubmitCommit) + require.Equal(m.t, m.state.State, SubmitCommitAggregate) - expected := []SectorState{Committing, CommitFinalize, CommitFinalizeFailed, CommitFinalize, SubmitCommit} + expected := []SectorState{Committing, CommitFinalize, CommitFinalizeFailed, CommitFinalize, SubmitCommitAggregate} for i, n := range notif { if n.before.State != expected[i] { t.Fatalf("expected before state: %s, got: %s", expected[i], n.before.State) @@ -242,10 +239,10 @@ func TestSeedRevert(t *testing.T) { // not changing the seed this time _, _, err = m.s.plan([]statemachine.Event{{User: SectorSeedReady{SeedValue: nil, SeedEpoch: 5}}, {User: SectorCommitted{}}}, m.state) require.NoError(t, err) - require.Equal(m.t, m.state.State, SubmitCommit) + require.Equal(m.t, m.state.State, SubmitCommitAggregate) - m.planSingle(SectorCommitSubmitted{}) - require.Equal(m.t, m.state.State, CommitWait) + m.planSingle(SectorCommitAggregateSent{}) + require.Equal(m.t, m.state.State, CommitAggregateWait) m.planSingle(SectorProving{}) require.Equal(m.t, m.state.State, FinalizeSector) diff --git a/storage/pipeline/input.go b/storage/pipeline/input.go index b595f533d..6d41f7e81 100644 --- a/storage/pipeline/input.go +++ b/storage/pipeline/input.go @@ -5,7 +5,6 @@ import ( "sort" "time" - "github.com/ipfs/go-cid" "go.uber.org/zap" "golang.org/x/xerrors" @@ -13,14 +12,15 @@ import ( "github.com/filecoin-project/go-padreader" "github.com/filecoin-project/go-state-types/abi" 
"github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/storage/pipeline/lib/nullreader" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/ffiwrapper" @@ -32,10 +32,19 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e var used abi.UnpaddedPieceSize var lastDealEnd abi.ChainEpoch for _, piece := range sector.Pieces { - used += piece.Piece.Size.Unpadded() + used += piece.Piece().Size.Unpadded() - if piece.DealInfo != nil && piece.DealInfo.DealProposal.EndEpoch > lastDealEnd { - lastDealEnd = piece.DealInfo.DealProposal.EndEpoch + if !piece.HasDealInfo() { + continue + } + + endEpoch, err := piece.EndEpoch() + if err != nil { + return xerrors.Errorf("piece.EndEpoch: %w", err) + } + + if endEpoch > lastDealEnd { + lastDealEnd = endEpoch } } @@ -65,9 +74,9 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e if _, has := m.openSectors[sid]; !has { m.openSectors[sid] = &openSector{ used: used, - maybeAccept: func(cid cid.Cid) error { + maybeAccept: func(pk piece.PieceKey) error { // todo check deal start deadline (configurable) - m.assignedPieces[sid] = append(m.assignedPieces[sid], cid) + m.assignedPieces[sid] = append(m.assignedPieces[sid], pk) return ctx.Send(SectorAddPiece{}) }, @@ -94,7 +103,7 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, used 
abi.UnpaddedPieceSize) (bool, error) { log := log.WithOptions(zap.Fields( zap.Uint64("sector", uint64(sector.SectorNumber)), - zap.Int("deals", len(sector.dealIDs())), + zap.Int("dataPieces", len(sector.nonPaddingPieceInfos())), )) now := time.Now() @@ -117,7 +126,7 @@ func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, return false, xerrors.Errorf("getting per-sector deal limit: %w", err) } - if len(sector.dealIDs()) >= maxDeals { + if len(sector.nonPaddingPieceInfos()) >= maxDeals { // can't accept more deals log.Infow("starting to seal deal sector", "trigger", "maxdeals") return true, ctx.Send(SectorStartPacking{}) @@ -146,13 +155,24 @@ func (m *Sealing) maybeStartSealing(ctx statemachine.Context, sector SectorInfo, var dealSafeSealEpoch abi.ChainEpoch for _, piece := range sector.Pieces { - if piece.DealInfo == nil { + if !piece.HasDealInfo() { continue } - dealSafeSealEpoch = piece.DealInfo.DealProposal.StartEpoch - cfg.StartEpochSealingBuffer + startEpoch, err := piece.StartEpoch() + if err != nil { + log.Errorw("failed to get start epoch for deal", "piece", piece.String(), "error", err) + continue // not ideal, but skipping the check should break things less + } + + dealSafeSealEpoch = startEpoch - cfg.StartEpochSealingBuffer + + alloc, err := piece.GetAllocation(ctx.Context(), m.Api, types.EmptyTSK) + if err != nil { + log.Errorw("failed to get allocation for deal", "piece", piece.String(), "error", err) + continue // not ideal, but skipping the check should break things less + } - alloc, _ := m.Api.StateGetAllocationForPendingDeal(ctx.Context(), piece.DealInfo.DealID, types.EmptyTSK) // alloc is nil if this is not a verified deal in nv17 or later if alloc == nil { continue @@ -210,8 +230,8 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er var offset abi.UnpaddedPieceSize pieceSizes := make([]abi.UnpaddedPieceSize, len(sector.Pieces)) for i, p := range sector.Pieces { - pieceSizes[i] = 
p.Piece.Size.Unpadded() - offset += p.Piece.Size.Unpadded() + pieceSizes[i] = p.Piece().Size.Unpadded() + offset += p.Piece().Size.Unpadded() } maxDeals, err := getDealPerSectorLimit(ssize) @@ -227,7 +247,7 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er return xerrors.Errorf("piece %s assigned to sector %d not found", piece, sector.SectorNumber) } - if len(sector.dealIDs())+(i+1) > maxDeals { + if len(sector.nonPaddingPieceInfos())+(i+1) > maxDeals { // todo: this is rather unlikely to happen, but in case it does, return the deal to waiting queue instead of failing it deal.accepted(sector.SectorNumber, offset, xerrors.Errorf("too many deals assigned to sector %d, dropping deal", sector.SectorNumber)) continue @@ -263,8 +283,10 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er } pieceSizes = append(pieceSizes, p.Unpadded()) - res.NewPieces = append(res.NewPieces, api.SectorPiece{ - Piece: ppi, + res.NewPieces = append(res.NewPieces, SafeSectorPiece{ + api.SectorPiece{ + Piece: ppi, + }, }) } @@ -278,22 +300,26 @@ func (m *Sealing) handleAddPiece(ctx statemachine.Context, sector SectorInfo) er deal.accepted(sector.SectorNumber, offset, err) return ctx.Send(SectorAddPieceFailed{err}) } - if !ppi.PieceCID.Equals(deal.deal.DealProposal.PieceCID) { - err = xerrors.Errorf("got unexpected piece CID: expected:%s, got:%s", deal.deal.DealProposal.PieceCID, ppi.PieceCID) + if !ppi.PieceCID.Equals(deal.deal.PieceCID()) { + err = xerrors.Errorf("got unexpected piece CID: expected:%s, got:%s", deal.deal.PieceCID(), ppi.PieceCID) deal.accepted(sector.SectorNumber, offset, err) return ctx.Send(SectorAddPieceFailed{err}) } - log.Infow("deal added to a sector", "deal", deal.deal.DealID, "sector", sector.SectorNumber, "piece", ppi.PieceCID) + log.Infow("deal added to a sector", "pieceID", deal.deal.String(), "sector", sector.SectorNumber, "piece", ppi.PieceCID) deal.accepted(sector.SectorNumber, offset, nil) offset 
+= deal.size pieceSizes = append(pieceSizes, deal.size) - res.NewPieces = append(res.NewPieces, api.SectorPiece{ - Piece: ppi, - DealInfo: &deal.deal, + dinfo := deal.deal.Impl() + + res.NewPieces = append(res.NewPieces, SafeSectorPiece{ + api.SectorPiece{ + Piece: ppi, + DealInfo: &dinfo, + }, }) } @@ -304,8 +330,13 @@ func (m *Sealing) handleAddPieceFailed(ctx statemachine.Context, sector SectorIn return ctx.Send(SectorRetryWaitDeals{}) } -func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, deal api.PieceDealInfo) (api.SectorOffset, error) { - log.Infof("Adding piece for deal %d (publish msg: %s)", deal.DealID, deal.PublishCid) +func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, pieceInfo piece.PieceDealInfo) (api.SectorOffset, error) { + return m.sectorAddPieceToAny(ctx, size, data, &pieceInfo) +} + +func (m *Sealing) sectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, pieceInfo UniversalPieceInfo) (api.SectorOffset, error) { + log.Infof("Adding piece %s", pieceInfo.String()) + if (padreader.PaddedSize(uint64(size))) != size { return api.SectorOffset{}, xerrors.Errorf("cannot allocate unpadded piece") } @@ -324,10 +355,6 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec return api.SectorOffset{}, xerrors.Errorf("piece cannot fit into a sector") } - if _, err := deal.DealProposal.Cid(); err != nil { - return api.SectorOffset{}, xerrors.Errorf("getting proposal CID: %w", err) - } - cfg, err := m.getConfig() if err != nil { return api.SectorOffset{}, xerrors.Errorf("getting config: %w", err) @@ -337,19 +364,34 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec if err != nil { return api.SectorOffset{}, xerrors.Errorf("couldnt get chain head: %w", err) } - if ts.Height()+cfg.StartEpochSealingBuffer > deal.DealProposal.StartEpoch { - return 
api.SectorOffset{}, xerrors.Errorf( - "cannot add piece for deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d", - deal.DealProposal.PieceCID, ts.Height(), deal.DealProposal.StartEpoch) + + nv, err := m.Api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("getting network version: %w", err) } - claimTerms, err := m.getClaimTerms(ctx, deal, ts.Key()) + if err := pieceInfo.Valid(nv); err != nil { + return api.SectorOffset{}, xerrors.Errorf("piece metadata invalid: %w", err) + } + + startEpoch, err := pieceInfo.StartEpoch() + if err != nil { + return api.SectorOffset{}, xerrors.Errorf("getting last start epoch: %w", err) + } + + if ts.Height()+cfg.StartEpochSealingBuffer > startEpoch { + return api.SectorOffset{}, xerrors.Errorf( + "cannot add piece for deal with piece CID %s: current epoch %d has passed deal proposal start epoch %d", + pieceInfo.PieceCID(), ts.Height(), startEpoch) + } + + claimTerms, err := m.getClaimTerms(ctx, pieceInfo, ts.Key()) if err != nil { return api.SectorOffset{}, err } m.inputLk.Lock() - if pp, exist := m.pendingPieces[proposalCID(deal)]; exist { + if pp, exist := m.pendingPieces[pieceInfo.Key()]; exist { m.inputLk.Unlock() // we already have a pre-existing add piece call for this deal, let's wait for it to finish and see if it's successful @@ -366,7 +408,7 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec } // addPendingPiece takes over m.inputLk - pp := m.addPendingPiece(ctx, size, data, deal, claimTerms, sp) + pp := m.addPendingPiece(ctx, size, data, pieceInfo, claimTerms, sp) res, err := waitAddPieceResp(ctx, pp) if err != nil { @@ -375,32 +417,41 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err } -func (m *Sealing) getClaimTerms(ctx context.Context, deal api.PieceDealInfo, tsk types.TipSetKey) 
(pieceClaimBounds, error) { +func (m *Sealing) getClaimTerms(ctx context.Context, deal UniversalPieceInfo, tsk types.TipSetKey) (pieceClaimBounds, error) { + + all, err := deal.GetAllocation(ctx, m.Api, tsk) + if err != nil { + return pieceClaimBounds{}, err + } + if all != nil { + startEpoch, err := deal.StartEpoch() + if err != nil { + return pieceClaimBounds{}, err + } + + return pieceClaimBounds{ + claimTermEnd: startEpoch + all.TermMax, + }, nil + } + nv, err := m.Api.StateNetworkVersion(ctx, tsk) if err != nil { return pieceClaimBounds{}, err } - if nv >= network.Version17 { - all, err := m.Api.StateGetAllocationForPendingDeal(ctx, deal.DealID, tsk) - if err != nil { - return pieceClaimBounds{}, err - } - if all != nil { - return pieceClaimBounds{ - claimTermEnd: deal.DealProposal.StartEpoch + all.TermMax, - }, nil - } + endEpoch, err := deal.EndEpoch() + if err != nil { + return pieceClaimBounds{}, err } // no allocation for this deal, so just use a really high number for "term end" return pieceClaimBounds{ - claimTermEnd: deal.DealProposal.EndEpoch + policy.GetSectorMaxLifetime(abi.RegisteredSealProof_StackedDrg32GiBV1_1, network.Version17), + claimTermEnd: endEpoch + policy.GetSectorMaxLifetime(abi.RegisteredSealProof_StackedDrg32GiBV1_1, nv), }, nil } // called with m.inputLk; transfers the lock to another goroutine! 
-func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, deal api.PieceDealInfo, ct pieceClaimBounds, sp abi.RegisteredSealProof) *pendingPiece { +func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSize, data storiface.Data, deal UniversalPieceInfo, ct pieceClaimBounds, sp abi.RegisteredSealProof) *pendingPiece { doneCh := make(chan struct{}) pp := &pendingPiece{ size: size, @@ -417,14 +468,12 @@ func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSiz close(pp.doneCh) } - log.Debugw("new pending piece", "dealId", deal.DealID, - "piece", deal.DealProposal.PieceCID, - "size", size, - "dealStart", deal.DealSchedule.StartEpoch, - "dealEnd", deal.DealSchedule.EndEpoch, + log.Debugw("new pending piece", "pieceID", deal.String(), + "dealStart", result.Wrap(deal.StartEpoch()), + "dealEnd", result.Wrap(deal.EndEpoch()), "termEnd", ct.claimTermEnd) - m.pendingPieces[proposalCID(deal)] = pp + m.pendingPieces[deal.Key()] = pp go func() { defer m.inputLk.Unlock() if err := m.updateInput(ctx, sp); err != nil { @@ -489,7 +538,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e type match struct { sector abi.SectorID - deal cid.Cid + deal piece.PieceKey dealEnd abi.ChainEpoch claimTermEnd abi.ChainEpoch @@ -499,7 +548,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e } var matches []match - toAssign := map[cid.Cid]struct{}{} // used to maybe create new sectors + toAssign := map[piece.PieceKey]struct{}{} // used to maybe create new sectors // todo: this is distinctly O(n^2), may need to be optimized for tiny deals and large scale miners // (unlikely to be a problem now) @@ -523,12 +572,18 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e continue } + endEpoch, err := piece.deal.EndEpoch() + if err != nil { + log.Errorf("failed to get end epoch for deal %s", piece.deal) + continue + 
} + if piece.size <= avail { // (note: if we have enough space for the piece, we also have enough space for inter-piece padding) matches = append(matches, match{ sector: id, deal: proposalCid, - dealEnd: piece.deal.DealProposal.EndEpoch, + dealEnd: endEpoch, claimTermEnd: piece.claimTerms.claimTermEnd, size: piece.size, @@ -600,6 +655,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e log.Debugw("updateInput matching done", "matches", len(matches), "toAssign", len(toAssign), "assigned", assigned, "openSectors", len(m.openSectors), "pieces", len(m.pendingPieces)) if len(toAssign) > 0 { + log.Errorf("we are trying to create a new sector with open sectors %v", m.openSectors) if err := m.tryGetDealSector(ctx, sp, getExpirationCached); err != nil { log.Errorw("Failed to create a new sector for deals", "error", err) } @@ -609,7 +665,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e } // pendingPieceIndex is an index in the Sealing.pendingPieces map -type pendingPieceIndex cid.Cid +type pendingPieceIndex piece.PieceKey type pieceBound struct { epoch abi.ChainEpoch @@ -633,13 +689,21 @@ func (m *Sealing) pendingPieceEpochBounds() []pieceBound { continue } + endEpoch, err := piece.deal.EndEpoch() + if err != nil { + // this really should never happen, at this point we have validated + // the piece enough times + log.Errorf("failed to get end epoch for deal %s: %v", ppi, err) + continue + } + // start bound on deal end - if boundsByEpoch[piece.deal.DealProposal.EndEpoch] == nil { - boundsByEpoch[piece.deal.DealProposal.EndEpoch] = &pieceBound{ - epoch: piece.deal.DealProposal.EndEpoch, + if boundsByEpoch[endEpoch] == nil { + boundsByEpoch[endEpoch] = &pieceBound{ + epoch: endEpoch, } } - boundsByEpoch[piece.deal.DealProposal.EndEpoch].boundStart = append(boundsByEpoch[piece.deal.DealProposal.EndEpoch].boundStart, pendingPieceIndex(ppi)) + boundsByEpoch[endEpoch].boundStart = 
append(boundsByEpoch[endEpoch].boundStart, pendingPieceIndex(ppi)) // end bound on term max if boundsByEpoch[piece.claimTerms.claimTermEnd] == nil { @@ -662,10 +726,10 @@ func (m *Sealing) pendingPieceEpochBounds() []pieceBound { var curBoundBytes abi.UnpaddedPieceSize for i, bound := range out { for _, ppi := range bound.boundStart { - curBoundBytes += m.pendingPieces[cid.Cid(ppi)].size + curBoundBytes += m.pendingPieces[piece.PieceKey(ppi)].size } for _, ppi := range bound.boundEnd { - curBoundBytes -= m.pendingPieces[cid.Cid(ppi)].size + curBoundBytes -= m.pendingPieces[piece.PieceKey(ppi)].size } out[i].dealBytesInBound = curBoundBytes @@ -893,18 +957,30 @@ func (m *Sealing) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showO return api.SectorInfo{}, err } + nv, err := m.Api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return api.SectorInfo{}, xerrors.Errorf("getting network version: %w", err) + } + deals := make([]abi.DealID, len(info.Pieces)) pieces := make([]api.SectorPiece, len(info.Pieces)) for i, piece := range info.Pieces { - pieces[i].Piece = piece.Piece - if piece.DealInfo == nil { + pieces[i].Piece = piece.Piece() + + if !piece.HasDealInfo() { + continue + } + + pdi := piece.Impl() + if pdi.Valid(nv) != nil { continue } - pdi := *piece.DealInfo // copy pieces[i].DealInfo = &pdi - deals[i] = piece.DealInfo.DealID + if pdi.PublishCid != nil { + deals[i] = pdi.DealID + } } log := make([]api.SectorLog, len(info.Log)) @@ -955,14 +1031,4 @@ func (m *Sealing) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showO return sInfo, nil } -func proposalCID(deal api.PieceDealInfo) cid.Cid { - pc, err := deal.DealProposal.Cid() - if err != nil { - log.Errorf("DealProposal.Cid error: %+v", err) - return cid.Undef - } - - return pc -} - var _ sectorblocks.SectorBuilder = &Sealing{} diff --git a/storage/pipeline/mocks/api.go b/storage/pipeline/mocks/api.go index 5c67a1c42..a4f1cd9ef 100644 --- a/storage/pipeline/mocks/api.go +++ 
b/storage/pipeline/mocks/api.go @@ -9,6 +9,7 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" address "github.com/filecoin-project/go-address" @@ -64,6 +65,21 @@ func (mr *MockSealingAPIMockRecorder) ChainGetMessage(arg0, arg1 interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainGetMessage", reflect.TypeOf((*MockSealingAPI)(nil).ChainGetMessage), arg0, arg1) } +// ChainHasObj mocks base method. +func (m *MockSealingAPI) ChainHasObj(arg0 context.Context, arg1 cid.Cid) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainHasObj", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainHasObj indicates an expected call of ChainHasObj. +func (mr *MockSealingAPIMockRecorder) ChainHasObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHasObj", reflect.TypeOf((*MockSealingAPI)(nil).ChainHasObj), arg0, arg1) +} + // ChainHead mocks base method. func (m *MockSealingAPI) ChainHead(arg0 context.Context) (*types.TipSet, error) { m.ctrl.T.Helper() @@ -79,6 +95,20 @@ func (mr *MockSealingAPIMockRecorder) ChainHead(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainHead", reflect.TypeOf((*MockSealingAPI)(nil).ChainHead), arg0) } +// ChainPutObj mocks base method. +func (m *MockSealingAPI) ChainPutObj(arg0 context.Context, arg1 blocks.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainPutObj", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChainPutObj indicates an expected call of ChainPutObj. 
+func (mr *MockSealingAPIMockRecorder) ChainPutObj(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainPutObj", reflect.TypeOf((*MockSealingAPI)(nil).ChainPutObj), arg0, arg1) +} + // ChainReadObj mocks base method. func (m *MockSealingAPI) ChainReadObj(arg0 context.Context, arg1 cid.Cid) ([]byte, error) { m.ctrl.T.Helper() @@ -139,19 +169,34 @@ func (mr *MockSealingAPIMockRecorder) StateAccountKey(arg0, arg1, arg2 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockSealingAPI)(nil).StateAccountKey), arg0, arg1, arg2) } -// StateComputeDataCID mocks base method. -func (m *MockSealingAPI) StateComputeDataCID(arg0 context.Context, arg1 address.Address, arg2 abi.RegisteredSealProof, arg3 []abi.DealID, arg4 types.TipSetKey) (cid.Cid, error) { +// StateGetActor mocks base method. +func (m *MockSealingAPI) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.ActorV5, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StateComputeDataCID", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(cid.Cid) + ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.ActorV5) ret1, _ := ret[1].(error) return ret0, ret1 } -// StateComputeDataCID indicates an expected call of StateComputeDataCID. -func (mr *MockSealingAPIMockRecorder) StateComputeDataCID(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +// StateGetActor indicates an expected call of StateGetActor. 
+func (mr *MockSealingAPIMockRecorder) StateGetActor(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateComputeDataCID", reflect.TypeOf((*MockSealingAPI)(nil).StateComputeDataCID), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockSealingAPI)(nil).StateGetActor), arg0, arg1, arg2) +} + +// StateGetAllocation mocks base method. +func (m *MockSealingAPI) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocation", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocation indicates an expected call of StateGetAllocation. +func (mr *MockSealingAPIMockRecorder) StateGetAllocation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocation", reflect.TypeOf((*MockSealingAPI)(nil).StateGetAllocation), arg0, arg1, arg2, arg3) } // StateGetAllocationForPendingDeal mocks base method. @@ -169,6 +214,21 @@ func (mr *MockSealingAPIMockRecorder) StateGetAllocationForPendingDeal(arg0, arg return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockSealingAPI)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2) } +// StateGetAllocationIdForPendingDeal mocks base method. 
+func (m *MockSealingAPI) StateGetAllocationIdForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (verifreg.AllocationId, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocationIdForPendingDeal", arg0, arg1, arg2) + ret0, _ := ret[0].(verifreg.AllocationId) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocationIdForPendingDeal indicates an expected call of StateGetAllocationIdForPendingDeal. +func (mr *MockSealingAPIMockRecorder) StateGetAllocationIdForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationIdForPendingDeal", reflect.TypeOf((*MockSealingAPI)(nil).StateGetAllocationIdForPendingDeal), arg0, arg1, arg2) +} + // StateGetRandomnessFromBeacon mocks base method. func (m *MockSealingAPI) StateGetRandomnessFromBeacon(arg0 context.Context, arg1 crypto.DomainSeparationTag, arg2 abi.ChainEpoch, arg3 []byte, arg4 types.TipSetKey) (abi.Randomness, error) { m.ctrl.T.Helper() @@ -439,6 +499,21 @@ func (mr *MockSealingAPIMockRecorder) StateSectorPreCommitInfo(arg0, arg1, arg2, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSectorPreCommitInfo", reflect.TypeOf((*MockSealingAPI)(nil).StateSectorPreCommitInfo), arg0, arg1, arg2, arg3) } +// StateVMCirculatingSupplyInternal mocks base method. +func (m *MockSealingAPI) StateVMCirculatingSupplyInternal(arg0 context.Context, arg1 types.TipSetKey) (api.CirculatingSupply, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateVMCirculatingSupplyInternal", arg0, arg1) + ret0, _ := ret[0].(api.CirculatingSupply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateVMCirculatingSupplyInternal indicates an expected call of StateVMCirculatingSupplyInternal. 
+func (mr *MockSealingAPIMockRecorder) StateVMCirculatingSupplyInternal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateVMCirculatingSupplyInternal", reflect.TypeOf((*MockSealingAPI)(nil).StateVMCirculatingSupplyInternal), arg0, arg1) +} + // StateWaitMsg mocks base method. func (m *MockSealingAPI) StateWaitMsg(arg0 context.Context, arg1 cid.Cid, arg2 uint64, arg3 abi.ChainEpoch, arg4 bool) (*api.MsgLookup, error) { m.ctrl.T.Helper() diff --git a/storage/pipeline/mocks/mock_precommit_batcher.go b/storage/pipeline/mocks/mock_precommit_batcher.go index 68cce7fb0..fd46f601b 100644 --- a/storage/pipeline/mocks/mock_precommit_batcher.go +++ b/storage/pipeline/mocks/mock_precommit_batcher.go @@ -103,6 +103,21 @@ func (mr *MockPreCommitBatcherApiMockRecorder) StateAccountKey(arg0, arg1, arg2 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateAccountKey", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateAccountKey), arg0, arg1, arg2) } +// StateGetAllocation mocks base method. +func (m *MockPreCommitBatcherApi) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateGetAllocation", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*verifreg.Allocation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateGetAllocation indicates an expected call of StateGetAllocation. +func (mr *MockPreCommitBatcherApiMockRecorder) StateGetAllocation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocation", reflect.TypeOf((*MockPreCommitBatcherApi)(nil).StateGetAllocation), arg0, arg1, arg2, arg3) +} + // StateGetAllocationForPendingDeal mocks base method. 
func (m *MockPreCommitBatcherApi) StateGetAllocationForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*verifreg.Allocation, error) { m.ctrl.T.Helper() diff --git a/storage/pipeline/piece/cbor_gen.go b/storage/pipeline/piece/cbor_gen.go new file mode 100644 index 000000000..ccf44e54b --- /dev/null +++ b/storage/pipeline/piece/cbor_gen.go @@ -0,0 +1,451 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package piece + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" + + abi "github.com/filecoin-project/go-state-types/abi" + miner "github.com/filecoin-project/go-state-types/builtin/v13/miner" + market "github.com/filecoin-project/go-state-types/builtin/v9/market" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{166}); err != nil { + return err + } + + // t.DealID (abi.DealID) (uint64) + if len("DealID") > 8192 { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealID")); err != nil { + return err + } + + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > 8192 { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PublishCid")); err != nil { + return err + } + + if t.PublishCid == nil { + if _, err := cw.Write(cbg.CborNull); err != nil { + 
return err + } + } else { + if err := cbg.WriteCid(cw, *t.PublishCid); err != nil { + return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) + } + } + + // t.DealProposal (market.DealProposal) (struct) + if len("DealProposal") > 8192 { + return xerrors.Errorf("Value in field \"DealProposal\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealProposal"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealProposal")); err != nil { + return err + } + + if err := t.DealProposal.MarshalCBOR(cw); err != nil { + return err + } + + // t.DealSchedule (piece.DealSchedule) (struct) + if len("DealSchedule") > 8192 { + return xerrors.Errorf("Value in field \"DealSchedule\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealSchedule"))); err != nil { + return err + } + if _, err := cw.WriteString(string("DealSchedule")); err != nil { + return err + } + + if err := t.DealSchedule.MarshalCBOR(cw); err != nil { + return err + } + + // t.KeepUnsealed (bool) (bool) + if len("KeepUnsealed") > 8192 { + return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil { + return err + } + if _, err := cw.WriteString(string("KeepUnsealed")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil { + return err + } + + // t.PieceActivationManifest (miner.PieceActivationManifest) (struct) + if len("PieceActivationManifest") > 8192 { + return xerrors.Errorf("Value in field \"PieceActivationManifest\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("PieceActivationManifest"))); err != nil { + return err + } + if _, err := cw.WriteString(string("PieceActivationManifest")); err != nil { + return err + } + + if err := t.PieceActivationManifest.MarshalCBOR(cw); err != nil { + return err + } + return 
nil +} + +func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) { + *t = PieceDealInfo{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(cr) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } + + } + // t.DealProposal (market.DealProposal) (struct) + case "DealProposal": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.DealProposal = new(market.DealProposal) + if err := t.DealProposal.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) + } + } + + } + // t.DealSchedule (piece.DealSchedule) (struct) + case "DealSchedule": + + { + + if err := t.DealSchedule.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err) + } + + } + // t.KeepUnsealed (bool) (bool) + case "KeepUnsealed": + + 
maj, extra, err = cr.ReadHeader() + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.KeepUnsealed = false + case 21: + t.KeepUnsealed = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.PieceActivationManifest (miner.PieceActivationManifest) (struct) + case "PieceActivationManifest": + + { + + b, err := cr.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := cr.UnreadByte(); err != nil { + return err + } + t.PieceActivationManifest = new(miner.PieceActivationManifest) + if err := t.PieceActivationManifest.UnmarshalCBOR(cr); err != nil { + return xerrors.Errorf("unmarshaling t.PieceActivationManifest pointer: %w", err) + } + } + + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} +func (t *DealSchedule) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + cw := cbg.NewCborWriter(w) + + if _, err := cw.Write([]byte{162}); err != nil { + return err + } + + // t.EndEpoch (abi.ChainEpoch) (int64) + if len("EndEpoch") > 8192 { + return xerrors.Errorf("Value in field \"EndEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("EndEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("EndEpoch")); err != nil { + return err + } + + if t.EndEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil { + return err + } + } + + // t.StartEpoch (abi.ChainEpoch) (int64) + if len("StartEpoch") > 8192 { + return xerrors.Errorf("Value in field \"StartEpoch\" was too long") + } + + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, 
uint64(len("StartEpoch"))); err != nil { + return err + } + if _, err := cw.WriteString(string("StartEpoch")); err != nil { + return err + } + + if t.StartEpoch >= 0 { + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil { + return err + } + } else { + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil { + return err + } + } + + return nil +} + +func (t *DealSchedule) UnmarshalCBOR(r io.Reader) (err error) { + *t = DealSchedule{} + + cr := cbg.NewCborReader(r) + + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + defer func() { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("DealSchedule: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringWithMax(cr, 8192) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.EndEpoch (abi.ChainEpoch) (int64) + case "EndEpoch": + { + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + var extraI int64 + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EndEpoch = abi.ChainEpoch(extraI) + } + // t.StartEpoch (abi.ChainEpoch) (int64) + case "StartEpoch": + { + maj, extra, err := cr.ReadHeader() + if err != nil { + return err + } + var extraI int64 + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 
{ + return fmt.Errorf("int64 negative overflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.StartEpoch = abi.ChainEpoch(extraI) + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/storage/pipeline/piece/piece_info.go b/storage/pipeline/piece/piece_info.go new file mode 100644 index 000000000..48e15751a --- /dev/null +++ b/storage/pipeline/piece/piece_info.go @@ -0,0 +1,186 @@ +package piece + +import ( + "context" + "fmt" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + "github.com/filecoin-project/lotus/chain/types" +) + +// DealInfo is a tuple of deal identity and its schedule +type PieceDealInfo struct { + // "Old" builtin-market deal info + PublishCid *cid.Cid + DealID abi.DealID + DealProposal *market.DealProposal + + // Common deal info, required for all pieces + // TODO: https://github.com/filecoin-project/lotus/issues/11237 + DealSchedule DealSchedule + + // Direct Data Onboarding + // When PieceActivationManifest is set, builtin-market deal info must not be set + PieceActivationManifest *miner.PieceActivationManifest + + // Best-effort deal asks + KeepUnsealed bool +} + +// DealSchedule communicates the time interval of a storage deal. The deal must +// appear in a sealed (proven) sector no later than StartEpoch, otherwise it +// is invalid. 
+type DealSchedule struct { + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch +} + +func (ds *PieceDealInfo) isBuiltinMarketDeal() bool { + return ds.PublishCid != nil +} + +// Valid validates the deal info after being accepted through RPC, checks that +// the deal metadata is well-formed. +func (ds *PieceDealInfo) Valid(nv network.Version) error { + hasLegacyDealInfo := ds.PublishCid != nil && ds.DealID != 0 && ds.DealProposal != nil + hasPieceActivationManifest := ds.PieceActivationManifest != nil + + if hasLegacyDealInfo && hasPieceActivationManifest { + return xerrors.Errorf("piece deal info has both legacy deal info and piece activation manifest") + } + + if !hasLegacyDealInfo && !hasPieceActivationManifest { + return xerrors.Errorf("piece deal info has neither legacy deal info nor piece activation manifest") + } + + if hasLegacyDealInfo { + if _, err := ds.DealProposal.Cid(); err != nil { + return xerrors.Errorf("checking proposal CID: %w", err) + } + } + + if ds.DealSchedule.StartEpoch <= 0 { + return xerrors.Errorf("invalid deal start epoch %d", ds.DealSchedule.StartEpoch) + } + if ds.DealSchedule.EndEpoch <= 0 { + return xerrors.Errorf("invalid deal end epoch %d", ds.DealSchedule.EndEpoch) + } + if ds.DealSchedule.EndEpoch <= ds.DealSchedule.StartEpoch { + return xerrors.Errorf("invalid deal end epoch %d (start %d)", ds.DealSchedule.EndEpoch, ds.DealSchedule.StartEpoch) + } + + if hasPieceActivationManifest { + if nv < network.Version22 { + return xerrors.Errorf("direct-data-onboarding pieces aren't accepted before network version 22") + } + + // todo any more checks seem reasonable to put here? 
+ } + + return nil +} + +type AllocationAPI interface { + StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) +} + +func (ds *PieceDealInfo) GetAllocation(ctx context.Context, aapi AllocationAPI, tsk types.TipSetKey) (*verifregtypes.Allocation, error) { + switch { + case ds.isBuiltinMarketDeal(): + return aapi.StateGetAllocationForPendingDeal(ctx, ds.DealID, tsk) + default: + if ds.PieceActivationManifest.VerifiedAllocationKey == nil { + return nil, nil + } + + caddr, err := address.NewIDAddress(uint64(ds.PieceActivationManifest.VerifiedAllocationKey.Client)) + if err != nil { + return nil, err + } + + all, err := aapi.StateGetAllocation(ctx, caddr, verifregtypes.AllocationId(ds.PieceActivationManifest.VerifiedAllocationKey.ID), tsk) + if err != nil { + return nil, err + } + + if all == nil { + return nil, nil + } + + if all.Client != ds.PieceActivationManifest.VerifiedAllocationKey.Client { + return nil, xerrors.Errorf("allocation client mismatch: %d != %d", all.Client, ds.PieceActivationManifest.VerifiedAllocationKey.Client) + } + + return all, nil + } +} + +// StartEpoch returns the last epoch in which the sector containing this deal +// must be sealed (committed) in order for the deal to be valid. +func (ds *PieceDealInfo) StartEpoch() (abi.ChainEpoch, error) { + switch { + case ds.isBuiltinMarketDeal(): + return ds.DealSchedule.StartEpoch, nil + default: + // note - when implementing make sure to cache any dynamically computed values + // todo do we want a smarter mechanism here + return ds.DealSchedule.StartEpoch, nil + } +} + +// EndEpoch returns the minimum epoch until which the sector containing this +// deal must be committed until. 
+func (ds *PieceDealInfo) EndEpoch() (abi.ChainEpoch, error) { + switch { + case ds.isBuiltinMarketDeal(): + return ds.DealSchedule.EndEpoch, nil + default: + // note - when implementing make sure to cache any dynamically computed values + // todo do we want a smarter mechanism here + return ds.DealSchedule.EndEpoch, nil + } +} + +func (ds *PieceDealInfo) PieceCID() cid.Cid { + switch { + case ds.isBuiltinMarketDeal(): + return ds.DealProposal.PieceCID + default: + return ds.PieceActivationManifest.CID + } +} + +func (ds *PieceDealInfo) String() string { + switch { + case ds.isBuiltinMarketDeal(): + return fmt.Sprintf("BuiltinMarket{DealID: %d, PieceCID: %s, PublishCid: %s}", ds.DealID, ds.DealProposal.PieceCID, ds.PublishCid) + default: + // todo check that VAlloc doesn't print as a pointer + return fmt.Sprintf("DirectDataOnboarding{PieceCID: %s, VAllloc: %x}", ds.PieceActivationManifest.CID, ds.PieceActivationManifest.VerifiedAllocationKey) + } +} + +func (ds *PieceDealInfo) KeepUnsealedRequested() bool { + return ds.KeepUnsealed +} + +type PieceKey string + +// Key returns a unique identifier for this deal info, for use in maps. 
+func (ds *PieceDealInfo) Key() PieceKey { + return PieceKey(ds.String()) +} + +func (ds *PieceDealInfo) Impl() PieceDealInfo { + return *ds +} diff --git a/storage/pipeline/pledge.go b/storage/pipeline/pledge.go new file mode 100644 index 000000000..04567fca1 --- /dev/null +++ b/storage/pipeline/pledge.go @@ -0,0 +1,114 @@ +package sealing + +import ( + "context" + + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/types" +) + +var initialPledgeNum = types.NewInt(110) +var initialPledgeDen = types.NewInt(100) + +func (m *Sealing) pledgeForPower(ctx context.Context, addedPower abi.StoragePower) (abi.TokenAmount, error) { + store := adt.WrapStore(ctx, cbor.NewCborStore(bstore.NewAPIBlockstore(m.Api))) + + // load power actor + var ( + powerSmoothed builtin.FilterEstimate + pledgeCollateral abi.TokenAmount + ) + if act, err := m.Api.StateGetActor(ctx, power.Address, types.EmptyTSK); err != nil { + return types.EmptyInt, xerrors.Errorf("loading power actor: %w", err) + } else if s, err := power.Load(store, act); err != nil { + return types.EmptyInt, xerrors.Errorf("loading power actor state: %w", err) + } else if p, err := s.TotalPowerSmoothed(); err != nil { + return types.EmptyInt, xerrors.Errorf("failed to determine total power: %w", err) + } else if c, err := s.TotalLocked(); err != nil { + return types.EmptyInt, xerrors.Errorf("failed to determine pledge collateral: %w", err) + } else { + powerSmoothed = p + pledgeCollateral = c + } + + // load reward actor + rewardActor, err := m.Api.StateGetActor(ctx, 
reward.Address, types.EmptyTSK) + if err != nil { + return types.EmptyInt, xerrors.Errorf("loading reward actor: %w", err) + } + + rewardState, err := reward.Load(store, rewardActor) + if err != nil { + return types.EmptyInt, xerrors.Errorf("loading reward actor state: %w", err) + } + + // get circulating supply + circSupply, err := m.Api.StateVMCirculatingSupplyInternal(ctx, types.EmptyTSK) + if err != nil { + return big.Zero(), xerrors.Errorf("getting circulating supply: %w", err) + } + + // do the calculation + initialPledge, err := rewardState.InitialPledgeForPower( + addedPower, + pledgeCollateral, + &powerSmoothed, + circSupply.FilCirculating, + ) + if err != nil { + return big.Zero(), xerrors.Errorf("calculating initial pledge: %w", err) + } + + return types.BigDiv(types.BigMul(initialPledge, initialPledgeNum), initialPledgeDen), nil +} + +func (m *Sealing) sectorWeight(ctx context.Context, sector SectorInfo, expiration abi.ChainEpoch) (abi.StoragePower, error) { + spt, err := m.currentSealProof(ctx) + if err != nil { + return types.EmptyInt, xerrors.Errorf("getting seal proof type: %w", err) + } + + ssize, err := spt.SectorSize() + if err != nil { + return types.EmptyInt, xerrors.Errorf("getting sector size: %w", err) + } + + ts, err := m.Api.ChainHead(ctx) + if err != nil { + return types.EmptyInt, xerrors.Errorf("getting chain head: %w", err) + } + + // get verified deal infos + var w, vw = big.Zero(), big.Zero() + + for _, piece := range sector.Pieces { + if !piece.HasDealInfo() { + // todo StateMinerInitialPledgeCollateral doesn't add cc/padding to non-verified weight, is that correct? 
+ continue + } + + alloc, err := piece.GetAllocation(ctx, m.Api, ts.Key()) + if err != nil || alloc == nil { + w = big.Add(w, abi.NewStoragePower(int64(piece.Piece().Size))) + continue + } + + vw = big.Add(vw, abi.NewStoragePower(int64(piece.Piece().Size))) + } + + // load market actor + duration := expiration - ts.Height() + sectorWeight := builtin.QAPowerForWeight(ssize, duration, w, vw) + + return sectorWeight, nil +} diff --git a/storage/pipeline/precommit_batch.go b/storage/pipeline/precommit_batch.go index 3a86c8628..099988010 100644 --- a/storage/pipeline/precommit_batch.go +++ b/storage/pipeline/precommit_batch.go @@ -36,6 +36,7 @@ type PreCommitBatcherApi interface { ChainHead(ctx context.Context) (*types.TipSet, error) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) // Address selector WalletBalance(context.Context, address.Address) (types.BigInt, error) @@ -428,11 +429,18 @@ func (b *PreCommitBatcher) Stop(ctx context.Context) error { func getDealStartCutoff(si SectorInfo) abi.ChainEpoch { cutoffEpoch := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback for _, p := range si.Pieces { - if p.DealInfo == nil { + if !p.HasDealInfo() { + continue + } + + startEpoch, err := p.StartEpoch() + if err != nil { + // almost definitely can't happen, but if it does there's less harm in + // just logging the error and moving on + log.Errorw("failed to get deal start epoch", "error", err) continue } - startEpoch := p.DealInfo.DealSchedule.StartEpoch if startEpoch < cutoffEpoch { cutoffEpoch = startEpoch } @@ -444,15 +452,19 @@ func getDealStartCutoff(si SectorInfo) abi.ChainEpoch { func (b *PreCommitBatcher) getAllocationCutoff(si 
SectorInfo) abi.ChainEpoch { cutoff := si.TicketEpoch + policy.MaxPreCommitRandomnessLookback for _, p := range si.Pieces { - if p.DealInfo == nil { + if !p.HasDealInfo() { continue } - alloc, _ := b.api.StateGetAllocationForPendingDeal(b.mctx, p.DealInfo.DealID, types.EmptyTSK) + alloc, err := p.GetAllocation(b.mctx, b.api, types.EmptyTSK) + if err != nil { + log.Errorw("failed to get deal allocation", "error", err) + } // alloc is nil if this is not a verified deal in nv17 or later if alloc == nil { continue } + if alloc.Expiration < cutoff { cutoff = alloc.Expiration } diff --git a/storage/pipeline/precommit_batch_test.go b/storage/pipeline/precommit_batch_test.go deleted file mode 100644 index 1f3aaf244..000000000 --- a/storage/pipeline/precommit_batch_test.go +++ /dev/null @@ -1,291 +0,0 @@ -// stm: #unit -package sealing_test - -import ( - "bytes" - "context" - "sort" - "sync" - "testing" - "time" - - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner" - "github.com/filecoin-project/go-state-types/network" - miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/config" - pipeline "github.com/filecoin-project/lotus/storage/pipeline" - "github.com/filecoin-project/lotus/storage/pipeline/mocks" - "github.com/filecoin-project/lotus/storage/pipeline/sealiface" -) - -var fc = config.MinerFeeConfig{ - MaxPreCommitGasFee: types.FIL(types.FromFil(1)), - MaxCommitGasFee: types.FIL(types.FromFil(1)), - MaxTerminateGasFee: types.FIL(types.FromFil(1)), - MaxPreCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, - 
MaxCommitBatchGasFee: config.BatchFeeConfig{Base: types.FIL(types.FromFil(3)), PerSector: types.FIL(types.FromFil(1))}, -} - -func TestPrecommitBatcher(t *testing.T) { - //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - ctx := context.Background() - - as := asel(func(ctx context.Context, mi api.MinerInfo, use api.AddrUse, goodFunds, minFunds abi.TokenAmount) (address.Address, abi.TokenAmount, error) { - return t0123, big.Zero(), nil - }) - - maxBatch := miner6.PreCommitSectorBatchMaxSize - - cfg := func() (sealiface.Config, error) { - return sealiface.Config{ - MaxWaitDealsSectors: 2, - MaxSealingSectors: 0, - MaxSealingSectorsForDeals: 0, - WaitDealsDelay: time.Hour * 6, - AlwaysKeepUnsealedCopy: true, - - MaxPreCommitBatch: maxBatch, - PreCommitBatchWait: 24 * time.Hour, - PreCommitBatchSlack: 3 * time.Hour, - BatchPreCommitAboveBaseFee: big.NewInt(10000), - - AggregateCommits: true, - MinCommitBatch: miner6.MinAggregatedSectors, - MaxCommitBatch: miner6.MaxAggregatedSectors, - CommitBatchWait: 24 * time.Hour, - CommitBatchSlack: 1 * time.Hour, - - TerminateBatchMin: 1, - TerminateBatchMax: 100, - TerminateBatchWait: 5 * time.Minute, - }, nil - } - - type promise func(t *testing.T) - type action func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise - - actions := func(as ...action) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - var ps []promise - for _, a := range as { - p := a(t, s, pcb) - if p != nil { - ps = append(ps, p) - } - } - - if len(ps) > 0 { - return func(t *testing.T) { - for _, p := range ps { - p(t) - } - } - } - return nil - } - } - - addSector := func(sn abi.SectorNumber, aboveBalancer bool) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - var pcres sealiface.PreCommitBatchRes - var pcerr error - done 
:= sync.Mutex{} - done.Lock() - - si := pipeline.SectorInfo{ - SectorNumber: sn, - } - - basefee := big.NewInt(9999) - if aboveBalancer { - basefee = big.NewInt(10001) - } - - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, basefee, 1), nil).MaxTimes(2) // once in AddPreCommit - - go func() { - defer done.Unlock() - pcres, pcerr = pcb.AddPreCommit(ctx, si, big.Zero(), &minertypes.SectorPreCommitInfo{ - SectorNumber: si.SectorNumber, - SealedCID: fakePieceCid(t), - DealIDs: nil, - Expiration: 0, - }) - }() - - return func(t *testing.T) { - done.Lock() - require.NoError(t, pcerr) - require.Empty(t, pcres.Error) - require.Contains(t, pcres.Sectors, si.SectorNumber) - } - } - } - - addSectors := func(sectors []abi.SectorNumber, aboveBalancer bool) action { - as := make([]action, len(sectors)) - for i, sector := range sectors { - as[i] = addSector(sector, aboveBalancer) - } - return actions(as...) - } - - waitPending := func(n int) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - require.Eventually(t, func() bool { - p, err := pcb.Pending(ctx) - require.NoError(t, err) - return len(p) == n - }, time.Second*5, 10*time.Millisecond) - - return nil - } - } - - //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 - expectSend := func(expect []abi.SectorNumber, gasOverLimit bool) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(api.MinerInfo{Owner: t0123, Worker: t0123}, nil) - if gasOverLimit { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, &api.ErrOutOfGas{}) - } else { - s.EXPECT().GasEstimateMessageGas(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&types.Message{GasLimit: 100000}, nil) - } - - if !gasOverLimit { - s.EXPECT().MpoolPushMessage(gomock.Any(), funMatcher(func(i 
interface{}) bool { - b := i.(*types.Message) - var params miner6.PreCommitSectorBatchParams - require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(b.Params))) - for s, number := range expect { - require.Equal(t, number, params.Sectors[s].SectorNumber) - } - return true - }), gomock.Any()).Return(dummySmsg, nil) - } - return nil - } - } - - expectInitialCalls := func() action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - s.EXPECT().ChainHead(gomock.Any()).Return(makeBFTs(t, big.NewInt(10001), 1), nil) - return nil - } - } - - flush := func(expect []abi.SectorNumber) action { - return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *pipeline.PreCommitBatcher) promise { - _ = expectInitialCalls()(t, s, pcb) - _ = expectSend(expect, false)(t, s, pcb) - - r, err := pcb.Flush(ctx) - require.NoError(t, err) - require.Len(t, r, 1) - require.Empty(t, r[0].Error) - sort.Slice(r[0].Sectors, func(i, j int) bool { - return r[0].Sectors[i] < r[0].Sectors[j] - }) - require.Equal(t, expect, r[0].Sectors) - - return nil - } - } - - getSectors := func(n int) []abi.SectorNumber { - out := make([]abi.SectorNumber, n) - for i := range out { - out[i] = abi.SectorNumber(i) - } - return out - } - - tcs := map[string]struct { - actions []action - }{ - "addSingle": { - actions: []action{ - addSector(0, true), - waitPending(1), - flush([]abi.SectorNumber{0}), - }, - }, - "addMax": { - actions: []action{ - expectInitialCalls(), - expectSend(getSectors(maxBatch), false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addMax-gasAboveLimit": { - actions: []action{ - expectInitialCalls(), - expectSend(getSectors(maxBatch), true), - expectSend(getSectors(maxBatch)[:maxBatch/2], false), - expectSend(getSectors(maxBatch)[maxBatch/2:], false), - addSectors(getSectors(maxBatch), true), - }, - }, - "addOne-belowBaseFee": { - actions: []action{ - expectSend(getSectors(1), false), - addSectors(getSectors(1), false), - }, - 
}, - } - - for name, tc := range tcs { - tc := tc - - t.Run(name, func(t *testing.T) { - // create go mock controller here - mockCtrl := gomock.NewController(t) - // when test is done, assert expectations on all mock objects. - defer mockCtrl.Finish() - - // create them mocks - pcapi := mocks.NewMockPreCommitBatcherApi(mockCtrl) - pcapi.EXPECT().StateNetworkVersion(gomock.Any(), gomock.Any()).Return(network.Version20, nil).AnyTimes() - - pcb := pipeline.NewPreCommitBatcher(ctx, t0123, pcapi, as, fc, cfg) - - var promises []promise - - for _, a := range tc.actions { - p := a(t, pcapi, pcb) - if p != nil { - promises = append(promises, p) - } - } - - for _, p := range promises { - p(t) - } - - err := pcb.Stop(ctx) - require.NoError(t, err) - }) - } -} - -type funMatcher func(interface{}) bool - -func (funMatcher) Matches(interface{}) bool { - return true -} - -func (funMatcher) String() string { - return "fun" -} diff --git a/storage/pipeline/precommit_policy.go b/storage/pipeline/precommit_policy.go index 6e234f930..6df44d407 100644 --- a/storage/pipeline/precommit_policy.go +++ b/storage/pipeline/precommit_policy.go @@ -9,7 +9,6 @@ import ( "github.com/filecoin-project/go-state-types/builtin/v8/miner" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" @@ -17,7 +16,7 @@ import ( ) type PreCommitPolicy interface { - Expiration(ctx context.Context, ps ...api.SectorPiece) (abi.ChainEpoch, error) + Expiration(ctx context.Context, ps ...SafeSectorPiece) (abi.ChainEpoch, error) } type Chain interface { @@ -60,7 +59,7 @@ func NewBasicPreCommitPolicy(api Chain, cfgGetter dtypes.GetSealingConfigFunc, p // Expiration produces the pre-commit sector expiration epoch for an encoded // replica containing the provided enumeration of pieces and deals. 
-func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...api.SectorPiece) (abi.ChainEpoch, error) { +func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...SafeSectorPiece) (abi.ChainEpoch, error) { ts, err := p.api.ChainHead(ctx) if err != nil { return 0, err @@ -69,17 +68,22 @@ func (p *BasicPreCommitPolicy) Expiration(ctx context.Context, ps ...api.SectorP var end *abi.ChainEpoch for _, p := range ps { - if p.DealInfo == nil { + if !p.HasDealInfo() { continue } - if p.DealInfo.DealSchedule.EndEpoch < ts.Height() { + endEpoch, err := p.EndEpoch() + if err != nil { + return 0, xerrors.Errorf("failed to get end epoch: %w", err) + } + + if endEpoch < ts.Height() { log.Warnf("piece schedule %+v ended before current epoch %d", p, ts.Height()) continue } - if end == nil || *end < p.DealInfo.DealSchedule.EndEpoch { - tmp := p.DealInfo.DealSchedule.EndEpoch + if end == nil || *end < endEpoch { + tmp := endEpoch end = &tmp } } diff --git a/storage/pipeline/precommit_policy_test.go b/storage/pipeline/precommit_policy_test.go index 7865560de..6329e90d3 100644 --- a/storage/pipeline/precommit_policy_test.go +++ b/storage/pipeline/precommit_policy_test.go @@ -11,15 +11,19 @@ import ( commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" pipeline "github.com/filecoin-project/lotus/storage/pipeline" + "github.com/filecoin-project/lotus/storage/pipeline/piece" 
"github.com/filecoin-project/lotus/storage/pipeline/sealiface" ) @@ -47,6 +51,39 @@ func (f *fakeChain) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey return build.TestNetworkVersion, nil } +func makeBFTs(t *testing.T, basefee abi.TokenAmount, h abi.ChainEpoch) *types.TipSet { + dummyCid, _ := cid.Parse("bafkqaaa") + + var ts, err = types.NewTipSet([]*types.BlockHeader{ + { + Height: h, + Miner: builtin.SystemActorAddr, + + Parents: []cid.Cid{}, + + Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, + + ParentStateRoot: dummyCid, + Messages: dummyCid, + ParentMessageReceipts: dummyCid, + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + + ParentBaseFee: basefee, + }, + }) + if t != nil { + require.NoError(t, err) + } + + return ts +} + +func makeTs(t *testing.T, h abi.ChainEpoch) *types.TipSet { + return makeBFTs(t, big.NewInt(0), h) +} + func (f *fakeChain) ChainHead(ctx context.Context) (*types.TipSet, error) { return makeTs(nil, f.h), nil } @@ -58,6 +95,10 @@ func fakePieceCid(t *testing.T) cid.Cid { return fakePieceCid } +func cidPtr(c cid.Cid) *cid.Cid { + return &c +} + func TestBasicPolicyEmptySector(t *testing.T) { cfg := fakeConfigGetter(nil) h := abi.ChainEpoch(55) @@ -97,33 +138,35 @@ func TestBasicPolicyMostConstrictiveSchedule(t *testing.T) { h: abi.ChainEpoch(55), }, cfg, 2) longestDealEpochEnd := abi.ChainEpoch(547300) - pieces := []api.SectorPiece{ - { + pieces := []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(42), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(42), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(70), EndEpoch: abi.ChainEpoch(547275), }, }, - }, - { + }), 
+ pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(43), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(43), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(80), EndEpoch: longestDealEpochEnd, }, }, - }, + }), } exp, err := policy.Expiration(context.Background(), pieces...) @@ -138,20 +181,21 @@ func TestBasicPolicyIgnoresExistingScheduleIfExpired(t *testing.T) { h: abi.ChainEpoch(55), }, cfg, 0) - pieces := []api.SectorPiece{ - { + pieces := []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(44), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(44), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(10), }, }, - }, + }), } exp, err := pcp.Expiration(context.Background(), pieces...) 
@@ -170,27 +214,28 @@ func TestMissingDealIsIgnored(t *testing.T) { h: abi.ChainEpoch(55), }, cfg, 0) - pieces := []api.SectorPiece{ - { + pieces := []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, - DealInfo: &api.PieceDealInfo{ - DealID: abi.DealID(44), - DealSchedule: api.DealSchedule{ + DealInfo: &piece.PieceDealInfo{ + PublishCid: cidPtr(fakePieceCid(t)), // pretend this is a valid builtin-market deal + DealID: abi.DealID(44), + DealSchedule: piece.DealSchedule{ StartEpoch: abi.ChainEpoch(1), EndEpoch: abi.ChainEpoch(547300), }, }, - }, - { + }), + pipeline.SafePiece(api.SectorPiece{ Piece: abi.PieceInfo{ Size: abi.PaddedPieceSize(1024), PieceCID: fakePieceCid(t), }, DealInfo: nil, - }, + }), } exp, err := policy.Expiration(context.Background(), pieces...) @@ -198,3 +243,37 @@ func TestMissingDealIsIgnored(t *testing.T) { assert.Equal(t, 547300, int(exp)) } + +func TestBasicPolicyDDO(t *testing.T) { + cfg := fakeConfigGetter(nil) + pcp := pipeline.NewBasicPreCommitPolicy(&fakeChain{ + h: abi.ChainEpoch(55), + }, cfg, 0) + + pieces := []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api.SectorPiece{ + Piece: abi.PieceInfo{ + Size: abi.PaddedPieceSize(1024), + PieceCID: fakePieceCid(t), + }, + DealInfo: &piece.PieceDealInfo{ + PublishCid: nil, + DealID: abi.DealID(44), + DealSchedule: piece.DealSchedule{ + StartEpoch: abi.ChainEpoch(100_000), + EndEpoch: abi.ChainEpoch(1500_000), + }, + PieceActivationManifest: &miner.PieceActivationManifest{ + Size: 0, + VerifiedAllocationKey: nil, + Notify: nil, + }, + }, + }), + } + + exp, err := pcp.Expiration(context.Background(), pieces...) 
+ require.NoError(t, err) + + assert.Equal(t, abi.ChainEpoch(1500_000), exp) +} diff --git a/storage/pipeline/receive.go b/storage/pipeline/receive.go index 8427eba54..231afbc39 100644 --- a/storage/pipeline/receive.go +++ b/storage/pipeline/receive.go @@ -86,6 +86,11 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta return SectorInfo{}, xerrors.Errorf("getting chain head: %w", err) } + nv, err := m.Api.StateNetworkVersion(ctx, ts.Key()) + if err != nil { + return SectorInfo{}, xerrors.Errorf("getting network version: %w", err) + } + var info SectorInfo var validatePoRep bool @@ -217,9 +222,24 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta info.State = ReceiveSector info.SectorNumber = meta.Sector.Number - info.Pieces = meta.Pieces + info.Pieces = make([]SafeSectorPiece, len(meta.Pieces)) info.SectorType = meta.Type + for i, piece := range meta.Pieces { + info.Pieces[i] = SafeSectorPiece{ + real: piece, + } + + if !info.Pieces[i].HasDealInfo() { + continue // cc + } + + err := info.Pieces[i].DealInfo().Valid(nv) + if err != nil { + return SectorInfo{}, xerrors.Errorf("piece %d deal info invalid: %w", i, err) + } + } + if meta.RemoteSealingDoneEndpoint != "" { // validate the url if _, err := url.Parse(meta.RemoteSealingDoneEndpoint); err != nil { @@ -229,7 +249,7 @@ func (m *Sealing) checkSectorMeta(ctx context.Context, meta api.RemoteSectorMeta info.RemoteSealingDoneEndpoint = meta.RemoteSealingDoneEndpoint } - if err := checkPieces(ctx, m.maddr, meta.Sector.Number, meta.Pieces, m.Api, false); err != nil { + if err := checkPieces(ctx, m.maddr, meta.Sector.Number, info.Pieces, m.Api, false); err != nil { return SectorInfo{}, xerrors.Errorf("checking pieces: %w", err) } diff --git a/storage/pipeline/sealiface/config.go b/storage/pipeline/sealiface/config.go index e41b143ec..2ac6e0d58 100644 --- a/storage/pipeline/sealiface/config.go +++ b/storage/pipeline/sealiface/config.go @@ -62,4 +62,9 @@ 
type Config struct { TerminateBatchWait time.Duration UseSyntheticPoRep bool + + RequireActivationSuccess bool + RequireActivationSuccessUpdate bool + RequireNotificationSuccess bool + RequireNotificationSuccessUpdate bool } diff --git a/storage/pipeline/sealing.go b/storage/pipeline/sealing.go index 936bd8b39..75791fae8 100644 --- a/storage/pipeline/sealing.go +++ b/storage/pipeline/sealing.go @@ -5,6 +5,7 @@ import ( "sync" "time" + blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" @@ -25,12 +26,15 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" + "github.com/filecoin-project/lotus/lib/result" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/storage/ctladdr" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/pipeline/sealiface" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/storiface" @@ -49,7 +53,6 @@ type SealingAPI interface { StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) - StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) StateSectorGetInfo(ctx context.Context, maddr address.Address, 
sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tsk types.TipSetKey) (*lminer.SectorLocation, error) StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) @@ -72,6 +75,13 @@ type SealingAPI interface { ChainReadObj(context.Context, cid.Cid) ([]byte, error) StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error) StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + StateGetAllocationIdForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (verifreg.AllocationId, error) + StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) + + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + StateVMCirculatingSupplyInternal(ctx context.Context, tsk types.TipSetKey) (api.CirculatingSupply, error) + ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) + ChainPutObj(ctx context.Context, block blocks.Block) error // Address selector WalletBalance(context.Context, address.Address) (types.BigInt, error) @@ -110,8 +120,8 @@ type Sealing struct { inputLk sync.Mutex openSectors map[abi.SectorID]*openSector sectorTimers map[abi.SectorID]*time.Timer - pendingPieces map[cid.Cid]*pendingPiece - assignedPieces map[abi.SectorID][]cid.Cid + pendingPieces map[piece.PieceKey]*pendingPiece + assignedPieces map[abi.SectorID][]piece.PieceKey nextDealSector *abi.SectorNumber // used to prevent a race where we could create a new sector more than once available map[abi.SectorID]struct{} @@ -139,16 +149,16 @@ type openSector struct { number abi.SectorNumber ccUpdate bool - maybeAccept func(cid.Cid) error // called with inputLk + 
maybeAccept func(key piece.PieceKey) error // called with inputLk } func (o *openSector) checkDealAssignable(piece *pendingPiece, expF expFn) (bool, error) { log := log.With( "sector", o.number, - "deal", piece.deal.DealID, - "dealEnd", piece.deal.DealProposal.EndEpoch, - "dealStart", piece.deal.DealProposal.StartEpoch, + "piece", piece.deal.String(), + "dealEnd", result.Wrap(piece.deal.EndEpoch()), + "dealStart", result.Wrap(piece.deal.StartEpoch()), "dealClaimEnd", piece.claimTerms.claimTermEnd, "lastAssignedDealEnd", o.lastDealEnd, @@ -181,7 +191,12 @@ func (o *openSector) checkDealAssignable(piece *pendingPiece, expF expFn) (bool, return false, nil } - if sectorExpiration < piece.deal.DealProposal.EndEpoch { + endEpoch, err := piece.deal.EndEpoch() + if err != nil { + return false, xerrors.Errorf("failed to get end epoch: %w", err) + } + + if sectorExpiration < endEpoch { log.Debugw("deal not assignable to sector", "reason", "sector expiration less than deal expiration") return false, nil } @@ -205,7 +220,7 @@ type pendingPiece struct { resp *pieceAcceptResp size abi.UnpaddedPieceSize - deal api.PieceDealInfo + deal UniversalPieceInfo claimTerms pieceClaimBounds @@ -215,10 +230,10 @@ type pendingPiece struct { accepted func(abi.SectorNumber, abi.UnpaddedPieceSize, error) } -func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sealer.SectorManager, verif storiface.Verifier, prov storiface.Prover, pcp PreCommitPolicy, gc dtypes.GetSealingConfigFunc, journal journal.Journal, addrSel AddressSelector) *Sealing { +func New(mctx context.Context, sapi SealingAPI, fc config.MinerFeeConfig, events Events, maddr address.Address, ds datastore.Batching, sealer sealer.SectorManager, verif storiface.Verifier, prov storiface.Prover, pcp PreCommitPolicy, gc dtypes.GetSealingConfigFunc, journal journal.Journal, addrSel AddressSelector) *Sealing { s := &Sealing{ - Api: api, - DealInfo: 
&CurrentDealInfoManager{api}, + Api: sapi, + DealInfo: &CurrentDealInfoManager{sapi}, ds: ds, @@ -232,8 +247,8 @@ func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events openSectors: map[abi.SectorID]*openSector{}, sectorTimers: map[abi.SectorID]*time.Timer{}, - pendingPieces: map[cid.Cid]*pendingPiece{}, - assignedPieces: map[abi.SectorID][]cid.Cid{}, + pendingPieces: map[piece.PieceKey]*pendingPiece{}, + assignedPieces: map[abi.SectorID][]piece.PieceKey{}, available: map[abi.SectorID]struct{}{}, @@ -242,9 +257,9 @@ func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events addrSel: addrSel, - terminator: NewTerminationBatcher(mctx, maddr, api, addrSel, fc, gc), - precommiter: NewPreCommitBatcher(mctx, maddr, api, addrSel, fc, gc), - commiter: NewCommitBatcher(mctx, maddr, api, addrSel, fc, gc, prov), + terminator: NewTerminationBatcher(mctx, maddr, sapi, addrSel, fc, gc), + precommiter: NewPreCommitBatcher(mctx, maddr, sapi, addrSel, fc, gc), + commiter: NewCommitBatcher(mctx, maddr, sapi, addrSel, fc, gc, prov), getConfig: gc, diff --git a/storage/pipeline/sector_state.go b/storage/pipeline/sector_state.go index e1f5bfd69..9e7f75171 100644 --- a/storage/pipeline/sector_state.go +++ b/storage/pipeline/sector_state.go @@ -94,7 +94,7 @@ const ( CommitFinalizeFailed SectorState = "CommitFinalizeFailed" // single commit - SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain + SubmitCommit SectorState = "SubmitCommit" // send commit message to the chain (deprecated) CommitWait SectorState = "CommitWait" // wait for the commit message to land on chain SubmitCommitAggregate SectorState = "SubmitCommitAggregate" diff --git a/storage/pipeline/states_failed.go b/storage/pipeline/states_failed.go index 3323c4c9b..3e4ea4dde 100644 --- a/storage/pipeline/states_failed.go +++ b/storage/pipeline/states_failed.go @@ -235,7 +235,7 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, 
sect return nil } - if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, ts.Key(), m.Api); err != nil { + if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, m.Api); err != nil { switch err.(type) { case *ErrApi: log.Errorf("handleSubmitReplicaUpdateFailed: api error, not proceeding: %+v", err) @@ -265,7 +265,7 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect } if !active { err := xerrors.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber) - log.Errorf(err.Error()) + log.Errorf("%s", err) return ctx.Send(SectorAbortUpgrade{err}) } @@ -466,7 +466,7 @@ func (m *Sealing) handleAbortUpgrade(ctx statemachine.Context, sector SectorInfo // failWith is a mutator or global mutator func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, sector SectorInfo, failWith interface{}) error { - toFix, paddingPieces, err := recoveryPiecesToFix(ctx.Context(), m.Api, sector, m.maddr) + toFix, nonBuiltinMarketPieces, err := recoveryPiecesToFix(ctx.Context(), m.Api, sector, m.maddr) if err != nil { return err } @@ -478,33 +478,35 @@ func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, secto updates := map[int]abi.DealID{} for _, i := range toFix { + // note: all toFix pieces are builtin-market pieces + p := sector.Pieces[i] - if p.DealInfo.PublishCid == nil { + if p.Impl().PublishCid == nil { // TODO: check if we are in an early enough state try to remove this piece - log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID) + log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID) // Not much to do here (and this can only happen for old spacerace sectors) return ctx.Send(failWith) } var dp *market.DealProposal - if 
p.DealInfo.DealProposal != nil { - mdp := *p.DealInfo.DealProposal + if p.Impl().DealProposal != nil { + mdp := *p.Impl().DealProposal dp = &mdp } - res, err := m.DealInfo.GetCurrentDealInfo(ctx.Context(), ts.Key(), dp, *p.DealInfo.PublishCid) + res, err := m.DealInfo.GetCurrentDealInfo(ctx.Context(), ts.Key(), dp, *p.Impl().PublishCid) if err != nil { failed[i] = xerrors.Errorf("getting current deal info for piece %d: %w", i, err) continue } if res.MarketDeal == nil { - failed[i] = xerrors.Errorf("nil market deal (%d,%d,%d,%s)", i, sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID) + failed[i] = xerrors.Errorf("nil market deal (%d,%d,%d,%s)", i, sector.SectorNumber, p.Impl().DealID, p.Impl().DealProposal.PieceCID) continue } - if res.MarketDeal.Proposal.PieceCID != p.Piece.PieceCID { - failed[i] = xerrors.Errorf("recovered piece (%d) deal in sector %d (dealid %d) has different PieceCID %s != %s", i, sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, res.MarketDeal.Proposal.PieceCID) + if res.MarketDeal.Proposal.PieceCID != p.PieceCID() { + failed[i] = xerrors.Errorf("recovered piece (%d) deal in sector %d (dealid %d) has different PieceCID %s != %s", i, sector.SectorNumber, p.Impl().DealID, p.Impl().DealProposal.PieceCID, res.MarketDeal.Proposal.PieceCID) continue } @@ -517,7 +519,7 @@ func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, secto merr = multierror.Append(merr, e) } - if len(failed)+paddingPieces == len(sector.Pieces) { + if len(failed)+nonBuiltinMarketPieces == len(sector.Pieces) { log.Errorf("removing sector %d: all deals expired or unrecoverable: %+v", sector.SectorNumber, merr) return ctx.Send(failWith) } @@ -542,6 +544,7 @@ func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{xerrors.New("failed recovering deal ids")}) } +// recoveryPiecesToFix returns the list of sector piece indexes to fix, and the number of 
non-builtin-market pieces func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) { ts, err := api.ChainHead(ctx) if err != nil { @@ -549,51 +552,68 @@ func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, } var toFix []int - paddingPieces := 0 + nonBuiltinMarketPieces := 0 for i, p := range sector.Pieces { - // if no deal is associated with the piece, ensure that we added it as - // filler (i.e. ensure that it has a zero PieceCID) - if p.DealInfo == nil { - exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded()) - if !p.Piece.PieceCID.Equals(exp) { - return nil, 0, xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID) - } - paddingPieces++ - continue - } + i, p := i, p + + err := p.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + // if no deal is associated with the piece, ensure that we added it as + // filler (i.e. 
ensure that it has a zero PieceCID) + exp := zerocomm.ZeroPieceCommitment(p.Piece().Size.Unpadded()) + if !info.PieceCID().Equals(exp) { + return xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece().PieceCID) + } + nonBuiltinMarketPieces++ + return nil + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + deal, err := api.StateMarketStorageDeal(ctx, p.DealInfo().Impl().DealID, ts.Key()) + if err != nil { + log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo().Impl().DealID, i, err) + toFix = append(toFix, i) + return nil + } + + if deal.Proposal.Provider != maddr { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, deal.Proposal.Provider, maddr) + toFix = append(toFix, i) + return nil + } + + if deal.Proposal.PieceCID != p.Piece().PieceCID { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, p.Piece().PieceCID, deal.Proposal.PieceCID) + toFix = append(toFix, i) + return nil + } + + if p.Piece().Size != deal.Proposal.PieceSize { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, p.Piece().Size, deal.Proposal.PieceSize) + toFix = append(toFix, i) + return nil + } + + if ts.Height() >= deal.Proposal.StartEpoch { + // TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces + // (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval) + return xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.Impl().DealID, deal.Proposal.StartEpoch, ts.Height()) + } + + return nil + }, + DDOHandler: func(info 
UniversalPieceInfo) error { + // DDO pieces have no repair strategy + + nonBuiltinMarketPieces++ + return nil + }, + }) - deal, err := api.StateMarketStorageDeal(ctx, p.DealInfo.DealID, ts.Key()) if err != nil { - log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err) - toFix = append(toFix, i) - continue - } - - if deal.Proposal.Provider != maddr { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, deal.Proposal.Provider, maddr) - toFix = append(toFix, i) - continue - } - - if deal.Proposal.PieceCID != p.Piece.PieceCID { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, deal.Proposal.PieceCID) - toFix = append(toFix, i) - continue - } - - if p.Piece.Size != deal.Proposal.PieceSize { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, deal.Proposal.PieceSize) - toFix = append(toFix, i) - continue - } - - if ts.Height() >= deal.Proposal.StartEpoch { - // TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces - // (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval) - return nil, 0, xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, deal.Proposal.StartEpoch, ts.Height()) + return nil, 0, xerrors.Errorf("checking piece %d: %w", i, err) } } - return toFix, paddingPieces, nil + return toFix, nonBuiltinMarketPieces, nil } diff --git a/storage/pipeline/states_failed_test.go b/storage/pipeline/states_failed_test.go index f6846c8f5..bc658d59b 100644 --- 
a/storage/pipeline/states_failed_test.go +++ b/storage/pipeline/states_failed_test.go @@ -23,6 +23,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" pipeline "github.com/filecoin-project/lotus/storage/pipeline" "github.com/filecoin-project/lotus/storage/pipeline/mocks" + "github.com/filecoin-project/lotus/storage/pipeline/piece" ) func TestStateRecoverDealIDs(t *testing.T) { @@ -76,16 +77,16 @@ func TestStateRecoverDealIDs(t *testing.T) { // TODO sctx should satisfy an interface so it can be useable for mocking. This will fail because we are passing in an empty context now to get this to build. // https://github.com/filecoin-project/lotus/issues/7867 err := fakeSealing.HandleRecoverDealIDs(statemachine.Context{}, pipeline.SectorInfo{ - Pieces: []api2.SectorPiece{ - { - DealInfo: &api2.PieceDealInfo{ + Pieces: []pipeline.SafeSectorPiece{ + pipeline.SafePiece(api2.SectorPiece{ + DealInfo: &piece.PieceDealInfo{ DealID: dealId, PublishCid: &pc, }, Piece: abi.PieceInfo{ PieceCID: idCid("oldPieceCID"), }, - }, + }), }, }) require.NoError(t, err) diff --git a/storage/pipeline/states_replica_update.go b/storage/pipeline/states_replica_update.go index 6717f49a6..a0d92891c 100644 --- a/storage/pipeline/states_replica_update.go +++ b/storage/pipeline/states_replica_update.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/builtin" "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/lotus/api" @@ -22,7 +23,7 @@ import ( func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { // if the sector ended up not having any deals, abort the upgrade - if !sector.hasDeals() { + if !sector.hasData() { return ctx.Send(SectorAbortUpgrade{xerrors.New("sector had no deals")}) } @@ -58,7 +59,7 @@ func (m *Sealing) handleProveReplicaUpdate(ctx 
statemachine.Context, sector Sect } if !active { err := xerrors.Errorf("sector marked for upgrade %d no longer active, aborting upgrade", sector.SectorNumber) - log.Errorf(err.Error()) + log.Errorf("%s", err) return ctx.Send(SectorAbortUpgrade{err}) } @@ -82,14 +83,13 @@ func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector Sect } func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { - ts, err := m.Api.ChainHead(ctx.Context()) if err != nil { log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) return nil } - if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, ts.Key(), m.Api); err != nil { + if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, m.Api); err != nil { return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } @@ -114,24 +114,8 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec log.Errorf("failed to get update proof type from seal proof: %+v", err) return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - enc := new(bytes.Buffer) - params := &miner.ProveReplicaUpdatesParams{ - Updates: []miner.ReplicaUpdate{ - { - SectorID: sector.SectorNumber, - Deadline: sl.Deadline, - Partition: sl.Partition, - NewSealedSectorCID: *sector.UpdateSealed, - Deals: sector.dealIDs(), - UpdateProofType: updateProof, - ReplicaProof: sector.ReplicaUpdateProof, - }, - }, - } - if err := params.MarshalCBOR(enc); err != nil { - log.Errorf("failed to serialize update replica params: %w", err) - return ctx.Send(SectorSubmitReplicaUpdateFailed{}) - } + + // figure out from address and collateral cfg, err := m.getConfig() if err != nil { @@ -140,34 +124,24 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec onChainInfo, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, ts.Key()) if err != nil { - log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) - return nil + 
log.Errorf("failed to get sector info: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } if onChainInfo == nil { - return xerrors.Errorf("sector not found %d", sector.SectorNumber) + log.Errorw("on chain info was nil", "sector", sector.SectorNumber) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - sp, err := m.currentSealProof(ctx.Context()) + weightUpdate, err := m.sectorWeight(ctx.Context(), sector, onChainInfo.Expiration) if err != nil { - log.Errorf("sealer failed to return current seal proof not proceeding: %+v", err) - return nil - } - virtualPCI := miner.SectorPreCommitInfo{ - SealProof: sp, - SectorNumber: sector.SectorNumber, - SealedCID: *sector.UpdateSealed, - //SealRandEpoch: 0, - DealIDs: sector.dealIDs(), - Expiration: onChainInfo.Expiration, - //ReplaceCapacity: false, - //ReplaceSectorDeadline: 0, - //ReplaceSectorPartition: 0, - //ReplaceSectorNumber: 0, + log.Errorf("failed to get sector weight: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - collateral, err := m.Api.StateMinerInitialPledgeCollateral(ctx.Context(), m.maddr, virtualPCI, ts.Key()) + collateral, err := m.pledgeForPower(ctx.Context(), weightUpdate) if err != nil { - return xerrors.Errorf("getting initial pledge collateral: %w", err) + log.Errorf("failed to get pledge for power: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } collateral = big.Sub(collateral, onChainInfo.InitialPledge) @@ -194,13 +168,86 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec log.Errorf("no good address to send replica update message from: %+v", err) return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } - mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.ProveReplicaUpdates, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) + + // figure out message type + + nv, err := m.Api.StateNetworkVersion(ctx.Context(), ts.Key()) + if err != nil { + log.Errorf("failed to get network 
version: %+v", err) + } + + pams, deals, err := m.processPieces(ctx.Context(), sector, nv >= network.Version22) + if err != nil { + log.Errorf("failed to process pieces: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + if len(pams) > 0 { + // PRU3 + + params := &miner.ProveReplicaUpdates3Params{ + SectorUpdates: []miner.SectorUpdateManifest{ + { + Sector: sector.SectorNumber, + Deadline: sl.Deadline, + Partition: sl.Partition, + NewSealedCID: *sector.UpdateSealed, + Pieces: pams, + }, + }, + SectorProofs: [][]byte{sector.ReplicaUpdateProof}, + UpdateProofsType: updateProof, + //AggregateProof + //AggregateProofType + RequireActivationSuccess: cfg.RequireActivationSuccessUpdate, + RequireNotificationSuccess: cfg.RequireNotificationSuccessUpdate, + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + log.Errorf("failed to serialize update replica params: %w", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.ProveReplicaUpdates3, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid}) + } + + // PRU2 + params := &miner.ProveReplicaUpdatesParams2{ + Updates: []miner.ReplicaUpdate2{ + { + SectorID: sector.SectorNumber, + Deadline: sl.Deadline, + Partition: sl.Partition, + NewSealedSectorCID: *sector.UpdateSealed, + NewUnsealedSectorCID: *sector.UpdateUnsealed, + UpdateProofType: updateProof, + ReplicaProof: sector.ReplicaUpdateProof, + Deals: deals, + }, + }, + } + + enc := new(bytes.Buffer) + if err := params.MarshalCBOR(enc); err != nil { + log.Errorf("failed to serialize update replica params: %w", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + mcid, err := sendMsg(ctx.Context(), m.Api, 
from, m.maddr, builtin.MethodsMiner.ProveReplicaUpdates2, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) if err != nil { log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err) return ctx.Send(SectorSubmitReplicaUpdateFailed{}) } return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid}) + } func (m *Sealing) handleWaitMutable(ctx statemachine.Context, sector SectorInfo) error { diff --git a/storage/pipeline/states_sealing.go b/storage/pipeline/states_sealing.go index 5c91161ef..81ee85853 100644 --- a/storage/pipeline/states_sealing.go +++ b/storage/pipeline/states_sealing.go @@ -12,11 +12,15 @@ import ( "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/go-state-types/abi" actorstypes "github.com/filecoin-project/go-state-types/actors" "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/builtin" + miner2 "github.com/filecoin-project/go-state-types/builtin/v13/miner" + verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg" + "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/network" @@ -25,6 +29,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/types" @@ -32,6 +37,8 @@ import ( "github.com/filecoin-project/lotus/storage/sealer/storiface" ) +const MinDDONetworkVersion = network.Version22 + var DealSectorPriority = 1024 var MaxTicketAge = 
policy.MaxPreCommitRandomnessLookback @@ -59,7 +66,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err m.cleanupAssignedDeals(sector) // if this is a snapdeals sector, but it ended up not having any deals, abort the upgrade - if sector.State == SnapDealsPacking && !sector.hasDeals() { + if sector.State == SnapDealsPacking && !sector.hasData() { return ctx.Send(SectorAbortUpgrade{xerrors.New("sector had no deals")}) } @@ -67,7 +74,7 @@ func (m *Sealing) handlePacking(ctx statemachine.Context, sector SectorInfo) err var allocated abi.UnpaddedPieceSize for _, piece := range sector.Pieces { - allocated += piece.Piece.Size.Unpadded() + allocated += piece.Piece().Size.Unpadded() } ssize, err := sector.SectorType.SectorSize() @@ -417,11 +424,47 @@ func (m *Sealing) preCommitInfo(ctx statemachine.Context, sector SectorInfo) (*m SealedCID: *sector.CommR, SealRandEpoch: sector.TicketEpoch, - DealIDs: sector.dealIDs(), } - if sector.hasDeals() { + if sector.hasData() { + // only CC sectors don't have UnsealedCID params.UnsealedCid = sector.CommD + + // true when the sector has non-builtin-marked data + sectorIsDDO := false + + for _, piece := range sector.Pieces { + err := piece.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + return nil // ignore + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + if sectorIsDDO { + return nil // will be passed later in the Commit message + } + params.DealIDs = append(params.DealIDs, info.Impl().DealID) + return nil + }, + DDOHandler: func(info UniversalPieceInfo) error { + if nv < MinDDONetworkVersion { + return xerrors.Errorf("DDO sectors are not supported on network version %d", nv) + } + + log.Infow("DDO piece in sector", "sector", sector.SectorNumber, "piece", info.String()) + + sectorIsDDO = true + + // DDO sectors don't carry DealIDs, we will pass those + // deals in the Commit message later + params.DealIDs = nil + return nil + }, + }) + + 
if err != nil { + return nil, big.Zero(), types.EmptyTSK, xerrors.Errorf("handleDealInfo: %w", err) + } + } } collateral, err := m.Api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, ts.Key()) @@ -572,10 +615,6 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) return xerrors.Errorf("getting config: %w", err) } - log.Info("scheduling seal proof computation...") - - log.Infof("KOMIT %d %x(%d); %x(%d); %v; r:%s; d:%s", sector.SectorNumber, sector.TicketValue, sector.TicketEpoch, sector.SeedValue, sector.SeedEpoch, sector.pieceInfos(), sector.CommR, sector.CommD) - if sector.CommD == nil || sector.CommR == nil { return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) } @@ -700,87 +739,110 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) } func (m *Sealing) handleSubmitCommit(ctx statemachine.Context, sector SectorInfo) error { - cfg, err := m.getConfig() - if err != nil { - return xerrors.Errorf("getting config: %w", err) - } + // like precommit this is a deprecated state, but we keep it around for + // existing state machines + // todo: drop after nv21 + return ctx.Send(SectorSubmitCommitAggregate{}) +} - if cfg.AggregateCommits { - nv, err := m.Api.StateNetworkVersion(ctx.Context(), types.EmptyTSK) +// processPieces returns either: +// - a list of piece activation manifests +// - a list of deal IDs, if all non-filler pieces are deal-id pieces +func (m *Sealing) processPieces(ctx context.Context, sector SectorInfo, forceDDO bool) ([]miner.PieceActivationManifest, []abi.DealID, error) { + pams := make([]miner.PieceActivationManifest, 0, len(sector.Pieces)) + dealIDs := make([]abi.DealID, 0, len(sector.Pieces)) + hasDDO := forceDDO + + if !forceDDO { + // if not forcing DDO, check if we have any DDO pieces + for _, piece := range sector.Pieces { + piece := piece + + // first figure out if this is a ddo sector + err := 
piece.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + // Fillers are implicit (todo review: Are they??) + return nil + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + return nil + }, + DDOHandler: func(info UniversalPieceInfo) error { + hasDDO = true + return nil + }, + }) + if err != nil { + return nil, nil, xerrors.Errorf("handleDealInfo: %w", err) + } + } + } + for _, piece := range sector.Pieces { + piece := piece + + err := piece.handleDealInfo(handleDealInfoParams{ + FillerHandler: func(info UniversalPieceInfo) error { + // Fillers are implicit (todo review: Are they??) + return nil + }, + BuiltinMarketHandler: func(info UniversalPieceInfo) error { + if hasDDO { + alloc, err := m.Api.StateGetAllocationIdForPendingDeal(ctx, info.Impl().DealID, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting allocation for deal %d: %w", info.Impl().DealID, err) + } + clid, err := m.Api.StateLookupID(ctx, info.Impl().DealProposal.Client, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting client address for deal %d: %w", info.Impl().DealID, err) + } + + clientId, err := address.IDFromAddress(clid) + if err != nil { + return xerrors.Errorf("getting client address for deal %d: %w", info.Impl().DealID, err) + } + + var vac *miner2.VerifiedAllocationKey + if alloc != verifreg.NoAllocationID { + vac = &miner2.VerifiedAllocationKey{ + Client: abi.ActorID(clientId), + ID: verifreg13.AllocationId(alloc), + } + } + + payload, err := cborutil.Dump(info.Impl().DealID) + if err != nil { + return xerrors.Errorf("serializing deal id: %w", err) + } + + pams = append(pams, miner.PieceActivationManifest{ + CID: piece.Piece().PieceCID, + Size: piece.Piece().Size, + VerifiedAllocationKey: vac, + Notify: []miner2.DataActivationNotification{ + { + Address: market.Address, + Payload: payload, + }, + }, + }) + + return nil + } + + dealIDs = append(dealIDs, info.Impl().DealID) + return nil + }, + 
DDOHandler: func(info UniversalPieceInfo) error { + pams = append(pams, *piece.Impl().PieceActivationManifest) + return nil + }, + }) if err != nil { - return xerrors.Errorf("getting network version: %w", err) - } - - if nv >= network.Version13 { - return ctx.Send(SectorSubmitCommitAggregate{}) + return nil, nil, xerrors.Errorf("handleDealInfo: %w", err) } } - ts, err := m.Api.ChainHead(ctx.Context()) - if err != nil { - log.Errorf("handleSubmitCommit: api error, not proceeding: %+v", err) - return nil - } - - if err := m.checkCommit(ctx.Context(), sector, sector.Proof, ts.Key()); err != nil { - return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)}) - } - - enc := new(bytes.Buffer) - params := &miner.ProveCommitSectorParams{ - SectorNumber: sector.SectorNumber, - Proof: sector.Proof, - } - - if err := params.MarshalCBOR(enc); err != nil { - return ctx.Send(SectorCommitFailed{xerrors.Errorf("could not serialize commit sector parameters: %w", err)}) - } - - mi, err := m.Api.StateMinerInfo(ctx.Context(), m.maddr, ts.Key()) - if err != nil { - log.Errorf("handleCommitting: api error, not proceeding: %+v", err) - return nil - } - - pci, err := m.Api.StateSectorPreCommitInfo(ctx.Context(), m.maddr, sector.SectorNumber, ts.Key()) - if err != nil { - return xerrors.Errorf("getting precommit info: %w", err) - } - if pci == nil { - return ctx.Send(SectorCommitFailed{error: xerrors.Errorf("precommit info not found on chain")}) - } - - collateral, err := m.Api.StateMinerInitialPledgeCollateral(ctx.Context(), m.maddr, pci.Info, ts.Key()) - if err != nil { - return xerrors.Errorf("getting initial pledge collateral: %w", err) - } - - collateral = big.Sub(collateral, pci.PreCommitDeposit) - if collateral.LessThan(big.Zero()) { - collateral = big.Zero() - } - - collateral, err = collateralSendAmount(ctx.Context(), m.Api, m.maddr, cfg, collateral) - if err != nil { - return err - } - - goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee)) - 
- from, _, err := m.addrSel.AddressFor(ctx.Context(), m.Api, mi, api.CommitAddr, goodFunds, collateral) - if err != nil { - return ctx.Send(SectorCommitFailed{xerrors.Errorf("no good address to send commit message from: %w", err)}) - } - - // TODO: check seed / ticket / deals are up to date - mcid, err := sendMsg(ctx.Context(), m.Api, from, m.maddr, builtin.MethodsMiner.ProveCommitSector, collateral, big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) - if err != nil { - return ctx.Send(SectorCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)}) - } - - return ctx.Send(SectorCommitSubmitted{ - Message: mcid, - }) + return pams, dealIDs, nil } func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector SectorInfo) error { @@ -788,6 +850,11 @@ func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector S return ctx.Send(SectorCommitFailed{xerrors.Errorf("sector had nil commR or commD")}) } + pams, dealIDs, err := m.processPieces(ctx.Context(), sector, false) + if err != nil { + return err + } + res, err := m.commiter.AddCommit(ctx.Context(), sector, AggregateInput{ Info: proof.AggregateSealVerifyInfo{ Number: sector.SectorNumber, @@ -796,8 +863,14 @@ func (m *Sealing) handleSubmitCommitAggregate(ctx statemachine.Context, sector S SealedCID: *sector.CommR, UnsealedCID: *sector.CommD, }, - Proof: sector.Proof, // todo: this correct?? 
+ Proof: sector.Proof, Spt: sector.SectorType, + + ActivationManifest: miner2.SectorActivationManifest{ + SectorNumber: sector.SectorNumber, + Pieces: pams, + }, + DealIDPrecommit: len(dealIDs) > 0, }) if err != nil || res.Error != "" { @@ -875,7 +948,7 @@ func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorIn return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)}) } - if cfg.MakeCCSectorsAvailable && !sector.hasDeals() { + if cfg.MakeCCSectorsAvailable && !sector.hasData() { return ctx.Send(SectorFinalizedAvailable{}) } return ctx.Send(SectorFinalized{}) diff --git a/storage/pipeline/types.go b/storage/pipeline/types.go index e752eb2b9..7b263dd6a 100644 --- a/storage/pipeline/types.go +++ b/storage/pipeline/types.go @@ -2,14 +2,20 @@ package sealing import ( "context" + "encoding/json" + "io" "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -41,6 +47,20 @@ const ( RetCommitFailed = ReturnState(CommitFailed) ) +type UniversalPieceInfo interface { + Impl() piece.PieceDealInfo + String() string + Key() piece.PieceKey + + Valid(nv network.Version) error + StartEpoch() (abi.ChainEpoch, error) + EndEpoch() (abi.ChainEpoch, error) + PieceCID() cid.Cid + KeepUnsealedRequested() bool + + GetAllocation(ctx context.Context, aapi piece.AllocationAPI, tsk types.TipSetKey) (*verifreg.Allocation, error) +} + type SectorInfo struct { State SectorState SectorNumber abi.SectorNumber @@ -49,7 +69,7 @@ type SectorInfo struct { // Packing CreationTime 
int64 // unix seconds - Pieces []api.SectorPiece + Pieces []SafeSectorPiece // PreCommit1 TicketValue abi.SealRandomness @@ -79,7 +99,7 @@ type SectorInfo struct { // CCUpdate CCUpdate bool - CCPieces []api.SectorPiece + CCPieces []SafeSectorPiece UpdateSealed *cid.Cid UpdateUnsealed *cid.Cid ReplicaUpdateProof storiface.ReplicaUpdateProof @@ -113,18 +133,19 @@ type SectorInfo struct { func (t *SectorInfo) pieceInfos() []abi.PieceInfo { out := make([]abi.PieceInfo, len(t.Pieces)) for i, p := range t.Pieces { - out[i] = p.Piece + out[i] = p.Piece() } return out } -func (t *SectorInfo) dealIDs() []abi.DealID { - out := make([]abi.DealID, 0, len(t.Pieces)) - for _, p := range t.Pieces { - if p.DealInfo == nil { +func (t *SectorInfo) nonPaddingPieceInfos() []abi.PieceInfo { + out := make([]abi.PieceInfo, len(t.Pieces)) + for i, p := range t.Pieces { + if !p.HasDealInfo() { continue } - out = append(out, p.DealInfo.DealID) + + out[i] = p.Piece() } return out } @@ -132,14 +153,14 @@ func (t *SectorInfo) dealIDs() []abi.DealID { func (t *SectorInfo) existingPieceSizes() []abi.UnpaddedPieceSize { out := make([]abi.UnpaddedPieceSize, len(t.Pieces)) for i, p := range t.Pieces { - out[i] = p.Piece.Size.Unpadded() + out[i] = p.Piece().Size.Unpadded() } return out } -func (t *SectorInfo) hasDeals() bool { +func (t *SectorInfo) hasData() bool { for _, piece := range t.Pieces { - if piece.DealInfo != nil { + if piece.HasDealInfo() { return true } } @@ -151,7 +172,7 @@ func (t *SectorInfo) sealingCtx(ctx context.Context) context.Context { // TODO: can also take start epoch into account to give priority to sectors // we need sealed sooner - if t.hasDeals() { + if t.hasData() { return sealer.WithPriority(ctx, DealSectorPriority) } @@ -160,19 +181,19 @@ func (t *SectorInfo) sealingCtx(ctx context.Context) context.Context { // Returns list of offset/length tuples of sector data ranges which clients // requested to keep unsealed -func (t *SectorInfo) keepUnsealedRanges(pieces 
[]api.SectorPiece, invert, alwaysKeep bool) []storiface.Range { +func (t *SectorInfo) keepUnsealedRanges(pieces []SafeSectorPiece, invert, alwaysKeep bool) []storiface.Range { var out []storiface.Range var at abi.UnpaddedPieceSize for _, piece := range pieces { - psize := piece.Piece.Size.Unpadded() + psize := piece.Piece().Size.Unpadded() at += psize - if piece.DealInfo == nil { + if !piece.HasDealInfo() { continue } - keep := piece.DealInfo.KeepUnsealed || alwaysKeep + keep := piece.DealInfo().KeepUnsealedRequested() || alwaysKeep if keep == invert { continue @@ -195,3 +216,138 @@ type SealingStateEvt struct { After SectorState Error string } + +// SafeSectorPiece is a wrapper around SectorPiece which makes it hard to misuse +// especially by making it hard to access raw Deal / DDO info +type SafeSectorPiece struct { + real api.SectorPiece +} + +func SafePiece(piece api.SectorPiece) SafeSectorPiece { + return SafeSectorPiece{piece} +} + +var _ UniversalPieceInfo = &SafeSectorPiece{} + +func (sp *SafeSectorPiece) Piece() abi.PieceInfo { + return sp.real.Piece +} + +func (sp *SafeSectorPiece) HasDealInfo() bool { + return sp.real.DealInfo != nil +} + +func (sp *SafeSectorPiece) DealInfo() UniversalPieceInfo { + return sp.real.DealInfo +} + +// cbor passthrough +func (sp *SafeSectorPiece) UnmarshalCBOR(r io.Reader) (err error) { + return sp.real.UnmarshalCBOR(r) +} + +func (sp *SafeSectorPiece) MarshalCBOR(w io.Writer) error { + return sp.real.MarshalCBOR(w) +} + +// json passthrough +func (sp *SafeSectorPiece) UnmarshalJSON(b []byte) error { + return json.Unmarshal(b, &sp.real) +} + +func (sp *SafeSectorPiece) MarshalJSON() ([]byte, error) { + return json.Marshal(sp.real) +} + +type handleDealInfoParams struct { + FillerHandler func(UniversalPieceInfo) error + BuiltinMarketHandler func(UniversalPieceInfo) error + DDOHandler func(UniversalPieceInfo) error +} + +func (sp *SafeSectorPiece) handleDealInfo(params handleDealInfoParams) error { + if !sp.HasDealInfo() { + 
if params.FillerHandler == nil { + return xerrors.Errorf("FillerHandler is not provided") + } + return params.FillerHandler(sp) + } + + if sp.real.DealInfo.PublishCid != nil { + if params.BuiltinMarketHandler == nil { + return xerrors.Errorf("BuiltinMarketHandler is not provided") + } + return params.BuiltinMarketHandler(sp) + } + + if params.DDOHandler == nil { + return xerrors.Errorf("DDOHandler is not provided") + } + return params.DDOHandler(sp) +} + +// SectorPiece Proxy + +func (sp *SafeSectorPiece) Impl() piece.PieceDealInfo { + if !sp.HasDealInfo() { + return piece.PieceDealInfo{} + } + + return sp.real.DealInfo.Impl() +} + +func (sp *SafeSectorPiece) String() string { + if !sp.HasDealInfo() { + return "" + } + + return sp.real.DealInfo.String() +} + +func (sp *SafeSectorPiece) Key() piece.PieceKey { + return sp.real.DealInfo.Key() +} + +func (sp *SafeSectorPiece) Valid(nv network.Version) error { + return sp.real.DealInfo.Valid(nv) +} + +func (sp *SafeSectorPiece) StartEpoch() (abi.ChainEpoch, error) { + if !sp.HasDealInfo() { + return 0, xerrors.Errorf("no deal info") + } + + return sp.real.DealInfo.StartEpoch() +} + +func (sp *SafeSectorPiece) EndEpoch() (abi.ChainEpoch, error) { + if !sp.HasDealInfo() { + return 0, xerrors.Errorf("no deal info") + } + + return sp.real.DealInfo.EndEpoch() +} + +func (sp *SafeSectorPiece) PieceCID() cid.Cid { + if !sp.HasDealInfo() { + return sp.real.Piece.PieceCID + } + + return sp.real.DealInfo.PieceCID() +} + +func (sp *SafeSectorPiece) KeepUnsealedRequested() bool { + if !sp.HasDealInfo() { + return false + } + + return sp.real.DealInfo.KeepUnsealedRequested() +} + +func (sp *SafeSectorPiece) GetAllocation(ctx context.Context, aapi piece.AllocationAPI, tsk types.TipSetKey) (*verifreg.Allocation, error) { + if !sp.HasDealInfo() { + return nil, xerrors.Errorf("no deal info") + } + + return sp.real.DealInfo.GetAllocation(ctx, aapi, tsk) +} diff --git a/storage/pipeline/types_test.go b/storage/pipeline/types_test.go index 
b8fbb113a..d92b68d55 100644 --- a/storage/pipeline/types_test.go +++ b/storage/pipeline/types_test.go @@ -13,6 +13,7 @@ import ( tutils "github.com/filecoin-project/specs-actors/v2/support/testing" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/storage/pipeline/piece" ) func TestSectorInfoSerialization(t *testing.T) { @@ -23,9 +24,9 @@ func TestSectorInfoSerialization(t *testing.T) { t.Fatal(err) } - dealInfo := api.PieceDealInfo{ + dealInfo := piece.PieceDealInfo{ DealID: d, - DealSchedule: api.DealSchedule{ + DealSchedule: piece.DealSchedule{ StartEpoch: 0, EndEpoch: 100, }, @@ -43,13 +44,13 @@ func TestSectorInfoSerialization(t *testing.T) { si := &SectorInfo{ State: "stateful", SectorNumber: 234, - Pieces: []api.SectorPiece{{ + Pieces: []SafeSectorPiece{{real: api.SectorPiece{ Piece: abi.PieceInfo{ Size: 5, PieceCID: dummyCid, }, DealInfo: &dealInfo, - }}, + }}}, CommD: &dummyCid, CommR: nil, Proof: nil, @@ -77,8 +78,8 @@ func TestSectorInfoSerialization(t *testing.T) { assert.Equal(t, si.State, si2.State) assert.Equal(t, si.SectorNumber, si2.SectorNumber) - assert.Equal(t, si.Pieces[0].DealInfo.DealID, si2.Pieces[0].DealInfo.DealID) - assert.Equal(t, si.Pieces[0].DealInfo.DealProposal.PieceCID, si2.Pieces[0].DealInfo.DealProposal.PieceCID) + assert.Equal(t, si.Pieces[0].Impl().DealID, si2.Pieces[0].Impl().DealID) + assert.Equal(t, si.Pieces[0].Impl().DealProposal.PieceCID, si2.Pieces[0].Impl().DealProposal.PieceCID) assert.Equal(t, *si.CommD, *si2.CommD) assert.DeepEqual(t, si.TicketValue, si2.TicketValue) assert.Equal(t, si.TicketEpoch, si2.TicketEpoch) diff --git a/storage/pipeline/upgrade_queue.go b/storage/pipeline/upgrade_queue.go index 9d9e1ca46..5e3392a9f 100644 --- a/storage/pipeline/upgrade_queue.go +++ b/storage/pipeline/upgrade_queue.go @@ -21,7 +21,7 @@ func (m *Sealing) MarkForUpgrade(ctx context.Context, id abi.SectorNumber) error return xerrors.Errorf("unable to snap-up sectors not in the 'Proving' state") } - if 
si.hasDeals() { + if si.hasData() { return xerrors.Errorf("not a committed-capacity sector, has deals") } diff --git a/storage/sealer/cbor_gen.go b/storage/sealer/cbor_gen.go index 22da1b520..e4b8e644d 100644 --- a/storage/sealer/cbor_gen.go +++ b/storage/sealer/cbor_gen.go @@ -33,7 +33,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { } // t.ID (storiface.CallID) (struct) - if len("ID") > cbg.MaxLength { + if len("ID") > 8192 { return xerrors.Errorf("Value in field \"ID\" was too long") } @@ -49,7 +49,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { } // t.State (sealer.CallState) (uint64) - if len("State") > cbg.MaxLength { + if len("State") > 8192 { return xerrors.Errorf("Value in field \"State\" was too long") } @@ -65,7 +65,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { } // t.Result (sealer.ManyBytes) (struct) - if len("Result") > cbg.MaxLength { + if len("Result") > 8192 { return xerrors.Errorf("Value in field \"Result\" was too long") } @@ -81,7 +81,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { } // t.RetType (sealer.ReturnType) (string) - if len("RetType") > cbg.MaxLength { + if len("RetType") > 8192 { return xerrors.Errorf("Value in field \"RetType\" was too long") } @@ -92,7 +92,7 @@ func (t *Call) MarshalCBOR(w io.Writer) error { return err } - if len(t.RetType) > cbg.MaxLength { + if len(t.RetType) > 8192 { return xerrors.Errorf("Value in field t.RetType was too long") } @@ -134,7 +134,7 @@ func (t *Call) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -192,7 +192,7 @@ func (t *Call) UnmarshalCBOR(r io.Reader) (err error) { case "RetType": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -221,7 +221,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.ID (sealer.WorkID) (struct) - if len("ID") > cbg.MaxLength { + if 
len("ID") > 8192 { return xerrors.Errorf("Value in field \"ID\" was too long") } @@ -237,7 +237,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.Status (sealer.WorkStatus) (string) - if len("Status") > cbg.MaxLength { + if len("Status") > 8192 { return xerrors.Errorf("Value in field \"Status\" was too long") } @@ -248,7 +248,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { return err } - if len(t.Status) > cbg.MaxLength { + if len(t.Status) > 8192 { return xerrors.Errorf("Value in field t.Status was too long") } @@ -260,7 +260,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.StartTime (int64) (int64) - if len("StartTime") > cbg.MaxLength { + if len("StartTime") > 8192 { return xerrors.Errorf("Value in field \"StartTime\" was too long") } @@ -282,7 +282,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.WorkError (string) (string) - if len("WorkError") > cbg.MaxLength { + if len("WorkError") > 8192 { return xerrors.Errorf("Value in field \"WorkError\" was too long") } @@ -293,7 +293,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { return err } - if len(t.WorkError) > cbg.MaxLength { + if len(t.WorkError) > 8192 { return xerrors.Errorf("Value in field t.WorkError was too long") } @@ -305,7 +305,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.WorkerCall (storiface.CallID) (struct) - if len("WorkerCall") > cbg.MaxLength { + if len("WorkerCall") > 8192 { return xerrors.Errorf("Value in field \"WorkerCall\" was too long") } @@ -321,7 +321,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { } // t.WorkerHostname (string) (string) - if len("WorkerHostname") > cbg.MaxLength { + if len("WorkerHostname") > 8192 { return xerrors.Errorf("Value in field \"WorkerHostname\" was too long") } @@ -332,7 +332,7 @@ func (t *WorkState) MarshalCBOR(w io.Writer) error { return err } - if len(t.WorkerHostname) > cbg.MaxLength { + if len(t.WorkerHostname) > 8192 { return xerrors.Errorf("Value in field 
t.WorkerHostname was too long") } @@ -374,7 +374,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -397,7 +397,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) { case "Status": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -408,10 +408,10 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) { case "StartTime": { maj, extra, err := cr.ReadHeader() - var extraI int64 if err != nil { return err } + var extraI int64 switch maj { case cbg.MajUnsignedInt: extraI = int64(extra) @@ -434,7 +434,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) { case "WorkError": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -455,7 +455,7 @@ func (t *WorkState) UnmarshalCBOR(r io.Reader) (err error) { case "WorkerHostname": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -484,7 +484,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { } // t.Method (sealtasks.TaskType) (string) - if len("Method") > cbg.MaxLength { + if len("Method") > 8192 { return xerrors.Errorf("Value in field \"Method\" was too long") } @@ -495,7 +495,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { return err } - if len(t.Method) > cbg.MaxLength { + if len(t.Method) > 8192 { return xerrors.Errorf("Value in field t.Method was too long") } @@ -507,7 +507,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { } // t.Params (string) (string) - if len("Params") > cbg.MaxLength { + if len("Params") > 8192 { return xerrors.Errorf("Value in field \"Params\" was too long") } @@ -518,7 +518,7 @@ func (t *WorkID) MarshalCBOR(w io.Writer) error { return err } - if len(t.Params) > cbg.MaxLength { + if len(t.Params) > 8192 { 
return xerrors.Errorf("Value in field t.Params was too long") } @@ -560,7 +560,7 @@ func (t *WorkID) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -573,7 +573,7 @@ func (t *WorkID) UnmarshalCBOR(r io.Reader) (err error) { case "Method": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -584,7 +584,7 @@ func (t *WorkID) UnmarshalCBOR(r io.Reader) (err error) { case "Params": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } diff --git a/storage/sealer/storiface/cbor_gen.go b/storage/sealer/storiface/cbor_gen.go index 0b42136ea..79d6219eb 100644 --- a/storage/sealer/storiface/cbor_gen.go +++ b/storage/sealer/storiface/cbor_gen.go @@ -31,7 +31,7 @@ func (t *CallID) MarshalCBOR(w io.Writer) error { } // t.ID (uuid.UUID) (array) - if len("ID") > cbg.MaxLength { + if len("ID") > 8192 { return xerrors.Errorf("Value in field \"ID\" was too long") } @@ -42,7 +42,7 @@ func (t *CallID) MarshalCBOR(w io.Writer) error { return err } - if len(t.ID) > cbg.ByteArrayMaxLen { + if len(t.ID) > 2097152 { return xerrors.Errorf("Byte array in field t.ID was too long") } @@ -55,7 +55,7 @@ func (t *CallID) MarshalCBOR(w io.Writer) error { } // t.Sector (abi.SectorID) (struct) - if len("Sector") > cbg.MaxLength { + if len("Sector") > 8192 { return xerrors.Errorf("Value in field \"Sector\" was too long") } @@ -101,7 +101,7 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -118,19 +118,17 @@ func (t *CallID) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.ByteArrayMaxLen { + if extra > 2097152 { return fmt.Errorf("t.ID: byte array too large (%d)", 
extra) } if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - if extra != 16 { return fmt.Errorf("expected array to have 16 elements") } t.ID = [16]uint8{} - if _, err := io.ReadFull(cr, t.ID[:]); err != nil { return err } @@ -166,7 +164,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { } // t.Key (string) (string) - if len("Key") > cbg.MaxLength { + if len("Key") > 8192 { return xerrors.Errorf("Value in field \"Key\" was too long") } @@ -177,7 +175,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { return err } - if len(t.Key) > cbg.MaxLength { + if len(t.Key) > 8192 { return xerrors.Errorf("Value in field t.Key was too long") } @@ -189,7 +187,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { } // t.Value (string) (string) - if len("Value") > cbg.MaxLength { + if len("Value") > 8192 { return xerrors.Errorf("Value in field \"Value\" was too long") } @@ -200,7 +198,7 @@ func (t *SecDataHttpHeader) MarshalCBOR(w io.Writer) error { return err } - if len(t.Value) > cbg.MaxLength { + if len(t.Value) > 8192 { return xerrors.Errorf("Value in field t.Value was too long") } @@ -242,7 +240,7 @@ func (t *SecDataHttpHeader) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -255,7 +253,7 @@ func (t *SecDataHttpHeader) UnmarshalCBOR(r io.Reader) (err error) { case "Key": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -266,7 +264,7 @@ func (t *SecDataHttpHeader) UnmarshalCBOR(r io.Reader) (err error) { case "Value": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -295,7 +293,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { } // t.URL (string) (string) - if len("URL") > cbg.MaxLength { + if len("URL") > 8192 { return 
xerrors.Errorf("Value in field \"URL\" was too long") } @@ -306,7 +304,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { return err } - if len(t.URL) > cbg.MaxLength { + if len(t.URL) > 8192 { return xerrors.Errorf("Value in field t.URL was too long") } @@ -318,7 +316,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { } // t.Local (bool) (bool) - if len("Local") > cbg.MaxLength { + if len("Local") > 8192 { return xerrors.Errorf("Value in field \"Local\" was too long") } @@ -334,7 +332,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { } // t.Headers ([]storiface.SecDataHttpHeader) (slice) - if len("Headers") > cbg.MaxLength { + if len("Headers") > 8192 { return xerrors.Errorf("Value in field \"Headers\" was too long") } @@ -345,7 +343,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { return err } - if len(t.Headers) > cbg.MaxLength { + if len(t.Headers) > 8192 { return xerrors.Errorf("Slice value in field t.Headers was too long") } @@ -356,6 +354,7 @@ func (t *SectorLocation) MarshalCBOR(w io.Writer) error { if err := v.MarshalCBOR(cw); err != nil { return err } + } return nil } @@ -389,7 +388,7 @@ func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { for i := uint64(0); i < n; i++ { { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -402,7 +401,7 @@ func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { case "URL": { - sval, err := cbg.ReadString(cr) + sval, err := cbg.ReadStringWithMax(cr, 8192) if err != nil { return err } @@ -435,7 +434,7 @@ func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { return err } - if extra > cbg.MaxLength { + if extra > 8192 { return fmt.Errorf("t.Headers: array too large (%d)", extra) } @@ -463,6 +462,7 @@ func (t *SectorLocation) UnmarshalCBOR(r io.Reader) (err error) { } } + } } diff --git a/storage/sectorblocks/blocks.go b/storage/sectorblocks/blocks.go index 1593174bd..4b84e18fb 
100644 --- a/storage/sectorblocks/blocks.go +++ b/storage/sectorblocks/blocks.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/storage/pipeline/piece" "github.com/filecoin-project/lotus/storage/sealer/storiface" ) @@ -48,7 +49,7 @@ func DsKeyToDealID(key datastore.Key) (uint64, error) { } type SectorBuilder interface { - SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d api.PieceDealInfo) (api.SectorOffset, error) + SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d piece.PieceDealInfo) (api.SectorOffset, error) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (api.SectorInfo, error) } @@ -100,7 +101,7 @@ func (st *SectorBlocks) writeRef(ctx context.Context, dealID abi.DealID, sectorI return st.keys.Put(ctx, DealIDToDsKey(dealID), newRef) // TODO: batch somehow } -func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { +func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d piece.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { so, err := st.SectorBuilder.SectorAddPieceToAny(ctx, size, r, d) if err != nil { return 0, 0, err