diff --git a/CHANGELOG.md b/CHANGELOG.md index e4fc4c67b..330a14d85 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,12 +1,141 @@ # Lotus changelog +# 1.15.0 / 2022-03-09 + +This is an optional release with retrieval improvements(client side), SP ux with unsealing, snap deals and regular deal making and many other new features, improvements and bug fixes. + +## Highlights +- feat:sealing: StartEpochSealingBuffer triggers packing on time([filecoin-project/lotus#7905](https://github.com/filecoin-project/lotus/pull/7905)) + - use the `StartEpochSealingBuffer` configuration variable as a way to enforce that sectors are packed for sealing / updating no matter how many deals they have if the nearest deal start date is close enough to the present. +- feat: #6017 market: retrieval ask CLI command ([filecoin-project/lotus#7814](https://github.com/filecoin-project/lotus/pull/7814)) +- feat(graphsync): allow setting of per-peer incoming requests for miners ([filecoin-project/lotus#7578](https://github.com/filecoin-project/lotus/pull/7578)) + - by setting `SimultaneousTransfersForStoragePerClient` in deal making configuration. 
+- Make retrieval even faster ([filecoin-project/lotus#7746](https://github.com/filecoin-project/lotus/pull/7746)) +- feat: #7747 sealing: Adding conf variable for capping number of concurrent unsealing jobs (#7884) ([filecoin-project/lotus#7884](https://github.com/filecoin-project/lotus/pull/7884)) + - by setting `MaxConcurrentUnseals` in `DAGStoreConfig` + +## New Features +- feat: mpool: Cache state nonces ([filecoin-project/lotus#8005](https://github.com/filecoin-project/lotus/pull/8005)) +- chore: build: make the OhSnap epoch configurable by an envvar for devnets ([filecoin-project/lotus#7995](https://github.com/filecoin-project/lotus/pull/7995)) +- Shed: Add a util to send a batch of messages ([filecoin-project/lotus#7667](https://github.com/filecoin-project/lotus/pull/7667)) +- Add api for transfer diagnostics ([filecoin-project/lotus#7759](https://github.com/filecoin-project/lotus/pull/7759)) +- Shed: Add a util to list terminated deals ([filecoin-project/lotus#7774](https://github.com/filecoin-project/lotus/pull/7774)) +- Expose EnableGasTracing as an env_var ([filecoin-project/lotus#7750](https://github.com/filecoin-project/lotus/pull/7750)) +- Command to list active sector locks ([filecoin-project/lotus#7735](https://github.com/filecoin-project/lotus/pull/7735)) +- Initial switch to OpenTelemetry ([filecoin-project/lotus#7725](https://github.com/filecoin-project/lotus/pull/7725)) + +## Improvements +- splitstore sortless compaction ([filecoin-project/lotus#8008](https://github.com/filecoin-project/lotus/pull/8008)) +- perf: chain: Make drand logs in daemon less noisy (#7955) ([filecoin-project/lotus#7955](https://github.com/filecoin-project/lotus/pull/7955)) +- chore: shed: storage stats 2.0 ([filecoin-project/lotus#7941](https://github.com/filecoin-project/lotus/pull/7941)) +- misc: api: Annotate lotus tests according to listed behaviors ([filecoin-project/lotus#7835](https://github.com/filecoin-project/lotus/pull/7835)) +- some basic splitstore 
refactors ([filecoin-project/lotus#7999](https://github.com/filecoin-project/lotus/pull/7999)) +- chore: sealer: quieten a log ([filecoin-project/lotus#7998](https://github.com/filecoin-project/lotus/pull/7998)) +- tvx: supply network version when extracting messages. ([filecoin-project/lotus#7996](https://github.com/filecoin-project/lotus/pull/7996)) +- chore: remove inaccurate comment in sealtasks ([filecoin-project/lotus#7977](https://github.com/filecoin-project/lotus/pull/7977)) +- Refactor: VM: Remove the NetworkVersionGetter ([filecoin-project/lotus#7818](https://github.com/filecoin-project/lotus/pull/7818)) +- refactor: state: Move randomness versioning out of the VM ([filecoin-project/lotus#7816](https://github.com/filecoin-project/lotus/pull/7816)) +- updating to new datastore/blockstore code with contexts ([filecoin-project/lotus#7646](https://github.com/filecoin-project/lotus/pull/7646)) +- Mempool msg selection should respect block message limits ([filecoin-project/lotus#7321](https://github.com/filecoin-project/lotus/pull/7321)) +- Minor improvement for OpenTelemetry ([filecoin-project/lotus#7760](https://github.com/filecoin-project/lotus/pull/7760)) +- Sort lotus-miner retrieval-deals by dealId ([filecoin-project/lotus#7749](https://github.com/filecoin-project/lotus/pull/7749)) +- dagstore pieceReader: Always read full in ReadAt ([filecoin-project/lotus#7737](https://github.com/filecoin-project/lotus/pull/7737)) + +## Bug Fixes +- fix: sealing: Stop recovery attempts after fault ([filecoin-project/lotus#8014](https://github.com/filecoin-project/lotus/pull/8014)) +- fix:snap: pay for the collateral difference needed if the miner available balance is insufficient ([filecoin-project/lotus#8234](https://github.com/filecoin-project/lotus/pull/8234)) +- sealer: fix error message ([filecoin-project/lotus#8136](https://github.com/filecoin-project/lotus/pull/8136)) +- typo in variable name 
([filecoin-project/lotus#8134](https://github.com/filecoin-project/lotus/pull/8134)) +- fix: sealer: allow enable/disabling ReplicaUpdate tasks ([filecoin-project/lotus#8093](https://github.com/filecoin-project/lotus/pull/8093)) +- chore: chain: fix log ([filecoin-project/lotus#7993](https://github.com/filecoin-project/lotus/pull/7993)) +- Fix: chain: create a new VM for each epoch ([filecoin-project/lotus#7966](https://github.com/filecoin-project/lotus/pull/7966)) +- fix: doc generation struct slice example value ([filecoin-project/lotus#7851](https://github.com/filecoin-project/lotus/pull/7851)) +- fix: returned error not be accept correctly ([filecoin-project/lotus#7852](https://github.com/filecoin-project/lotus/pull/7852)) +- fix: #7577 markets: When retrying Add Piece, first seek to start of reader ([filecoin-project/lotus#7812](https://github.com/filecoin-project/lotus/pull/7812)) +- misc: n/a sealing: Fix grammatical error in a log warning message ([filecoin-project/lotus#7831](https://github.com/filecoin-project/lotus/pull/7831)) +- sectors update-state checks if sector exists before changing its state ([filecoin-project/lotus#7762](https://github.com/filecoin-project/lotus/pull/7762)) +- SplitStore: suppress compaction near upgrades ([filecoin-project/lotus#7734](https://github.com/filecoin-project/lotus/pull/7734)) + +## Dependency Updates +- github.com/filecoin-project/go-commp-utils (v0.1.2 -> v0.1.3): +- github.com/filecoin-project/dagstore (v0.4.3 -> v0.4.4): +- github.com/filecoin-project/go-fil-markets (v1.13.4 -> v1.19.2): +- github.com/filecoin-project/go-statestore (v0.1.1 -> v0.2.0): +- github.com/filecoin-project/go-storedcounter (v0.0.0-20200421200003-1c99c62e8a5b -> v0.1.0): +- github.com/filecoin-project/specs-actors/v2 (v2.3.5 -> v2.3.6): + - feat(deps): update markets stack ([filecoin-project/lotus#7959](https://github.com/filecoin-project/lotus/pull/7959)) + - Use go-libp2p-connmgr v0.3.1 
([filecoin-project/lotus#7957](https://github.com/filecoin-project/lotus/pull/7957)) + - dep/fix 7701 Dependency: update to ipld-legacy to v0.1.1 ([filecoin-project/lotus#7751](https://github.com/filecoin-project/lotus/pull/7751)) + +## Others +- chore: backport: release ([filecoin-project/lotus#8245](https://github.com/filecoin-project/lotus/pull/8245)) +- Lotus release v1.15.0-rc3 ([filecoin-project/lotus#8236](https://github.com/filecoin-project/lotus/pull/8236)) +- Lotus release v1.15.0-rc2 ([filecoin-project/lotus#8211](https://github.com/filecoin-project/lotus/pull/8211)) +- Merge branch 'releases' into release/v1.15.0 +- chore: build: backport releases ([filecoin-project/lotus#8193](https://github.com/filecoin-project/lotus/pull/8193)) +- Merge branch 'releases' into release/v1.15.0 +- bump the version to v1.15.0-rc1 +- chore: build: v1.14.0 -> master ([filecoin-project/lotus#8053](https://github.com/filecoin-project/lotus/pull/8053)) +- chore: merge release/v1.14.0 PRs into master ([filecoin-project/lotus#7979](https://github.com/filecoin-project/lotus/pull/7979)) +- chore: update PR template ([filecoin-project/lotus#7918](https://github.com/filecoin-project/lotus/pull/7918)) +- build: release: bump master version to v1.15.0-dev ([filecoin-project/lotus#7922](https://github.com/filecoin-project/lotus/pull/7922)) +- misc: docs: remove issue number from the pr title ([filecoin-project/lotus#7902](https://github.com/filecoin-project/lotus/pull/7902)) +- Snapcraft grade no develgrade ([filecoin-project/lotus#7802](https://github.com/filecoin-project/lotus/pull/7802)) +- chore: create pull_request_template.md ([filecoin-project/lotus#7726](https://github.com/filecoin-project/lotus/pull/7726)) +- Disable appimage ([filecoin-project/lotus#7707](https://github.com/filecoin-project/lotus/pull/7707)) + +## Contributors +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| @arajasek | 73 | +7232/-2778 | 386 | +| 
@zenground0 | 27 | +5604/-1049 | 219 | +| @vyzo | 118 | +4356/-1470 | 253 | +| @zl | 1 | +3725/-309 | 8 | +| @dirkmc | 7 | +1392/-1110 | 61 | +| arajasek | 37 | +221/-1329 | 90 | +| @magik6k | 33 | +1138/-336 | 101 | +| @whyrusleeping | 2 | +483/-585 | 28 | +| Darko Brdareski | 14 | +725/-276 | 154 | +| @rvagg | 2 | +43/-947 | 10 | +| @hannahhoward | 5 | +436/-335 | 31 | +| @hannahhoward | 12 | +507/-133 | 37 | +| @jennijuju | 27 | +333/-178 | 54 | +| @TheMenko | 8 | +237/-179 | 17 | +| c r | 2 | +227/-45 | 12 | +| @dirkmck | 12 | +188/-40 | 27 | +| @ribasushi | 3 | +128/-62 | 3 | +| @raulk | 6 | +128/-49 | 9 | +| @Whyrusleeping | 1 | +76/-70 | 8 | +| @Stebalien | 1 | +55/-37 | 1 | +| @jennijuju | 11 | +29/-16 | 11 | +| @aarshkshah1992 | 1 | +23/-19 | 5 | +| @travisperson | 1 | +0/-18 | 2 | +| @gstuart | 3 | +12/-1 | 3 | +| @coryschwartz | 4 | +5/-6 | 4 | +| @pefish | 1 | +4/-3 | 1 | +| @Kubuxu | 1 | +5/-2 | 2 | +| Colin Kennedy | 1 | +4/-2 | 1 | +| Rob Quist | 1 | +2/-2 | 1 | +| @shotcollin | 1 | +1/-1 | 1 | + + +# 1.14.4 / 2022-03-03 + +This is a *highly recommended* optional release for storage providers that are doing snap deals. This fixes the bug +that causes some snap deal sectors to be stuck in `FinalizeReplicaUpdate`. In addition, SPs should be able to force +update sector status without getting blocked by `normal shutdown of state machine`. + +# v1.14.3 / 2022-02-28 + +This is an **optional** release, that includes a fix to properly register the `--really-do-it` flag for abort-upgrade. + # 1.14.2 / 2022-02-24 -This is an **optional** release of lotus, that's had a couple more improvements w.r.t Snap experience for storage providers in preparation of the[upcoming OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550). 
+This is an **optional** release of lotus, that's had a couple more improvements w.r.t Snap experience for storage providers in preparation of the [upcoming OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550). Note that the network is STILL scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. All node operators, including storage providers, must upgrade to at least Lotus v1.14.0 before that time. Storage providers must update their daemons, miners, and worker(s). -Wanna know how to Snap your deal? Check [this](https://github.com/filecoin-project/lotus/discussions/8141) out! +Wanna know how to Snap your deal? Check [this](https://github.com/filecoin-project/lotus/discussions/8141) out! ## Bug Fixes - fix lotus-bench for sealing jobs (#8173) @@ -15,8 +144,8 @@ Wanna know how to Snap your deal? Check [this](https://github.com/filecoin-proje - fix: sealing: missing file type (#8180) ## Others -- Retract force-pushed v1.14.0 to work around stale gomod caches (#8159): We originally tagged v1.14.0 off the wrong - commit and fixed that by a force push, in which is a really bad practise since it messes up the go mod. Therefore, +- Retract force-pushed v1.14.0 to work around stale gomod caches (#8159): We originally tagged v1.14.0 off the wrong + commit and fixed that by a force push, which is a really bad practice since it messes up the go mod. Therefore, we want to retract it and users may use v1.14.1&^. ## Contributors @@ -35,7 +164,7 @@ This is an **optional** release of lotus, that fixes the incorrect *comment* of # 1.14.0 / 2022-02-17 -This is a MANDATORY release of Lotus that introduces [Filecoin network v15, +This is a MANDATORY release of Lotus that introduces [Filecoin network v15, codenamed the OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550). The network is scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. 
All node operators, including storage providers, must upgrade to this release (or a later release) before that time. Storage providers must update their daemons, miners, and worker(s). @@ -52,7 +181,7 @@ It is recommended that storage providers download the new params before updating - Upgrade the Lotus daemon and miner **when the previous step is complete** All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (90 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries. - + ## New Features and Changes - Integrate actor v7-rc1: - Integrate v7 actors ([#7617](https://github.com/filecoin-project/lotus/pull/7617)) @@ -78,7 +207,7 @@ All node operators, including storage providers, should be aware that a pre-migr - Fix: state: circsuypply calc around null blocks ([#7890](https://github.com/filecoin-project/lotus/pull/7890)) - Mempool msg selection should respect block message limits ([#7321](https://github.com/filecoin-project/lotus/pull/7321)) SplitStore: supress compaction near upgrades ([#7734](https://github.com/filecoin-project/lotus/pull/7734)) - + ## Others - chore: create pull_request_template.md ([#7726](https://github.com/filecoin-project/lotus/pull/7726)) @@ -103,13 +232,13 @@ All node operators, including storage providers, should be aware that a pre-migr # v1.13.2 / 2022-01-09 -Lotus v1.13.2 is a *highly recommended* feature release with remarkable retrieval improvements, new features like -worker management, schedule enhancements and so on. +Lotus v1.13.2 is a *highly recommended* feature release with remarkable retrieval improvements, new features like +worker management, schedule enhancements and so on. 
## Highlights - 🚀🚀🚀Improve retrieval deal experience - - Testing result with MinerX.3 shows the retrieval deal success rate has increased dramatically with faster transfer - speed, you can join or follow along furthur performance testings [here](https://github.com/filecoin-project/lotus/discussions/7874). We recommend application developers to integrate with the new + - Testing result with MinerX.3 shows the retrieval deal success rate has increased dramatically with faster transfer + speed, you can join or follow along further performance testing [here](https://github.com/filecoin-project/lotus/discussions/7874). We recommend application developers to integrate with the new retrieval APIs to provide a better client experience. - 🌟🌟🌟 Reduce retrieval Time-To-First-Byte over 100x ([#7693](https://github.com/filecoin-project/lotus/pull/7693)) - This change makes most free, small retrievals sub-second @@ -201,7 +330,7 @@ worker management, schedule enhancements and so on. | @jennijuju | 1 | +1/-1 | 1 | | @hunjixin | 1 | +1/-0 | 1 | - + # v1.13.1 / 2021-11-26 @@ -284,32 +413,32 @@ Contributors | @hannahhoward | 1 | +3/-2 | 2 | | Marten Seemann | 1 | +3/-0 | 1 | | @ZenGround0 | 1 | +1/-1 | 1 | - + # v1.13.0 / 2021-10-18 -Lotus v1.13.0 is a *highly recommended* feature release for all lotus users(i.e: storage providers, data brokers, application developers and so on) that supports the upcoming +Lotus v1.13.0 is a *highly recommended* feature release for all lotus users(i.e: storage providers, data brokers, application developers and so on) that supports the upcoming [Network v14 Chocolate upgrade](https://github.com/filecoin-project/lotus/discussions/7431). This feature release includes the latest functionalities and improvements, like data transfer rate-limiting for both storage and retrieval deals, proof v10 with CUDA support, etc. You can find more details in the Changelog below. 
## Highlights - Enable separate storage and retrieval transfer limits ([filecoin-project/lotus#7405](https://github.com/filecoin-project/lotus/pull/7405)) - - `SimultaneousTransfer` is now replaced by `SimultaneousTransfersForStorage` and `SimultaneousTransfersForRetrieval`, where users may set the amount of ongoing data transfer for storage and retrieval deals in parallel separately. The default value for both is set to 20. - - If you are using the lotus client, these two configuration variables are under the `Client` section in `./lotus/config.toml`. - - If you are a service provider, these two configuration variables should be set under the `Dealmaking` section in `/.lotusminer/config.toml`. + - `SimultaneousTransfer` is now replaced by `SimultaneousTransfersForStorage` and `SimultaneousTransfersForRetrieval`, where users may set the amount of ongoing data transfer for storage and retrieval deals in parallel separately. The default value for both is set to 20. + - If you are using the lotus client, these two configuration variables are under the `Client` section in `./lotus/config.toml`. + - If you are a service provider, these two configuration variables should be set under the `Dealmaking` section in `/.lotusminer/config.toml`. - Update proofs to v10.0.0 ([filecoin-project/lotus#7420](https://github.com/filecoin-project/lotus/pull/7420)) - - This version supports CUDA. To enable CUDA instead of openCL, build lotus with `FFI_USE_CUDA=1 FFI_BUILD_FROM_SOURCE=1 ...`. - - You can find additional Nvidia driver installation instructions written by MinerX fellows [here](https://github.com/filecoin-project/lotus/discussions/7443#discussioncomment-1425274) and perf improvements result on PC2/C2/WindowPoSt computation on different profiles [here](https://github.com/filecoin-project/lotus/discussions/7443), most people observe a 30-50% decrease in computation time. + - This version supports CUDA. 
To enable CUDA instead of openCL, build lotus with `FFI_USE_CUDA=1 FFI_BUILD_FROM_SOURCE=1 ...`. + - You can find additional Nvidia driver installation instructions written by MinerX fellows [here](https://github.com/filecoin-project/lotus/discussions/7443#discussioncomment-1425274) and perf improvements result on PC2/C2/WindowPoSt computation on different profiles [here](https://github.com/filecoin-project/lotus/discussions/7443), most people observe a 30-50% decrease in computation time. ## New Features - Feat/datamodel selector retrieval ([filecoin-project/lotus#6393](https://github.com/filecoin-project/lotus/pull/66393393)) - - This introduces a new RetrievalOrder-struct field and a CLI option that takes a string representation as understood by [https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath](https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath). This allows for partial retrieval of any sub-DAG of a deal provided the user knows the exact low-level shape of the deal contents. - - For example, to retrieve the first entry of a UnixFS directory by executing, run `lotus client retrieve --miner f0XXXXX --datamodel-path-selector 'Links/0/Hash' bafyROOTCID ~/output` + - This introduces a new RetrievalOrder-struct field and a CLI option that takes a string representation as understood by [https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath](https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath). This allows for partial retrieval of any sub-DAG of a deal provided the user knows the exact low-level shape of the deal contents. 
+ - For example, to retrieve the first entry of a UnixFS directory by executing, run `lotus client retrieve --miner f0XXXXX --datamodel-path-selector 'Links/0/Hash' bafyROOTCID ~/output` - Expose storage stats on the metrics endpoint ([filecoin-project/lotus#7418](https://github.com/filecoin-project/lotus/pull/7418)) - feat: Catch panic to generate report and reraise ([filecoin-project/lotus#7341](https://github.com/filecoin-project/lotus/pull/7341)) - - Set `LOTUS_PANIC_REPORT_PATH` and `LOTUS_PANIC_JOURNAL_LOOKBACK` to get reports generated when a panic occurs on your daemon miner or workers. + - Set `LOTUS_PANIC_REPORT_PATH` and `LOTUS_PANIC_JOURNAL_LOOKBACK` to get reports generated when a panic occurs on your daemon miner or workers. - Add envconfig docs to the config ([filecoin-project/lotus#7412](https://github.com/filecoin-project/lotus/pull/7412)) - - You can now find supported env vars in [default-lotus-miner-config.toml](https://github.com/filecoin-project/lotus/blob/master/documentation/en/default-lotus-miner-config.toml). + - You can now find supported env vars in [default-lotus-miner-config.toml](https://github.com/filecoin-project/lotus/blob/master/documentation/en/default-lotus-miner-config.toml). 
- lotus shed: fr32 utils ([filecoin-project/lotus#7355](https://github.com/filecoin-project/lotus/pull/7355)) - Miner CLI: Allow trying to change owners of any miner actor ([filecoin-project/lotus#7328](https://github.com/filecoin-project/lotus/pull/7328)) - Add --unproven flag to the sectors list command ([filecoin-project/lotus#7308](https://github.com/filecoin-project/lotus/pull/7308)) @@ -326,7 +455,7 @@ This feature release includes the latest functionalities and improvements, like - Prep retrieval for selectors: no functional changes ([filecoin-project/lotus#7306](https://github.com/filecoin-project/lotus/pull/7306)) - Seed: improve helptext ([filecoin-project/lotus#7304](https://github.com/filecoin-project/lotus/pull/7304)) - Mempool: reduce size of sigValCache ([filecoin-project/lotus#7305](https://github.com/filecoin-project/lotus/pull/7305)) - - Stop indirectly depending on deprecated github.com/prometheus/common ([filecoin-project/lotus#7474](https://github.com/filecoin-project/lotus/pull/7474)) +- Stop indirectly depending on deprecated github.com/prometheus/common ([filecoin-project/lotus#7474](https://github.com/filecoin-project/lotus/pull/7474)) ## Bug Fixes - StateSearchMsg: Correct usage of the allowReplaced flag ([filecoin-project/lotus#7450](https://github.com/filecoin-project/lotus/pull/7450)) @@ -381,7 +510,7 @@ This feature release includes the latest functionalities and improvements, like # v1.12.0 / 2021-10-12 -This is a mandatory release of Lotus that introduces [Filecoin Network v14](https://github.com/filecoin-project/community/discussions/74#discussioncomment-1398542), codenamed the Chocolate upgrade. The Filecoin mainnet will upgrade at epoch 1231620, on 2021-10-26T13:30:00Z. +This is a mandatory release of Lotus that introduces [Filecoin Network v14](https://github.com/filecoin-project/community/discussions/74#discussioncomment-1398542), codenamed the Chocolate upgrade. 
The Filecoin mainnet will upgrade at epoch 1231620, on 2021-10-26T13:30:00Z. The Chocolate upgrade introduces the following FIPs, delivered in [v6 actors](https://github.com/filecoin-project/specs-actors/releases/tag/v6.0.0) @@ -395,7 +524,7 @@ The Chocolate upgrade introduces the following FIPs, delivered in [v6 actors](ht Note that this release is built on top of lotus v1.11.3. Enterprising users like storage providers, data brokers and others are recommended to use lotus v1.13.0 for latest new features, improvements and bug fixes. ## New Features and Changes -- Implement and support [FIP-0024](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0024.md) BatchBalancer & BatchDiscount Post-HyperDrive Adjustment: +- Implement and support [FIP-0024](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0024.md) BatchBalancer & BatchDiscount Post-HyperDrive Adjustment: - Precommit batch balancer support/config ([filecoin-project/lotus#7410](https://github.com/filecoin-project/lotus/pull/7410)) - Set `BatchPreCommitAboveBaseFee` to decide whether sending out a PreCommits in individual messages or in a batch. - The default value of `BatchPreCommitAboveBaseFee` and `AggregateAboveBaseFee` are now updated to 0.32nanoFIL. @@ -412,17 +541,17 @@ Note that this release is built on top of lotus v1.11.3. 
Enterprising users like ## Dependency Updates - Add [v6 actors](https://github.com/filecoin-project/specs-actors/releases/tag/v6.0.0) - **Protocol changes** - - Multisig Approve only hashes when hash in params - - FIP 0020 WithdrawBalance methods return withdrawn value - - FIP 0021 Fix bug in power calculation when extending verified deals sectors - - FIP 0022 PublishStorageDeals drops errors in batch - - FIP 0024 BatchBalancer update and burn added to PreCommitBatch - - FIP 0026 Add FaultMaxAge extension - - Reduce calls to power and reward actors by passing values from power cron - - Defensive programming hardening power cron against programmer error + - Multisig Approve only hashes when hash in params + - FIP 0020 WithdrawBalance methods return withdrawn value + - FIP 0021 Fix bug in power calculation when extending verified deals sectors + - FIP 0022 PublishStorageDeals drops errors in batch + - FIP 0024 BatchBalancer update and burn added to PreCommitBatch + - FIP 0026 Add FaultMaxAge extension + - Reduce calls to power and reward actors by passing values from power cron + - Defensive programming hardening power cron against programmer error - **Implementation changes** - - Move to xerrors - - Improved logging: burn events are not logged with reasons and burned value. + - Move to xerrors + - Improved logging: burn events are not logged with reasons and burned value. - github.com/filecoin-project/go-state-types (v0.1.1-0.20210810190654-139e0e79e69e -> v0.1.1-0.20210915140513-d354ccf10379): ## Others @@ -447,20 +576,20 @@ Note that this release is built on top of lotus v1.11.3. Enterprising users like # v1.11.3 / 2021-09-29 -lotus v1.11.3 is a feature release that's **highly recommended to ALL lotus users to upgrade**, including node -operators, storage providers and clients. 
It includes many improvements and bug fixes that result in perf +lotus v1.11.3 is a feature release that's **highly recommended to ALL lotus users to upgrade**, including node +operators, storage providers and clients. It includes many improvements and bug fixes that result in perf improvements in different area, like deal making, sealing and so on. ## Highlights - 🌟🌟Introduce `MaxStagingDealsBytes - reject new deals if our staging deals area is full ([filecoin-project/lotus#7276](https://github.com/filecoin-project/lotus/pull/7276)) - - Set `MaxStagingDealsBytes` under the [Dealmaking] section of the markets' subsystem's `config.toml` to reject new incoming deals when the `deal-staging` directory of market subsystem's repo gets too large. + - Set `MaxStagingDealsBytes` under the [Dealmaking] section of the markets' subsystem's `config.toml` to reject new incoming deals when the `deal-staging` directory of market subsystem's repo gets too large. - 🌟🌟miner: Command to list/remove expired sectors locally ([filecoin-project/lotus#7140](https://github.com/filecoin-project/lotus/pull/7140)) - - run `./lotus-miner sectors expired -h` for more details. + - run `./lotus-miner sectors expired -h` for more details. - 🚀update to ffi to update-bellperson-proofs-v9-0-2 ([filecoin-project/lotus#7369](https://github.com/filecoin-project/lotus/pull/7369)) - - MinerX fellows(early testers of lotus releases) have reported faster WindowPoSt computation! + - MinerX fellows(early testers of lotus releases) have reported faster WindowPoSt computation! - 🌟dealpublisher: Fully validate deals before publishing ([filecoin-project/lotus#7234](https://github.com/filecoin-project/lotus/pull/7234)) - - This excludes the expired deals before sending out a PSD message which reduces the chances of PSD message failure due to invalid deals. + - This excludes the expired deals before sending out a PSD message which reduces the chances of PSD message failure due to invalid deals. 
- 🌟Simple alert system; FD limit alerts ([filecoin-project/lotus#7108](https://github.com/filecoin-project/lotus/pull/7108)) ## New Features @@ -531,7 +660,7 @@ improvements in different area, like deal making, sealing and so on. - Turn off patch ([filecoin-project/lotus#7172](https://github.com/filecoin-project/lotus/pull/7172)) - test: disable flaky TestSimultaneousTransferLimit ([filecoin-project/lotus#7153](https://github.com/filecoin-project/lotus/pull/7153)) - + ## Contributors | Contributor | Commits | Lines ± | Files Changed | @@ -563,46 +692,46 @@ improvements in different area, like deal making, sealing and so on. # v1.11.2 / 2021-09-06 -lotus v1.11.2 is a feature release that's **highly recommended ALL lotus users to upgrade**, including node operators, -storage providers and clients. +lotus v1.11.2 is a feature release that's **highly recommended ALL lotus users to upgrade**, including node operators, +storage providers and clients. ## Highlights - 🌟🌟🌟 Introduce Dagstore and CARv2 for deal-making (#6671) ([filecoin-project/lotus#6671](https://github.com/filecoin-project/lotus/pull/6671)) - - **[lotus miner markets' Dagstore](https://docs.filecoin.io/mine/lotus/dagstore/#conceptual-overview)** is a + - **[lotus miner markets' Dagstore](https://docs.filecoin.io/mine/lotus/dagstore/#conceptual-overview)** is a component of the `markets` subsystem in lotus-miner. It is a sharded store to hold large IPLD graphs efficiently, - packaged as location-transparent attachable CAR files and it replaces the former Badger staging blockstore. It + packaged as location-transparent attachable CAR files and it replaces the former Badger staging blockstore. It is designed to provide high efficiency and throughput, and minimize resource utilization during deal-making operations. 
- The dagstore also leverages the indexing features of [CARv2](https://github.com/ipld/ipld/blob/master/specs/transport/car/carv2/index.md) to enable plan CAR files to act as read and write - blockstores, which are served as the direct medium for data exchanges in markets for both storage and retrieval + The dagstore also leverages the indexing features of [CARv2](https://github.com/ipld/ipld/blob/master/specs/transport/car/carv2/index.md) to enable plan CAR files to act as read and write + blockstores, which are served as the direct medium for data exchanges in markets for both storage and retrieval deal making without requiring intermediate buffers. - - In the future, lotus will leverage and interact with Dagstore a lot for new features and improvements for deal - making, therefore, it's highly recommended to lotus users to go through [Lotus Miner: About the markets dagstore](https://docs.filecoin.io/mine/lotus/dagstore/#conceptual-overview) thoroughly to learn more about Dagstore's + - In the future, lotus will leverage and interact with Dagstore a lot for new features and improvements for deal + making, therefore, it's highly recommended to lotus users to go through [Lotus Miner: About the markets dagstore](https://docs.filecoin.io/mine/lotus/dagstore/#conceptual-overview) thoroughly to learn more about Dagstore's conceptual overview, terminology, directory structure, configuration and so on. - - **Note**: - - When you first start your lotus-miner or market subsystem with this release, a one-time/first-time **dagstore migration** will be triggered which replaces the former Badger staging blockstore with dagstore. We highly + - **Note**: + - When you first start your lotus-miner or market subsystem with this release, a one-time/first-time **dagstore migration** will be triggered which replaces the former Badger staging blockstore with dagstore. 
We highly recommend storage providers to read this [section](https://docs.filecoin.io/mine/lotus/dagstore/#first-time-migration) to learn more about what the process does, what to expect and how monitor it. - - It is highly recommended to **wait all ongoing data transfer to finish or cancel inbound storage deals that + - It is highly recommended to **wait all ongoing data transfer to finish or cancel inbound storage deals that are still transferring**, using the `lotus-miner data-transfers cancel` command before upgrade your market nodes. Reason being that the new dagstore changes attributes in the internal deal state objects, and the paths to the staging CARs where the deal data was being placed will be lost. - ‼️Having your dags initialized will become important in the near feature for you to provide a better storage - and retrieval service. We'd suggest you to start [forced bulk initialization] soon if possible as this process - places relatively high IP workload on your storage system and is better to be carried out gradually and over a - longer timeframe. Read how to do properly perform a force bulk initialization [here](https://docs.filecoin.io/mine/lotus/dagstore/#forcing-bulk-initialization). + and retrieval service. We'd suggest you to start [forced bulk initialization] soon if possible as this process + places relatively high IP workload on your storage system and is better to be carried out gradually and over a + longer timeframe. Read how to properly perform a force bulk initialization [here](https://docs.filecoin.io/mine/lotus/dagstore/#forcing-bulk-initialization). - ⏮ Rollback Alert(from v1.11.2-rcX to any version lower): If a storages deal is initiated with M1/v1.11.2(-rcX) release, it needs to get to the `StorageDealAwaitingPrecommit` state before you can do a version rollback or the markets process may panic. 
- - 💙 **Special thanks to [MinerX fellows for testing and providing valuable feedbacks](https://github.com/filecoin-project/lotus/discussions/6852) for Dagstore in the past month!** + - 💙 **Special thanks to [MinerX fellows for testing and providing valuable feedbacks](https://github.com/filecoin-project/lotus/discussions/6852) for Dagstore in the past month!** - 🌟🌟 rpcenc: Support reader redirect ([filecoin-project/lotus#6952](https://github.com/filecoin-project/lotus/pull/6952)) - This allows market processes to send piece bytes directly to workers involved on `AddPiece`. - Extending sectors: more practical and flexible tools ([filecoin-project/lotus#6097](https://github.com/filecoin-project/lotus/pull/6097)) - - `lotus-miner sectors check-expire` to inspect expiring sectors. - - `lotus-miner sectors renew` for renewing expiring sectors, see the command help menu for customizable option - like `extension`, `new-expiration` and so on. + - `lotus-miner sectors check-expire` to inspect expiring sectors. + - `lotus-miner sectors renew` for renewing expiring sectors, see the command help menu for customizable option + like `extension`, `new-expiration` and so on. - ‼️ MpoolReplaceCmd ( lotus mpool replace`) now takes FIL for fee-limit ([filecoin-project/lotus#6927](https://github.com/filecoin-project/lotus/pull/6927)) - Drop townhall/chainwatch ([filecoin-project/lotus#6912](https://github.com/filecoin-project/lotus/pull/6912)) - - ChainWatch is no longer supported by lotus. + - ChainWatch is no longer supported by lotus. - Configurable CC Sector Expiration ([filecoin-project/lotus#6803](https://github.com/filecoin-project/lotus/pull/6803)) - - Set `CommittedCapacitySectorLifetime` in lotus-miner/config.toml to specify the default expiration for a new CC - sector, value must be between 180-540 days inclusive. 
+ - Set `CommittedCapacitySectorLifetime` in lotus-miner/config.toml to specify the default expiration for a new CC + sector, value must be between 180-540 days inclusive. ## New Features - api/command for encoding actor params ([filecoin-project/lotus#7150](https://github.com/filecoin-project/lotus/pull/7150)) @@ -669,10 +798,10 @@ storage providers and clients. - remove m1 templates and make area selection multi-optionable ([filecoin-project/lotus#7121](https://github.com/filecoin-project/lotus/pull/7121)) - release -> master ([filecoin-project/lotus#7105](https://github.com/filecoin-project/lotus/pull/7105)) - Lotus release process - how we make releases ([filecoin-project/lotus#6944](https://github.com/filecoin-project/lotus/pull/6944)) -- codecov: fix mock name ([filecoin-project/lotus#7039](https://github.com/filecoin-project/lotus/pull/7039)) +- codecov: fix mock name ([filecoin-project/lotus#7039](https://github.com/filecoin-project/lotus/pull/7039)) - codecov: fix regexes ([filecoin-project/lotus#7037](https://github.com/filecoin-project/lotus/pull/7037)) -- chore: disable flaky test ([filecoin-project/lotus#6957](https://github.com/filecoin-project/lotus/pull/6957)) -- set buildtype in nerpa and butterfly ([filecoin-project/lotus#6085](https://github.com/filecoin-project/lotus/pull/6085)) +- chore: disable flaky test ([filecoin-project/lotus#6957](https://github.com/filecoin-project/lotus/pull/6957)) +- set buildtype in nerpa and butterfly ([filecoin-project/lotus#6085](https://github.com/filecoin-project/lotus/pull/6085)) - release v1.11.1 backport -> master ([filecoin-project/lotus#6929](https://github.com/filecoin-project/lotus/pull/6929)) - chore: fixup issue templates ([filecoin-project/lotus#6899](https://github.com/filecoin-project/lotus/pull/6899)) - bump master version to v1.11.2-dev ([filecoin-project/lotus#6903](https://github.com/filecoin-project/lotus/pull/6903)) @@ -714,15 +843,15 @@ Contributors > Note: for discussion about this release, 
please comment [here](https://github.com/filecoin-project/lotus/discussions/6904) -This is a **highly recommended** but optional Lotus v1.11.1 release that introduces many deal making and datastore improvements and new features along with other bug fixes. +This is a **highly recommended** but optional Lotus v1.11.1 release that introduces many deal making and datastore improvements and new features along with other bug fixes. ## Highlights - ⭐️⭐️⭐️[**lotus-miner market subsystem**](https://docs.filecoin.io/mine/lotus/split-markets-miners/#frontmatter-title) is introduced in this release! It is **highly recommended** for storage providers to run markets processes on a separate machine! Doing so, only this machine needs to exposes public ports for deal making. This also means that the other miner operations can now be completely isolated by from the deal making processes and storage providers can stop and restarts the markets process without affecting an ongoing Winning/Window PoSt! - - More details on the concepts, architecture and how to split the market process can be found [here](https://docs.filecoin.io/mine/lotus/split-markets-miners/#concepts). + - More details on the concepts, architecture and how to split the market process can be found [here](https://docs.filecoin.io/mine/lotus/split-markets-miners/#concepts). - Base on your system setup(running on separate machines, same machine and so on), please see the suggested practice by community members [here](https://github.com/filecoin-project/lotus/discussions/7047#discussion-3515335). - Note: if you are running lotus-worker on a different machine, you will need to set `MARKETS_API_INFO` for certain CLI to work properly. This will be improved by #7072. - Huge thanks to MinerX fellows for [helping testing the implementation, reporting the issues so they were fixed by now and providing feedbacks](https://github.com/filecoin-project/lotus/discussions/6861) to user docs in the past three weeks! 
-- Config for collateral from miner available balance ([filecoin-project/lotus#6629](https://github.com/filecoin-project/lotus/pull/6629)) +- Config for collateral from miner available balance ([filecoin-project/lotus#6629](https://github.com/filecoin-project/lotus/pull/6629)) - Better control your sector collateral payment by setting `CollateralFromMinerBalance`, `AvailableBalanceBuffer` and `DisableCollateralFallback`. - `CollateralFromMinerBalance`: whether to use available miner balance for sector collateral instead of sending it with each message, default is `false`. - `AvailableBalanceBuffer`: minimum available balance to keep in the miner actor before sending it with messages, default is 0FIL. @@ -730,114 +859,114 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd - Config for deal publishing control addresses ([filecoin-project/lotus#6697](https://github.com/filecoin-project/lotus/pull/6697)) - Set `DealPublishControl` to set the wallet used for sending `PublishStorageDeals` messages, instructions [here](https://docs.filecoin.io/mine/lotus/miner-addresses/#control-addresses). - Config UX improvements ([filecoin-project/lotus#6848](https://github.com/filecoin-project/lotus/pull/6848)) - - You can now preview the the default and updated node config by running `lotus/lotus-miner config default/updated` - + - You can now preview the default and updated node config by running `lotus/lotus-miner config default/updated` + ## New Features - - ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356)) - - **⭐️⭐️ Experimental** [Splitstore]((https://github.com/filecoin-project/lotus/blob/master/blockstore/splitstore/README.md)) (more details coming in v1.11.2! Stay tuned! Join the discussion [here](https://github.com/filecoin-project/lotus/discussions/5788) if you have questions!) 
: - - Improve splitstore warmup ([filecoin-project/lotus#6867](https://github.com/filecoin-project/lotus/pull/6867)) - - Moving GC for badger ([filecoin-project/lotus#6854](https://github.com/filecoin-project/lotus/pull/6854)) - - splitstore shed utils ([filecoin-project/lotus#6811](https://github.com/filecoin-project/lotus/pull/6811)) - - fix warmup by decoupling state from message receipt walk ([filecoin-project/lotus#6841](https://github.com/filecoin-project/lotus/pull/6841)) - - Splitstore: support on-disk marksets using badger ([filecoin-project/lotus#6833](https://github.com/filecoin-project/lotus/pull/6833)) - - cache loaded block messages ([filecoin-project/lotus#6760](https://github.com/filecoin-project/lotus/pull/6760)) - - Splitstore: add retention policy option for keeping messages in the hotstore ([filecoin-project/lotus#6775](https://github.com/filecoin-project/lotus/pull/6775)) - - Introduce the LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC envvar ([filecoin-project/lotus#6817](https://github.com/filecoin-project/lotus/pull/6817)) - - Splitstore: add support for protecting out of chain references in the blockstore ([filecoin-project/lotus#6777](https://github.com/filecoin-project/lotus/pull/6777)) - - Implement exposed splitstore ([filecoin-project/lotus#6762](https://github.com/filecoin-project/lotus/pull/6762)) - - Splitstore code reorg ([filecoin-project/lotus#6756](https://github.com/filecoin-project/lotus/pull/6756)) - - Splitstore: Some small fixes ([filecoin-project/lotus#6754](https://github.com/filecoin-project/lotus/pull/6754)) - - Splitstore Enhanchements ([filecoin-project/lotus#6474](https://github.com/filecoin-project/lotus/pull/6474)) - - lotus-shed: initial export cmd for markets related metadata ([filecoin-project/lotus#6840](https://github.com/filecoin-project/lotus/pull/6840)) - - add a very verbose -vv flag to lotus and lotus-miner. 
([filecoin-project/lotus#6888](https://github.com/filecoin-project/lotus/pull/6888)) - - Add allocated sectorid vis ([filecoin-project/lotus#4638](https://github.com/filecoin-project/lotus/pull/4638)) - - add a command for compacting sector numbers bitfield ([filecoin-project/lotus#4640](https://github.com/filecoin-project/lotus/pull/4640)) - - Run `lotus-miner actor compact-allocated` to compact sector number allocations to reduce the size of the allocated sector number bitfield. - - Add ChainGetMessagesInTipset API ([filecoin-project/lotus#6642](https://github.com/filecoin-project/lotus/pull/6642)) - - Handle the --color flag via proper global state ([filecoin-project/lotus#6743](https://github.com/filecoin-project/lotus/pull/6743)) - - Enable color by default only if os.Stdout is a TTY ([filecoin-project/lotus#6696](https://github.com/filecoin-project/lotus/pull/6696)) - - Stop outputing ANSI color on non-TTY ([filecoin-project/lotus#6694](https://github.com/filecoin-project/lotus/pull/6694)) - - Envvar to disable slash filter ([filecoin-project/lotus#6620](https://github.com/filecoin-project/lotus/pull/6620)) - - commit batch: AggregateAboveBaseFee config ([filecoin-project/lotus#6650](https://github.com/filecoin-project/lotus/pull/6650)) - - shed tool to estimate aggregate network fees ([filecoin-project/lotus#6631](https://github.com/filecoin-project/lotus/pull/6631)) - +- ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356)) +- **⭐️⭐️ Experimental** [Splitstore]((https://github.com/filecoin-project/lotus/blob/master/blockstore/splitstore/README.md)) (more details coming in v1.11.2! Stay tuned! Join the discussion [here](https://github.com/filecoin-project/lotus/discussions/5788) if you have questions!) 
: + - Improve splitstore warmup ([filecoin-project/lotus#6867](https://github.com/filecoin-project/lotus/pull/6867)) + - Moving GC for badger ([filecoin-project/lotus#6854](https://github.com/filecoin-project/lotus/pull/6854)) + - splitstore shed utils ([filecoin-project/lotus#6811](https://github.com/filecoin-project/lotus/pull/6811)) + - fix warmup by decoupling state from message receipt walk ([filecoin-project/lotus#6841](https://github.com/filecoin-project/lotus/pull/6841)) + - Splitstore: support on-disk marksets using badger ([filecoin-project/lotus#6833](https://github.com/filecoin-project/lotus/pull/6833)) + - cache loaded block messages ([filecoin-project/lotus#6760](https://github.com/filecoin-project/lotus/pull/6760)) + - Splitstore: add retention policy option for keeping messages in the hotstore ([filecoin-project/lotus#6775](https://github.com/filecoin-project/lotus/pull/6775)) + - Introduce the LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC envvar ([filecoin-project/lotus#6817](https://github.com/filecoin-project/lotus/pull/6817)) + - Splitstore: add support for protecting out of chain references in the blockstore ([filecoin-project/lotus#6777](https://github.com/filecoin-project/lotus/pull/6777)) + - Implement exposed splitstore ([filecoin-project/lotus#6762](https://github.com/filecoin-project/lotus/pull/6762)) + - Splitstore code reorg ([filecoin-project/lotus#6756](https://github.com/filecoin-project/lotus/pull/6756)) + - Splitstore: Some small fixes ([filecoin-project/lotus#6754](https://github.com/filecoin-project/lotus/pull/6754)) + - Splitstore Enhanchements ([filecoin-project/lotus#6474](https://github.com/filecoin-project/lotus/pull/6474)) +- lotus-shed: initial export cmd for markets related metadata ([filecoin-project/lotus#6840](https://github.com/filecoin-project/lotus/pull/6840)) +- add a very verbose -vv flag to lotus and lotus-miner. 
([filecoin-project/lotus#6888](https://github.com/filecoin-project/lotus/pull/6888)) +- Add allocated sectorid vis ([filecoin-project/lotus#4638](https://github.com/filecoin-project/lotus/pull/4638)) +- add a command for compacting sector numbers bitfield ([filecoin-project/lotus#4640](https://github.com/filecoin-project/lotus/pull/4640)) + - Run `lotus-miner actor compact-allocated` to compact sector number allocations to reduce the size of the allocated sector number bitfield. +- Add ChainGetMessagesInTipset API ([filecoin-project/lotus#6642](https://github.com/filecoin-project/lotus/pull/6642)) +- Handle the --color flag via proper global state ([filecoin-project/lotus#6743](https://github.com/filecoin-project/lotus/pull/6743)) + - Enable color by default only if os.Stdout is a TTY ([filecoin-project/lotus#6696](https://github.com/filecoin-project/lotus/pull/6696)) + - Stop outputing ANSI color on non-TTY ([filecoin-project/lotus#6694](https://github.com/filecoin-project/lotus/pull/6694)) +- Envvar to disable slash filter ([filecoin-project/lotus#6620](https://github.com/filecoin-project/lotus/pull/6620)) +- commit batch: AggregateAboveBaseFee config ([filecoin-project/lotus#6650](https://github.com/filecoin-project/lotus/pull/6650)) +- shed tool to estimate aggregate network fees ([filecoin-project/lotus#6631](https://github.com/filecoin-project/lotus/pull/6631)) + ## Bug Fixes - - Fix padding of deals, which only partially shipped in #5988 ([filecoin-project/lotus#6683](https://github.com/filecoin-project/lotus/pull/6683)) - - fix deal concurrency test failures by upgrading graphsync and others ([filecoin-project/lotus#6724](https://github.com/filecoin-project/lotus/pull/6724)) - - fix: on randomness change, use new rand ([filecoin-project/lotus#6805](https://github.com/filecoin-project/lotus/pull/6805)) - fix: always check if StateSearchMessage returns nil ([filecoin-project/lotus#6802](https://github.com/filecoin-project/lotus/pull/6802)) - - test: fix flaky 
window post tests ([filecoin-project/lotus#6804](https://github.com/filecoin-project/lotus/pull/6804)) - - wrap close(wait) with sync.Once to avoid panic ([filecoin-project/lotus#6800](https://github.com/filecoin-project/lotus/pull/6800)) - - fixes #6786 segfault ([filecoin-project/lotus#6787](https://github.com/filecoin-project/lotus/pull/6787)) - - ClientRetrieve stops on cancel([filecoin-project/lotus#6739](https://github.com/filecoin-project/lotus/pull/6739)) - - Fix bugs in sectors extend --v1-sectors ([filecoin-project/lotus#6066](https://github.com/filecoin-project/lotus/pull/6066)) - - fix "lotus-seed genesis car" error "merkledag: not found" ([filecoin-project/lotus#6688](https://github.com/filecoin-project/lotus/pull/6688)) - - Get retrieval pricing input should not error out on a deal state fetch ([filecoin-project/lotus#6679](https://github.com/filecoin-project/lotus/pull/6679)) - - Fix more CID double-encoding as hex ([filecoin-project/lotus#6680](https://github.com/filecoin-project/lotus/pull/6680)) - - storage: Fix FinalizeSector with sectors in stoage paths ([filecoin-project/lotus#6653](https://github.com/filecoin-project/lotus/pull/6653)) - - Fix tiny error in check-client-datacap ([filecoin-project/lotus#6664](https://github.com/filecoin-project/lotus/pull/6664)) - - Fix: precommit_batch method used the wrong cfg.CommitBatchWait ([filecoin-project/lotus#6658](https://github.com/filecoin-project/lotus/pull/6658)) - - fix ticket expiration check ([filecoin-project/lotus#6635](https://github.com/filecoin-project/lotus/pull/6635)) - - remove precommit check in handleCommitFailed ([filecoin-project/lotus#6634](https://github.com/filecoin-project/lotus/pull/6634)) - - fix prove commit aggregate send token amount ([filecoin-project/lotus#6625](https://github.com/filecoin-project/lotus/pull/6625)) - +- Fix padding of deals, which only partially shipped in #5988 ([filecoin-project/lotus#6683](https://github.com/filecoin-project/lotus/pull/6683)) +- fix 
deal concurrency test failures by upgrading graphsync and others ([filecoin-project/lotus#6724](https://github.com/filecoin-project/lotus/pull/6724)) +- fix: on randomness change, use new rand ([filecoin-project/lotus#6805](https://github.com/filecoin-project/lotus/pull/6805)) - fix: always check if StateSearchMessage returns nil ([filecoin-project/lotus#6802](https://github.com/filecoin-project/lotus/pull/6802)) +- test: fix flaky window post tests ([filecoin-project/lotus#6804](https://github.com/filecoin-project/lotus/pull/6804)) +- wrap close(wait) with sync.Once to avoid panic ([filecoin-project/lotus#6800](https://github.com/filecoin-project/lotus/pull/6800)) +- fixes #6786 segfault ([filecoin-project/lotus#6787](https://github.com/filecoin-project/lotus/pull/6787)) +- ClientRetrieve stops on cancel([filecoin-project/lotus#6739](https://github.com/filecoin-project/lotus/pull/6739)) +- Fix bugs in sectors extend --v1-sectors ([filecoin-project/lotus#6066](https://github.com/filecoin-project/lotus/pull/6066)) +- fix "lotus-seed genesis car" error "merkledag: not found" ([filecoin-project/lotus#6688](https://github.com/filecoin-project/lotus/pull/6688)) +- Get retrieval pricing input should not error out on a deal state fetch ([filecoin-project/lotus#6679](https://github.com/filecoin-project/lotus/pull/6679)) +- Fix more CID double-encoding as hex ([filecoin-project/lotus#6680](https://github.com/filecoin-project/lotus/pull/6680)) +- storage: Fix FinalizeSector with sectors in stoage paths ([filecoin-project/lotus#6653](https://github.com/filecoin-project/lotus/pull/6653)) +- Fix tiny error in check-client-datacap ([filecoin-project/lotus#6664](https://github.com/filecoin-project/lotus/pull/6664)) +- Fix: precommit_batch method used the wrong cfg.CommitBatchWait ([filecoin-project/lotus#6658](https://github.com/filecoin-project/lotus/pull/6658)) +- fix ticket expiration check ([filecoin-project/lotus#6635](https://github.com/filecoin-project/lotus/pull/6635)) +- 
remove precommit check in handleCommitFailed ([filecoin-project/lotus#6634](https://github.com/filecoin-project/lotus/pull/6634)) +- fix prove commit aggregate send token amount ([filecoin-project/lotus#6625](https://github.com/filecoin-project/lotus/pull/6625)) + ## Improvements - - Eliminate inefficiency in markets logging ([filecoin-project/lotus#6895](https://github.com/filecoin-project/lotus/pull/6895)) - - rename `cmd/lotus{-storage=>}-miner` to match binary. ([filecoin-project/lotus#6886](https://github.com/filecoin-project/lotus/pull/6886)) - - fix racy TestSimultanenousTransferLimit. ([filecoin-project/lotus#6862](https://github.com/filecoin-project/lotus/pull/6862)) - - ValidateBlock: Assert that block header height's are greater than parents ([filecoin-project/lotus#6872](https://github.com/filecoin-project/lotus/pull/6872)) - - feat: Don't panic when api impl is nil ([filecoin-project/lotus#6857](https://github.com/filecoin-project/lotus/pull/6857)) - - add docker-compose file ([filecoin-project/lotus#6544](https://github.com/filecoin-project/lotus/pull/6544)) - - easy way to make install app ([filecoin-project/lotus#5183](https://github.com/filecoin-project/lotus/pull/5183)) - - api: Separate the Net interface from Common ([filecoin-project/lotus#6627](https://github.com/filecoin-project/lotus/pull/6627)) - add StateReadState to gateway api ([filecoin-project/lotus#6818](https://github.com/filecoin-project/lotus/pull/6818)) - - add SealProof in SectorBuilder ([filecoin-project/lotus#6815](https://github.com/filecoin-project/lotus/pull/6815)) - - sealing: Handle preCommitParams errors more correctly ([filecoin-project/lotus#6763](https://github.com/filecoin-project/lotus/pull/6763)) - - ClientFindData: always fetch peer id from chain ([filecoin-project/lotus#6807](https://github.com/filecoin-project/lotus/pull/6807)) - - test: handle null blocks in TestForkRefuseCall ([filecoin-project/lotus#6758](https://github.com/filecoin-project/lotus/pull/6758)) - 
- Add more deal details to lotus-miner info ([filecoin-project/lotus#6708](https://github.com/filecoin-project/lotus/pull/6708)) - - add election backtest ([filecoin-project/lotus#5950](https://github.com/filecoin-project/lotus/pull/5950)) - - add dollar sign ([filecoin-project/lotus#6690](https://github.com/filecoin-project/lotus/pull/6690)) - - get-actor cli spelling fix ([filecoin-project/lotus#6681](https://github.com/filecoin-project/lotus/pull/6681)) - - polish(statetree): accept a context in statetree diff for timeouts ([filecoin-project/lotus#6639](https://github.com/filecoin-project/lotus/pull/6639)) - - Add helptext to lotus chain export ([filecoin-project/lotus#6672](https://github.com/filecoin-project/lotus/pull/6672)) - - add an incremental nonce itest. ([filecoin-project/lotus#6663](https://github.com/filecoin-project/lotus/pull/6663)) - - commit batch: Initialize the FailedSectors map ([filecoin-project/lotus#6647](https://github.com/filecoin-project/lotus/pull/6647)) - - Fast-path retry submitting commit aggregate if commit is still valid ([filecoin-project/lotus#6638](https://github.com/filecoin-project/lotus/pull/6638)) - - Reuse timers in sealing batch logic ([filecoin-project/lotus#6636](https://github.com/filecoin-project/lotus/pull/6636)) - +- Eliminate inefficiency in markets logging ([filecoin-project/lotus#6895](https://github.com/filecoin-project/lotus/pull/6895)) +- rename `cmd/lotus{-storage=>}-miner` to match binary. ([filecoin-project/lotus#6886](https://github.com/filecoin-project/lotus/pull/6886)) +- fix racy TestSimultanenousTransferLimit. 
([filecoin-project/lotus#6862](https://github.com/filecoin-project/lotus/pull/6862)) +- ValidateBlock: Assert that block header height's are greater than parents ([filecoin-project/lotus#6872](https://github.com/filecoin-project/lotus/pull/6872)) +- feat: Don't panic when api impl is nil ([filecoin-project/lotus#6857](https://github.com/filecoin-project/lotus/pull/6857)) +- add docker-compose file ([filecoin-project/lotus#6544](https://github.com/filecoin-project/lotus/pull/6544)) +- easy way to make install app ([filecoin-project/lotus#5183](https://github.com/filecoin-project/lotus/pull/5183)) +- api: Separate the Net interface from Common ([filecoin-project/lotus#6627](https://github.com/filecoin-project/lotus/pull/6627)) - add StateReadState to gateway api ([filecoin-project/lotus#6818](https://github.com/filecoin-project/lotus/pull/6818)) +- add SealProof in SectorBuilder ([filecoin-project/lotus#6815](https://github.com/filecoin-project/lotus/pull/6815)) +- sealing: Handle preCommitParams errors more correctly ([filecoin-project/lotus#6763](https://github.com/filecoin-project/lotus/pull/6763)) +- ClientFindData: always fetch peer id from chain ([filecoin-project/lotus#6807](https://github.com/filecoin-project/lotus/pull/6807)) +- test: handle null blocks in TestForkRefuseCall ([filecoin-project/lotus#6758](https://github.com/filecoin-project/lotus/pull/6758)) +- Add more deal details to lotus-miner info ([filecoin-project/lotus#6708](https://github.com/filecoin-project/lotus/pull/6708)) +- add election backtest ([filecoin-project/lotus#5950](https://github.com/filecoin-project/lotus/pull/5950)) +- add dollar sign ([filecoin-project/lotus#6690](https://github.com/filecoin-project/lotus/pull/6690)) +- get-actor cli spelling fix ([filecoin-project/lotus#6681](https://github.com/filecoin-project/lotus/pull/6681)) +- polish(statetree): accept a context in statetree diff for timeouts 
([filecoin-project/lotus#6639](https://github.com/filecoin-project/lotus/pull/6639)) +- Add helptext to lotus chain export ([filecoin-project/lotus#6672](https://github.com/filecoin-project/lotus/pull/6672)) +- add an incremental nonce itest. ([filecoin-project/lotus#6663](https://github.com/filecoin-project/lotus/pull/6663)) +- commit batch: Initialize the FailedSectors map ([filecoin-project/lotus#6647](https://github.com/filecoin-project/lotus/pull/6647)) +- Fast-path retry submitting commit aggregate if commit is still valid ([filecoin-project/lotus#6638](https://github.com/filecoin-project/lotus/pull/6638)) +- Reuse timers in sealing batch logic ([filecoin-project/lotus#6636](https://github.com/filecoin-project/lotus/pull/6636)) + ## Dependency Updates - - Update to proof v8.0.3 ([filecoin-project/lotus#6890](https://github.com/filecoin-project/lotus/pull/6890)) - - update to go-fil-market v1.6.0 ([filecoin-project/lotus#6885](https://github.com/filecoin-project/lotus/pull/6885)) - - Bump go-multihash, adjust test for supported version ([filecoin-project/lotus#6674](https://github.com/filecoin-project/lotus/pull/6674)) - - github.com/filecoin-project/go-data-transfer (v1.6.0 -> v1.7.2): - - github.com/filecoin-project/go-fil-markets (v1.5.0 -> v1.6.2): - - github.com/filecoin-project/go-padreader (v0.0.0-20200903213702-ed5fae088b20 -> v0.0.0-20210723183308-812a16dc01b1) - - github.com/filecoin-project/go-state-types (v0.1.1-0.20210506134452-99b279731c48 -> v0.1.1-0.20210810190654-139e0e79e69e) - - github.com/filecoin-project/go-statemachine (v0.0.0-20200925024713-05bd7c71fbfe -> v1.0.1) - - update go-libp2p-pubsub to v0.5.0 ([filecoin-project/lotus#6764](https://github.com/filecoin-project/lotus/pull/6764)) - +- Update to proof v8.0.3 ([filecoin-project/lotus#6890](https://github.com/filecoin-project/lotus/pull/6890)) +- update to go-fil-market v1.6.0 ([filecoin-project/lotus#6885](https://github.com/filecoin-project/lotus/pull/6885)) +- Bump go-multihash, 
adjust test for supported version ([filecoin-project/lotus#6674](https://github.com/filecoin-project/lotus/pull/6674)) +- github.com/filecoin-project/go-data-transfer (v1.6.0 -> v1.7.2): +- github.com/filecoin-project/go-fil-markets (v1.5.0 -> v1.6.2): +- github.com/filecoin-project/go-padreader (v0.0.0-20200903213702-ed5fae088b20 -> v0.0.0-20210723183308-812a16dc01b1) +- github.com/filecoin-project/go-state-types (v0.1.1-0.20210506134452-99b279731c48 -> v0.1.1-0.20210810190654-139e0e79e69e) +- github.com/filecoin-project/go-statemachine (v0.0.0-20200925024713-05bd7c71fbfe -> v1.0.1) +- update go-libp2p-pubsub to v0.5.0 ([filecoin-project/lotus#6764](https://github.com/filecoin-project/lotus/pull/6764)) + ## Others - - Master->v1.11.1 ([filecoin-project/lotus#7051](https://github.com/filecoin-project/lotus/pull/7051)) - - v1.11.1-rc2 ([filecoin-project/lotus#6966](https://github.com/filecoin-project/lotus/pull/6966)) - - Backport master -> v1.11.1 ([filecoin-project/lotus#6965](https://github.com/filecoin-project/lotus/pull/6965)) - - Fixes in master -> release ([filecoin-project/lotus#6933](https://github.com/filecoin-project/lotus/pull/6933)) - - Add changelog for v1.11.1-rc1 and bump the version ([filecoin-project/lotus#6900](https://github.com/filecoin-project/lotus/pull/6900)) - - Fix merge release -> v1.11.1 ([filecoin-project/lotus#6897](https://github.com/filecoin-project/lotus/pull/6897)) - - Update RELEASE_ISSUE_TEMPLATE.md ([filecoin-project/lotus#6880](https://github.com/filecoin-project/lotus/pull/6880)) - - Add github actions for staled pr ([filecoin-project/lotus#6879](https://github.com/filecoin-project/lotus/pull/6879)) - - Update issue templates and add templates for M1 ([filecoin-project/lotus#6856](https://github.com/filecoin-project/lotus/pull/6856)) - - Fix links in issue templates - - Update issue templates to forms ([filecoin-project/lotus#6798](https://github.com/filecoin-project/lotus/pull/6798) - - Nerpa v13 upgrade 
([filecoin-project/lotus#6837](https://github.com/filecoin-project/lotus/pull/6837)) - - add docker-compose file ([filecoin-project/lotus#6544](https://github.com/filecoin-project/lotus/pull/6544)) - - release -> master ([filecoin-project/lotus#6828](https://github.com/filecoin-project/lotus/pull/6828)) - - Resurrect CODEOWNERS, but for maintainers group ([filecoin-project/lotus#6773](https://github.com/filecoin-project/lotus/pull/6773)) - - Master disclaimer ([filecoin-project/lotus#6757](https://github.com/filecoin-project/lotus/pull/6757)) - - Create stale.yml ([filecoin-project/lotus#6747](https://github.com/filecoin-project/lotus/pull/6747)) - - Release template: Update all testnet infra at once ([filecoin-project/lotus#6710](https://github.com/filecoin-project/lotus/pull/6710)) - - Release Template: remove binary validation step ([filecoin-project/lotus#6709](https://github.com/filecoin-project/lotus/pull/6709)) - - Reset of the interop network ([filecoin-project/lotus#6689](https://github.com/filecoin-project/lotus/pull/6689)) - - Update version.go to 1.11.1 ([filecoin-project/lotus#6621](https://github.com/filecoin-project/lotus/pull/6621)) - +- Master->v1.11.1 ([filecoin-project/lotus#7051](https://github.com/filecoin-project/lotus/pull/7051)) +- v1.11.1-rc2 ([filecoin-project/lotus#6966](https://github.com/filecoin-project/lotus/pull/6966)) +- Backport master -> v1.11.1 ([filecoin-project/lotus#6965](https://github.com/filecoin-project/lotus/pull/6965)) +- Fixes in master -> release ([filecoin-project/lotus#6933](https://github.com/filecoin-project/lotus/pull/6933)) +- Add changelog for v1.11.1-rc1 and bump the version ([filecoin-project/lotus#6900](https://github.com/filecoin-project/lotus/pull/6900)) +- Fix merge release -> v1.11.1 ([filecoin-project/lotus#6897](https://github.com/filecoin-project/lotus/pull/6897)) +- Update RELEASE_ISSUE_TEMPLATE.md ([filecoin-project/lotus#6880](https://github.com/filecoin-project/lotus/pull/6880)) +- Add github 
actions for staled pr ([filecoin-project/lotus#6879](https://github.com/filecoin-project/lotus/pull/6879)) +- Update issue templates and add templates for M1 ([filecoin-project/lotus#6856](https://github.com/filecoin-project/lotus/pull/6856)) +- Fix links in issue templates +- Update issue templates to forms ([filecoin-project/lotus#6798](https://github.com/filecoin-project/lotus/pull/6798) +- Nerpa v13 upgrade ([filecoin-project/lotus#6837](https://github.com/filecoin-project/lotus/pull/6837)) +- add docker-compose file ([filecoin-project/lotus#6544](https://github.com/filecoin-project/lotus/pull/6544)) +- release -> master ([filecoin-project/lotus#6828](https://github.com/filecoin-project/lotus/pull/6828)) +- Resurrect CODEOWNERS, but for maintainers group ([filecoin-project/lotus#6773](https://github.com/filecoin-project/lotus/pull/6773)) +- Master disclaimer ([filecoin-project/lotus#6757](https://github.com/filecoin-project/lotus/pull/6757)) +- Create stale.yml ([filecoin-project/lotus#6747](https://github.com/filecoin-project/lotus/pull/6747)) +- Release template: Update all testnet infra at once ([filecoin-project/lotus#6710](https://github.com/filecoin-project/lotus/pull/6710)) +- Release Template: remove binary validation step ([filecoin-project/lotus#6709](https://github.com/filecoin-project/lotus/pull/6709)) +- Reset of the interop network ([filecoin-project/lotus#6689](https://github.com/filecoin-project/lotus/pull/6689)) +- Update version.go to 1.11.1 ([filecoin-project/lotus#6621](https://github.com/filecoin-project/lotus/pull/6621)) + ## Contributors | Contributor | Commits | Lines ± | Files Changed | @@ -878,7 +1007,7 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd | dependabot[bot] | 1 | +3/-3 | 1 | | zhoutian527 | 1 | +2/-2 | 1 | | xloem | 1 | +4/-0 | 1 | -| @travisperson| 2 | +2/-2 | 3 | +| | 2 | +2/-2 | 3 | | Liviu Damian | 2 | +2/-2 | 2 | | @jimpick | 2 | +2/-2 | 2 | | Frank | 1 | +3/-0 | 1 | @@ -887,9 +1016,10 
@@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd # 1.11.0 / 2021-07-22 -This is a **highly recommended** release of Lotus that have many bug fixes, improvements and new features. +This is a **highly recommended** release of Lotus that has many bug fixes, improvements and new features. ## Highlights +- Miner SimultaneousTransfers config ([filecoin-project/lotus#6612](https://github.com/filecoin-project/lotus/pull/6612)) - Miner SimultaneousTransfers config ([filecoin-project/lotus#6612](https://github.com/filecoin-project/lotus/pull/6612)) - Set `SimultaneousTransfers` in lotus miner config to configure the maximum number of parallel online data transfers, including both storage and retrieval deals. - Dynamic Retrieval pricing ([filecoin-project/lotus#6175](https://github.com/filecoin-project/lotus/pull/6175)) @@ -898,7 +1028,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr - run `lotus mpool manage and follow the instructions! - Demo available at https://www.youtube.com/watch?v=QDocpLQjZgQ. 
- Add utils to use multisigs as miner owners ([filecoin-project/lotus#6490](https://github.com/filecoin-project/lotus/pull/6490)) - + ## More New Features - feat: implement lotus-sim ([filecoin-project/lotus#6406](https://github.com/filecoin-project/lotus/pull/6406)) - implement a command to export a car ([filecoin-project/lotus#6405](https://github.com/filecoin-project/lotus/pull/6405)) @@ -915,11 +1045,11 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr - Transplant some useful commands to lotus-shed actor ([filecoin-project/lotus#5913](https://github.com/filecoin-project/lotus/pull/5913)) - run `lotus-shed actor` - actor wrapper codegen ([filecoin-project/lotus#6108](https://github.com/filecoin-project/lotus/pull/6108)) -- Add a shed util to count miners by post type ([filecoin-project/lotus#6169](https://github.com/filecoin-project/lotus/pull/6169)) +- Add a shed util to count miners by post type ([filecoin-project/lotus#6169](https://github.com/filecoin-project/lotus/pull/6169)) - shed: command to list duplicate messages in tipsets (steb) ([filecoin-project/lotus#5847](https://github.com/filecoin-project/lotus/pull/5847)) - feat: allow checkpointing to forks ([filecoin-project/lotus#6107](https://github.com/filecoin-project/lotus/pull/6107)) -- Add a CLI tool for miner proving deadline ([filecoin-project/lotus#6132](https://github.com/filecoin-project/lotus/pull/6132)) - - run `lotus state miner-proving-deadline` +- Add a CLI tool for miner proving deadline ([filecoin-project/lotus#6132](https://github.com/filecoin-project/lotus/pull/6132)) + - run `lotus state miner-proving-deadline` ## Bug Fixes @@ -928,7 +1058,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr - Make query-ask CLI more graceful ([filecoin-project/lotus#6590](https://github.com/filecoin-project/lotus/pull/6590)) - scale up sector expiration to avoid sector expire in batch-pre-commit waitting 
([filecoin-project/lotus#6566](https://github.com/filecoin-project/lotus/pull/6566)) - Fix an error in msigLockCancel ([filecoin-project/lotus#6582](https://github.com/filecoin-project/lotus/pull/6582) -- fix circleci being out of sync. ([filecoin-project/lotus#6573](https://github.com/filecoin-project/lotus/pull/6573)) +- fix circleci being out of sync. ([filecoin-project/lotus#6573](https://github.com/filecoin-project/lotus/pull/6573)) - Fix helptext for ask price([filecoin-project/lotus#6560](https://github.com/filecoin-project/lotus/pull/6560)) - fix commit finalize failed ([filecoin-project/lotus#6521](https://github.com/filecoin-project/lotus/pull/6521)) - Fix soup ([filecoin-project/lotus#6501](https://github.com/filecoin-project/lotus/pull/6501)) @@ -953,7 +1083,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr - Speed up StateListMessages in some cases ([filecoin-project/lotus#6007](https://github.com/filecoin-project/lotus/pull/6007)) - fix(splitstore): fix a panic on revert-only head changes ([filecoin-project/lotus#6133](https://github.com/filecoin-project/lotus/pull/6133)) - drand: fix beacon cache ([filecoin-project/lotus#6164](https://github.com/filecoin-project/lotus/pull/6164)) - + ## Improvements - gateway: Add support for Version method ([filecoin-project/lotus#6618](https://github.com/filecoin-project/lotus/pull/6618)) - revamped integration test kit (aka. 
Operation Sparks Joy) ([filecoin-project/lotus#6329](https://github.com/filecoin-project/lotus/pull/6329)) @@ -988,7 +1118,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr - Remove log line when tracing is not configured ([filecoin-project/lotus#6334](https://github.com/filecoin-project/lotus/pull/6334)) - separate tracing environment variables ([filecoin-project/lotus#6323](https://github.com/filecoin-project/lotus/pull/6323)) - feat: log dispute rate ([filecoin-project/lotus#6322](https://github.com/filecoin-project/lotus/pull/6322)) -- Move verifreg shed utils to CLI ([filecoin-project/lotus#6135](https://github.com/filecoin-project/lotus/pull/6135)) +- Move verifreg shed utils to CLI ([filecoin-project/lotus#6135](https://github.com/filecoin-project/lotus/pull/6135)) - consider storiface.PathStorage when calculating storage requirements ([filecoin-project/lotus#6233](https://github.com/filecoin-project/lotus/pull/6233)) - `storage` module: add go docs and minor code quality refactors ([filecoin-project/lotus#6259](https://github.com/filecoin-project/lotus/pull/6259)) - Increase data transfer timeouts ([filecoin-project/lotus#6300](https://github.com/filecoin-project/lotus/pull/6300)) @@ -1005,7 +1135,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr - Testground checks on push ([filecoin-project/lotus#5887](https://github.com/filecoin-project/lotus/pull/5887)) - Use EmptyTSK where appropriate ([filecoin-project/lotus#6134](https://github.com/filecoin-project/lotus/pull/6134)) - upgrade `lotus-soup` testplans and reduce deals concurrency to a single miner ([filecoin-project/lotus#6122](https://github.com/filecoin-project/lotus/pull/6122) - + ## Dependency Updates - downgrade libp2p/go-libp2p-yamux to v0.5.1. 
([filecoin-project/lotus#6605](https://github.com/filecoin-project/lotus/pull/6605)) - Update libp2p to 0.14.2 ([filecoin-project/lotus#6404](https://github.com/filecoin-project/lotus/pull/6404)) @@ -1013,7 +1143,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr - Use new actor tags ([filecoin-project/lotus#6291](https://github.com/filecoin-project/lotus/pull/6291)) - chore: update go-libp2p ([filecoin-project/lotus#6231](https://github.com/filecoin-project/lotus/pull/6231)) - Update ffi to proofs v7 ([filecoin-project/lotus#6150](https://github.com/filecoin-project/lotus/pull/6150)) - + ## Others - Initial draft: basic build instructions on Readme ([filecoin-project/lotus#6498](https://github.com/filecoin-project/lotus/pull/6498)) - Remove rc changelog, compile the new changelog for final release only ([filecoin-project/lotus#6444](https://github.com/filecoin-project/lotus/pull/6444)) @@ -1031,8 +1161,8 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr - Introduce a release issue template ([filecoin-project/lotus#5826](https://github.com/filecoin-project/lotus/pull/5826)) - This is a 1:1 forward-port of PR#6183 from 1.9.x to master ([filecoin-project/lotus#6196](https://github.com/filecoin-project/lotus/pull/6196)) - Update cli gen ([filecoin-project/lotus#6155](https://github.com/filecoin-project/lotus/pull/6155)) -- Generate CLI docs ([filecoin-project/lotus#6145](https://github.com/filecoin-project/lotus/pull/6145)) - +- Generate CLI docs ([filecoin-project/lotus#6145](https://github.com/filecoin-project/lotus/pull/6145)) + ## Contributors | Contributor | Commits | Lines ± | Files Changed | @@ -1044,7 +1174,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr | @Stebalien | 106 | +7653/-2718 | 273 | | dirkmc | 11 | +2580/-1371 | 77 | | @dirkmc | 39 | +1865/-1194 | 79 | -| @Kubuxu | 19 | +1973/-485 | 81 | +| @Kubuxu | 19 | +1973/-485 | 81 | | @vyzo | 4 | +1748/-330 | 
50 | | @aarshkshah1992 | 5 | +1462/-213 | 27 | | @coryschwartz | 35 | +568/-206 | 59 | @@ -1081,10 +1211,10 @@ This is an optional but **highly recommended** release of Lotus for lotus miners ## New Features - commit batch: AggregateAboveBaseFee config #6650 - `AggregateAboveBaseFee` is added to miner sealing configuration for setting the network base fee to start aggregating proofs. When the network base fee is lower than this value, the prove commits will be submitted individually via `ProveCommitSector`. According to the [Batch Incentive Alignment](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md#batch-incentive-alignment) introduced in FIP-0013, we recommend miners to set this value to 0.15 nanoFIL(which is the default value) to avoid unexpected aggregation fee in burn and enjoy the most benefits of aggregation! - + ## Bug Fixes - storage: Fix FinalizeSector with sectors in storage paths #6652 -- Fix tiny error in check-client-datacap #6664 +- Fix tiny error in check-client-datacap #6664 - Fix: precommit_batch method used the wrong cfg.PreCommitBatchWait #6658 - to optimize the batchwait #6636 - fix getTicket: sector precommitted but expired case #6635 @@ -1113,10 +1243,10 @@ This is an optional but **highly recommended** release of Lotus for lotus miners ## New Features - commit batch: AggregateAboveBaseFee config #6650 - `AggregateAboveBaseFee` is added to miner sealing configuration for setting the network base fee to start aggregating proofs. When the network base fee is lower than this value, the prove commits will be submitted individually via `ProveCommitSector`. According to the [Batch Incentive Alignment](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md#batch-incentive-alignment) introduced in FIP-0013, we recommend miners to set this value to 0.15 nanoFIL(which is the default value) to avoid unexpected aggregation fee in burn and enjoy the most benefits of aggregation! 
- + ## Bug Fixes - storage: Fix FinalizeSector with sectors in storage paths #6652 -- Fix tiny error in check-client-datacap #6664 +- Fix tiny error in check-client-datacap #6664 - Fix: precommit_batch method used the wrong cfg.PreCommitBatchWait #6658 - to optimize the batchwait #6636 - fix getTicket: sector precommitted but expired case #6635 @@ -1159,10 +1289,10 @@ FIPs [0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.m **Check out the documentation [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch) for details on the new Lotus miner sealing config options, [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#fees-section) for fee config options, and explanations of the new features.** Note: - - We recommend to keep `PreCommitSectorsBatch` as 1. - - We recommend miners to set `PreCommitBatchWait` lower than 30 hours. - - We recommend miners to set a longer `CommitBatchSlack` and `PreCommitBatchSlack` to prevent message failures - due to expirations. +- We recommend to keep `PreCommitSectorsBatch` as 1. +- We recommend miners to set `PreCommitBatchWait` lower than 30 hours. +- We recommend miners to set a longer `CommitBatchSlack` and `PreCommitBatchSlack` to prevent message failures + due to expirations. ### Projected state tree growth @@ -1173,9 +1303,9 @@ Given these assumptions: - We'd expect a network storage growth rate of around 530PiB per day. 😳 🎉 🥳 😅 - We'd expect network bandwidth dedicated to `SubmitWindowedPoSt` to grow by about 0.02% per day. - We'd expect the [state-tree](https://spec.filecoin.io/#section-systems.filecoin_vm.state_tree) (and therefore [snapshot](https://docs.filecoin.io/get-started/lotus/chain/#lightweight-snapshot)) size to grow by 1.16GiB per day. - - Nearly all of the state-tree growth is expected to come from new sector metadata. + - Nearly all of the state-tree growth is expected to come from new sector metadata. 
- We'd expect the daily lotus datastore growth rate to increase by about 10-15% (from current ~21GiB/day). - - Most "growth" of the lotus datastore is due to "churn", historical data that's no longer referenced by the latest state-tree. + - Most "growth" of the lotus datastore is due to "churn", historical data that's no longer referenced by the latest state-tree. ### Future improvements @@ -2864,4 +2994,4 @@ We are grateful for every contribution! We are very excited to release **lotus** 0.1.0. This is our testnet release. To install lotus and join the testnet, please visit [lotu.sh](lotu.sh). Please file bug reports as [issues](https://github.com/filecoin-project/lotus/issues). -A huge thank you to all contributors for this testnet release! +A huge thank you to all contributors for this testnet release! \ No newline at end of file diff --git a/Dockerfile.lotus b/Dockerfile.lotus index 812ad9f61..48a68ef6f 100644 --- a/Dockerfile.lotus +++ b/Dockerfile.lotus @@ -53,8 +53,9 @@ COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/ COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/ COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/ -RUN useradd -r -u 532 -U fc - +RUN useradd -r -u 532 -U fc \ + && mkdir -p /etc/OpenCL/vendors \ + && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd ### FROM base AS lotus diff --git a/api/api_storage.go b/api/api_storage.go index dc7003cfe..be46fdb2f 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -92,7 +92,8 @@ type StorageMiner interface { SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error //perm:admin // SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can // be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties. 
- SectorRemove(context.Context, abi.SectorNumber) error //perm:admin + SectorRemove(context.Context, abi.SectorNumber) error //perm:admin + SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin // SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then // automatically removes it from storage SectorTerminate(context.Context, abi.SectorNumber) error //perm:admin @@ -100,8 +101,7 @@ type StorageMiner interface { // Returns null if message wasn't sent SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin // SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message - SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin - SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin + SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin // SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. 
// Returns null if message wasn't sent SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin diff --git a/api/api_test.go b/api/api_test.go index e65d50ca3..15f59a486 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -1,3 +1,4 @@ +//stm: #unit package api import ( @@ -26,6 +27,7 @@ func goCmd() string { } func TestDoesntDependOnFFI(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_FFI_DEPENDENCE_001 deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output() if err != nil { t.Fatal(err) @@ -38,6 +40,7 @@ func TestDoesntDependOnFFI(t *testing.T) { } func TestDoesntDependOnBuild(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_FFI_DEPENDENCE_002 deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output() if err != nil { t.Fatal(err) @@ -50,6 +53,7 @@ func TestDoesntDependOnBuild(t *testing.T) { } func TestReturnTypes(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_001 errType := reflect.TypeOf(new(error)).Elem() bareIface := reflect.TypeOf(new(interface{})).Elem() jmarsh := reflect.TypeOf(new(json.Marshaler)).Elem() @@ -115,6 +119,7 @@ func TestReturnTypes(t *testing.T) { } func TestPermTags(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_PERM_TAGS_001 _ = PermissionedFullAPI(&FullNodeStruct{}) _ = PermissionedStorMinerAPI(&StorageMinerStruct{}) _ = PermissionedWorkerAPI(&WorkerStruct{}) diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 1190b0dc4..2579610fe 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -122,7 +122,7 @@ func init() { addExample(api.FullAPIVersion1) addExample(api.PCHInbound) addExample(time.Minute) - addExample(graphsync.RequestID(4)) + addExample(graphsync.NewRequestID()) addExample(datatransfer.TransferID(3)) addExample(datatransfer.Ongoing) addExample(storeIDExample) diff --git a/api/proxy_util_test.go b/api/proxy_util_test.go index 3cbc466b6..57162acd8 100644 --- a/api/proxy_util_test.go +++ 
b/api/proxy_util_test.go @@ -1,3 +1,4 @@ +//stm: #unit package api import ( @@ -29,6 +30,7 @@ type StrC struct { } func TestGetInternalStructs(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_API_STRUCTS_001 var proxy StrA sts := GetInternalStructs(&proxy) @@ -44,6 +46,7 @@ func TestGetInternalStructs(t *testing.T) { } func TestNestedInternalStructs(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_API_STRUCTS_001 var proxy StrC // check that only the top-level internal struct gets picked up diff --git a/api/types.go b/api/types.go index c688edf4b..a91c27d26 100644 --- a/api/types.go +++ b/api/types.go @@ -5,6 +5,8 @@ import ( "fmt" "time" + "github.com/libp2p/go-libp2p-core/network" + datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" @@ -12,7 +14,6 @@ import ( "github.com/ipfs/go-cid" "github.com/ipfs/go-graphsync" - "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" ma "github.com/multiformats/go-multiaddr" @@ -58,7 +59,7 @@ type MessageSendSpec struct { // GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync type GraphSyncDataTransfer struct { // GraphSync request id for this transfer - RequestID graphsync.RequestID + RequestID *graphsync.RequestID // Graphsync state for this transfer RequestState string // If a channel ID is present, indicates whether this is the current graphsync request for this channel @@ -124,12 +125,6 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta return channel } -type NetBlockList struct { - Peers []peer.ID - IPAddrs []string - IPSubnets []string -} - type NetStat struct { System *network.ScopeStat `json:",omitempty"` Transient *network.ScopeStat `json:",omitempty"` @@ -152,6 +147,12 @@ type NetLimit struct { FD int } +type NetBlockList struct { + Peers []peer.ID + IPAddrs 
[]string + IPSubnets []string +} + type ExtendedPeerInfo struct { ID peer.ID Agent string diff --git a/api/version.go b/api/version.go index 9f4f73513..cc0c7b270 100644 --- a/api/version.go +++ b/api/version.go @@ -57,8 +57,8 @@ var ( FullAPIVersion0 = newVer(1, 5, 0) FullAPIVersion1 = newVer(2, 2, 0) - MinerAPIVersion0 = newVer(1, 4, 0) - WorkerAPIVersion0 = newVer(1, 5, 0) + MinerAPIVersion0 = newVer(1, 5, 0) + WorkerAPIVersion0 = newVer(1, 6, 0) ) //nolint:varcheck,deadcode diff --git a/blockstore/badger/blockstore_test.go b/blockstore/badger/blockstore_test.go index 4619d4ec3..470326123 100644 --- a/blockstore/badger/blockstore_test.go +++ b/blockstore/badger/blockstore_test.go @@ -1,10 +1,10 @@ +//stm: #unit package badgerbs import ( "bytes" "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -20,6 +20,8 @@ import ( ) func TestBadgerBlockstore(t *testing.T) { + //stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 (&Suite{ NewBlockstore: newBlockstore(DefaultOptions), OpenBlockstore: openBlockstore(DefaultOptions), @@ -38,6 +40,8 @@ func TestBadgerBlockstore(t *testing.T) { } func TestStorageKey(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_STORAGE_KEY_001 bs, _ := newBlockstore(DefaultOptions)(t) bbs := bs.(*Blockstore) defer bbs.Close() //nolint:errcheck @@ -73,20 +77,13 @@ func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) ( return func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) { tb.Helper() - path, err := ioutil.TempDir("", "") - if err != nil { - tb.Fatal(err) - } + path = tb.TempDir() db, err := Open(optsSupplier(path)) if err != nil { tb.Fatal(err) } - tb.Cleanup(func() { - _ = os.RemoveAll(path) - }) - return db, path } } @@ -100,17 +97,10 @@ func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, func testMove(t 
*testing.T, optsF func(string) Options) { ctx := context.Background() - basePath, err := ioutil.TempDir("", "") - if err != nil { - t.Fatal(err) - } + basePath := t.TempDir() dbPath := filepath.Join(basePath, "db") - t.Cleanup(func() { - _ = os.RemoveAll(basePath) - }) - db, err := Open(optsF(dbPath)) if err != nil { t.Fatal(err) @@ -265,10 +255,16 @@ func testMove(t *testing.T, optsF func(string) Options) { } func TestMoveNoPrefix(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 + //stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_COLLECT_GARBAGE_001 testMove(t, DefaultOptions) } func TestMoveWithPrefix(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 + //stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_COLLECT_GARBAGE_001 testMove(t, func(path string) Options { opts := DefaultOptions(path) opts.Prefix = "/prefixed/" diff --git a/blockstore/badger/blockstore_test_suite.go b/blockstore/badger/blockstore_test_suite.go index 167d1b2ab..93b268a65 100644 --- a/blockstore/badger/blockstore_test_suite.go +++ b/blockstore/badger/blockstore_test_suite.go @@ -1,3 +1,4 @@ +//stm: #unit package badgerbs import ( @@ -44,6 +45,8 @@ func (s *Suite) RunTests(t *testing.T, prefix string) { } func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_GET_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { @@ -57,6 +60,8 @@ func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) { } func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_GET_001 ctx := context.Background() bs, _ := 
s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { @@ -68,6 +73,9 @@ func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) { } func (s *Suite) TestPutThenGetBlock(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 + //stm: @SPLITSTORE_BADGER_GET_001 ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { @@ -85,6 +93,8 @@ func (s *Suite) TestPutThenGetBlock(t *testing.T) { } func (s *Suite) TestHas(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_HAS_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { @@ -106,6 +116,9 @@ func (s *Suite) TestHas(t *testing.T) { } func (s *Suite) TestCidv0v1(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 + //stm: @SPLITSTORE_BADGER_GET_001 ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { @@ -123,6 +136,9 @@ func (s *Suite) TestCidv0v1(t *testing.T) { } func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 + //stm: @SPLITSTORE_BADGER_GET_SIZE_001 ctx := context.Background() bs, _ := s.NewBlockstore(t) @@ -154,6 +170,8 @@ func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) { } func (s *Suite) TestAllKeysSimple(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -170,6 +188,9 @@ func (s *Suite) TestAllKeysSimple(t *testing.T) { } func (s 
*Suite) TestAllKeysRespectsContext(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 + //stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001 bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -200,6 +221,7 @@ func (s *Suite) TestAllKeysRespectsContext(t *testing.T) { } func (s *Suite) TestDoubleClose(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 bs, _ := s.NewBlockstore(t) c, ok := bs.(io.Closer) if !ok { @@ -210,6 +232,9 @@ func (s *Suite) TestDoubleClose(t *testing.T) { } func (s *Suite) TestReopenPutGet(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 + //stm: @SPLITSTORE_BADGER_GET_001 ctx := context.Background() bs, path := s.NewBlockstore(t) c, ok := bs.(io.Closer) @@ -236,6 +261,10 @@ func (s *Suite) TestReopenPutGet(t *testing.T) { } func (s *Suite) TestPutMany(t *testing.T) { + //stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001 + //stm: @SPLITSTORE_BADGER_HAS_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 + //stm: @SPLITSTORE_BADGER_GET_001, @SPLITSTORE_BADGER_PUT_MANY_001 + //stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001 ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { @@ -268,6 +297,11 @@ func (s *Suite) TestPutMany(t *testing.T) { } func (s *Suite) TestDelete(t *testing.T) { + //stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001 + //stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_POOLED_STORAGE_HAS_001 + //stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001, @SPLITSTORE_BADGER_HAS_001 + //stm: @SPLITSTORE_BADGER_PUT_MANY_001 + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { diff --git a/blockstore/splitstore/checkpoint_test.go 
b/blockstore/splitstore/checkpoint_test.go index 4fefe40cf..241707d65 100644 --- a/blockstore/splitstore/checkpoint_test.go +++ b/blockstore/splitstore/checkpoint_test.go @@ -1,8 +1,6 @@ package splitstore import ( - "io/ioutil" - "os" "path/filepath" "testing" @@ -11,14 +9,7 @@ import ( ) func TestCheckpoint(t *testing.T) { - dir, err := ioutil.TempDir("", "checkpoint.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(dir) - }) + dir := t.TempDir() path := filepath.Join(dir, "checkpoint") diff --git a/blockstore/splitstore/coldset_test.go b/blockstore/splitstore/coldset_test.go index 60216ebd4..8fc23a68e 100644 --- a/blockstore/splitstore/coldset_test.go +++ b/blockstore/splitstore/coldset_test.go @@ -2,8 +2,6 @@ package splitstore import ( "fmt" - "io/ioutil" - "os" "path/filepath" "testing" @@ -12,14 +10,7 @@ import ( ) func TestColdSet(t *testing.T) { - dir, err := ioutil.TempDir("", "coldset.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(dir) - }) + dir := t.TempDir() path := filepath.Join(dir, "coldset") diff --git a/blockstore/splitstore/markset_test.go b/blockstore/splitstore/markset_test.go index b4b871602..4d67909b0 100644 --- a/blockstore/splitstore/markset_test.go +++ b/blockstore/splitstore/markset_test.go @@ -1,8 +1,7 @@ +//stm: #unit package splitstore import ( - "io/ioutil" - "os" "testing" cid "github.com/ipfs/go-cid" @@ -10,6 +9,8 @@ import ( ) func TestMapMarkSet(t *testing.T) { + //stm: @SPLITSTORE_MARKSET_CREATE_001, @SPLITSTORE_MARKSET_HAS_001, @@SPLITSTORE_MARKSET_MARK_001 + //stm: @SPLITSTORE_MARKSET_CLOSE_001, @SPLITSTORE_MARKSET_CREATE_VISITOR_001 testMarkSet(t, "map") testMarkSetRecovery(t, "map") testMarkSetMarkMany(t, "map") @@ -18,6 +19,8 @@ func TestMapMarkSet(t *testing.T) { } func TestBadgerMarkSet(t *testing.T) { + //stm: @SPLITSTORE_MARKSET_CREATE_001, @SPLITSTORE_MARKSET_HAS_001, @@SPLITSTORE_MARKSET_MARK_001 + //stm: @SPLITSTORE_MARKSET_CLOSE_001, 
@SPLITSTORE_MARKSET_CREATE_VISITOR_001 bs := badgerMarkSetBatchSize badgerMarkSetBatchSize = 1 t.Cleanup(func() { @@ -31,14 +34,7 @@ func TestBadgerMarkSet(t *testing.T) { } func testMarkSet(t *testing.T, lsType string) { - path, err := ioutil.TempDir("", "markset.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(path) - }) + path := t.TempDir() env, err := OpenMarkSetEnv(path, lsType) if err != nil { @@ -46,6 +42,7 @@ func testMarkSet(t *testing.T, lsType string) { } defer env.Close() //nolint:errcheck + // stm: @SPLITSTORE_MARKSET_CREATE_001 hotSet, err := env.New("hot", 0) if err != nil { t.Fatal(err) @@ -65,6 +62,7 @@ func testMarkSet(t *testing.T, lsType string) { return cid.NewCidV1(cid.Raw, h) } + // stm: @SPLITSTORE_MARKSET_HAS_001 mustHave := func(s MarkSet, cid cid.Cid) { t.Helper() has, err := s.Has(cid) @@ -94,6 +92,7 @@ func testMarkSet(t *testing.T, lsType string) { k3 := makeCid("c") k4 := makeCid("d") + // stm: @SPLITSTORE_MARKSET_MARK_001 hotSet.Mark(k1) //nolint hotSet.Mark(k2) //nolint coldSet.Mark(k3) //nolint @@ -144,6 +143,7 @@ func testMarkSet(t *testing.T, lsType string) { mustNotHave(coldSet, k3) mustNotHave(coldSet, k4) + //stm: @SPLITSTORE_MARKSET_CLOSE_001 err = hotSet.Close() if err != nil { t.Fatal(err) @@ -156,14 +156,7 @@ func testMarkSet(t *testing.T, lsType string) { } func testMarkSetVisitor(t *testing.T, lsType string) { - path, err := ioutil.TempDir("", "markset.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(path) - }) + path := t.TempDir() env, err := OpenMarkSetEnv(path, lsType) if err != nil { @@ -171,6 +164,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) { } defer env.Close() //nolint:errcheck + //stm: @SPLITSTORE_MARKSET_CREATE_VISITOR_001 visitor, err := env.New("test", 0) if err != nil { t.Fatal(err) @@ -225,14 +219,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) { } func testMarkSetVisitorRecovery(t *testing.T, lsType string) { - 
path, err := ioutil.TempDir("", "markset.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(path) - }) + path := t.TempDir() env, err := OpenMarkSetEnv(path, lsType) if err != nil { @@ -324,14 +311,7 @@ func testMarkSetVisitorRecovery(t *testing.T, lsType string) { } func testMarkSetRecovery(t *testing.T, lsType string) { - path, err := ioutil.TempDir("", "markset.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(path) - }) + path := t.TempDir() env, err := OpenMarkSetEnv(path, lsType) if err != nil { @@ -437,14 +417,7 @@ func testMarkSetRecovery(t *testing.T, lsType string) { } func testMarkSetMarkMany(t *testing.T, lsType string) { - path, err := ioutil.TempDir("", "markset.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(path) - }) + path := t.TempDir() env, err := OpenMarkSetEnv(path, lsType) if err != nil { diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index ee30400a4..f2213aecc 100644 --- a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -1,12 +1,11 @@ +//stm: #unit package splitstore import ( "context" "errors" "fmt" - "io/ioutil" "math/rand" - "os" "sync" "sync/atomic" "testing" @@ -85,14 +84,7 @@ func testSplitStore(t *testing.T, cfg *Config) { t.Fatal(err) } - path, err := ioutil.TempDir("", "splitstore.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(path) - }) + path := t.TempDir() // open the splitstore ss, err := Open(path, ds, hot, cold, cfg) @@ -228,10 +220,16 @@ func testSplitStore(t *testing.T, cfg *Config) { } func TestSplitStoreCompaction(t *testing.T) { + //stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001 + //stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001 + //stm: @SPLITSTORE_SPLITSTORE_CLOSE_001 testSplitStore(t, &Config{MarkSetType: "map"}) } func 
TestSplitStoreCompactionWithBadger(t *testing.T) { + //stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001 + //stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001 + //stm: @SPLITSTORE_SPLITSTORE_CLOSE_001 bs := badgerMarkSetBatchSize badgerMarkSetBatchSize = 1 t.Cleanup(func() { @@ -241,6 +239,9 @@ func TestSplitStoreCompactionWithBadger(t *testing.T) { } func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) { + //stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001 + //stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001 + //stm: @SPLITSTORE_SPLITSTORE_CLOSE_001 ctx := context.Background() chain := &mockChain{t: t} @@ -277,14 +278,7 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) { t.Fatal(err) } - path, err := ioutil.TempDir("", "splitstore.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(path) - }) + path := t.TempDir() // open the splitstore ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"}) @@ -424,14 +418,7 @@ func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore. 
} } - path, err := ioutil.TempDir("", "splitstore.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(path) - }) + path := t.TempDir() ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"}) if err != nil { @@ -531,14 +518,7 @@ func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blocks } } - path, err := ioutil.TempDir("", "splitstore.*") - if err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { - _ = os.RemoveAll(path) - }) + path := t.TempDir() ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"}) if err != nil { diff --git a/blockstore/timed_test.go b/blockstore/timed_test.go index 16795f047..df0d6c035 100644 --- a/blockstore/timed_test.go +++ b/blockstore/timed_test.go @@ -1,3 +1,4 @@ +//stm: #unit package blockstore import ( @@ -13,6 +14,9 @@ import ( ) func TestTimedCacheBlockstoreSimple(t *testing.T) { + //stm: @SPLITSTORE_TIMED_BLOCKSTORE_START_001 + //stm: @SPLITSTORE_TIMED_BLOCKSTORE_PUT_001, @SPLITSTORE_TIMED_BLOCKSTORE_HAS_001, @SPLITSTORE_TIMED_BLOCKSTORE_GET_001 + //stm: @SPLITSTORE_TIMED_BLOCKSTORE_ALL_KEYS_CHAN_001 tc := NewTimedCacheBlockstore(10 * time.Millisecond) mClock := clock.NewMock() mClock.Set(time.Now()) diff --git a/blockstore/union_test.go b/blockstore/union_test.go index 3ae8c1d49..46433979a 100644 --- a/blockstore/union_test.go +++ b/blockstore/union_test.go @@ -1,3 +1,4 @@ +//stm: #unit package blockstore import ( @@ -15,6 +16,7 @@ var ( ) func TestUnionBlockstore_Get(t *testing.T) { + //stm: @SPLITSTORE_UNION_BLOCKSTORE_GET_001 ctx := context.Background() m1 := NewMemory() m2 := NewMemory() @@ -34,6 +36,9 @@ func TestUnionBlockstore_Get(t *testing.T) { } func TestUnionBlockstore_Put_PutMany_Delete_AllKeysChan(t *testing.T) { + //stm: @SPLITSTORE_UNION_BLOCKSTORE_PUT_001, @SPLITSTORE_UNION_BLOCKSTORE_HAS_001 + //stm: @SPLITSTORE_UNION_BLOCKSTORE_PUT_MANY_001, @SPLITSTORE_UNION_BLOCKSTORE_DELETE_001 + //stm: @SPLITSTORE_UNION_BLOCKSTORE_ALL_KEYS_CHAN_001 ctx 
:= context.Background() m1 := NewMemory() m2 := NewMemory() diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 2261c102e..61774ef34 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 8475edc77..9d1247ec7 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 465501bc6..cae6788f9 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/openrpc_test.go b/build/openrpc_test.go index 20c775331..967c7f988 100644 --- a/build/openrpc_test.go +++ b/build/openrpc_test.go @@ -1,3 +1,4 @@ +//stm: #unit package build import ( @@ -7,6 +8,7 @@ import ( ) func TestOpenRPCDiscoverJSON_Version(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_OPENRPC_VERSION_001 // openRPCDocVersion is the current OpenRPC version of the API docs. 
openRPCDocVersion := "1.2.6" diff --git a/build/version.go b/build/version.go index c80c1df40..b22aa0d2a 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.15.1-dev" +const BuildVersion = "1.15.2-dev" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/adt/diff_adt_test.go b/chain/actors/adt/diff_adt_test.go index b0e01b78d..6e50a7f7c 100644 --- a/chain/actors/adt/diff_adt_test.go +++ b/chain/actors/adt/diff_adt_test.go @@ -1,3 +1,4 @@ +//stm: #unit package adt import ( @@ -44,6 +45,7 @@ func TestDiffAdtArray(t *testing.T) { changes := new(TestDiffArray) + //stm: @CHAIN_ADT_ARRAY_DIFF_001 assert.NoError(t, DiffAdtArray(arrA, arrB, changes)) assert.NotNil(t, changes) @@ -98,6 +100,7 @@ func TestDiffAdtMap(t *testing.T) { changes := new(TestDiffMap) + //stm: @CHAIN_ADT_MAP_DIFF_001 assert.NoError(t, DiffAdtMap(mapA, mapB, changes)) assert.NotNil(t, changes) diff --git a/chain/actors/aerrors/error_test.go b/chain/actors/aerrors/error_test.go index 3bfd3d042..63409ab55 100644 --- a/chain/actors/aerrors/error_test.go +++ b/chain/actors/aerrors/error_test.go @@ -1,3 +1,4 @@ +//stm: #unit package aerrors_test import ( @@ -11,6 +12,7 @@ import ( ) func TestFatalError(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_ACTOR_ERRORS_001 e1 := xerrors.New("out of disk space") e2 := xerrors.Errorf("could not put node: %w", e1) e3 := xerrors.Errorf("could not save head: %w", e2) @@ -24,6 +26,7 @@ func TestFatalError(t *testing.T) { assert.True(t, IsFatal(aw4), "should be fatal") } func TestAbsorbeError(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_ACTOR_ERRORS_001 e1 := xerrors.New("EOF") e2 := xerrors.Errorf("could not decode: %w", e1) ae := Absorb(e2, 35, "failed to decode CBOR") diff --git a/chain/actors/policy/policy_test.go b/chain/actors/policy/policy_test.go index f40250fba..92a9f6965 100644 --- 
a/chain/actors/policy/policy_test.go +++ b/chain/actors/policy/policy_test.go @@ -1,3 +1,4 @@ +//stm: #unit package policy import ( @@ -22,6 +23,7 @@ func TestSupportedProofTypes(t *testing.T) { for t := range miner0.SupportedProofTypes { oldTypes = append(oldTypes, t) } + //stm: @BLOCKCHAIN_POLICY_SET_MAX_SUPPORTED_PROOF_TYPES_001 t.Cleanup(func() { SetSupportedProofTypes(oldTypes...) }) @@ -33,6 +35,7 @@ func TestSupportedProofTypes(t *testing.T) { abi.RegisteredSealProof_StackedDrg2KiBV1: {}, }, ) + //stm: @BLOCKCHAIN_POLICY_ADD_MAX_SUPPORTED_PROOF_TYPES_001 AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1) require.EqualValues(t, miner0.SupportedProofTypes, @@ -45,6 +48,7 @@ func TestSupportedProofTypes(t *testing.T) { // Tests assumptions about policies being the same between actor versions. func TestAssumptions(t *testing.T) { + //stm: @BLOCKCHAIN_POLICY_ASSUMPTIONS_001 require.EqualValues(t, miner0.SupportedProofTypes, miner2.PreCommitSealProofTypesV0) require.Equal(t, miner0.PreCommitChallengeDelay, miner2.PreCommitChallengeDelay) require.Equal(t, miner0.MaxSectorExpirationExtension, miner2.MaxSectorExpirationExtension) @@ -58,6 +62,7 @@ func TestAssumptions(t *testing.T) { } func TestPartitionSizes(t *testing.T) { + //stm: @CHAIN_ACTOR_PARTITION_SIZES_001 for _, p := range abi.SealProofInfos { sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof) require.NoError(t, err) @@ -71,6 +76,7 @@ func TestPartitionSizes(t *testing.T) { } func TestPoStSize(t *testing.T) { + //stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001 v12PoStSize, err := GetMaxPoStPartitions(network.Version12, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1) require.Equal(t, 4, v12PoStSize) require.NoError(t, err) diff --git a/chain/beacon/drand/drand_test.go b/chain/beacon/drand/drand_test.go index d66ee7b54..44effd506 100644 --- a/chain/beacon/drand/drand_test.go +++ b/chain/beacon/drand/drand_test.go @@ -1,3 +1,5 @@ +//stm: ignore +//Only tests 
external library behavior, therefore it should not be annotated package drand import ( diff --git a/chain/consensus/filcns/compute_state.go b/chain/consensus/filcns/compute_state.go index 44b792854..9b2183a59 100644 --- a/chain/consensus/filcns/compute_state.go +++ b/chain/consensus/filcns/compute_state.go @@ -2,6 +2,7 @@ package filcns import ( "context" + "os" "sync/atomic" "github.com/filecoin-project/lotus/chain/rand" @@ -94,7 +95,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager }() ctx = blockstore.WithHotView(ctx) - makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (*vm.VM, error) { + makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (vm.Interface, error) { vmopt := &vm.VMOpts{ StateBase: base, Epoch: e, @@ -108,10 +109,23 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts), } + if os.Getenv("LOTUS_USE_FVM_EXPERIMENTAL") == "1" { + // This is needed so that the FVM does not have to duplicate the genesis vesting schedule, one + // of the components of the circ supply calc. + // This field is NOT needed by the LegacyVM, and also NOT needed by the FVM from v15 onwards. 
+ filVested, err := sm.GetFilVested(ctx, e) + if err != nil { + return nil, err + } + + vmopt.FilVested = filVested + return vm.NewFVM(ctx, vmopt) + } + return sm.VMConstructor()(ctx, vmopt) } - runCron := func(vmCron *vm.VM, epoch abi.ChainEpoch) error { + runCron := func(vmCron vm.Interface, epoch abi.ChainEpoch) error { cronMsg := &types.Message{ To: cron.Address, From: builtin.SystemActorAddr, diff --git a/chain/events/events_test.go b/chain/events/events_test.go index 5f52cbd92..1bc5ce710 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -1,3 +1,4 @@ +//stm: #unit package events import ( @@ -358,6 +359,7 @@ func (fcs *fakeCS) advance(rev, app, drop int, msgs map[int]cid.Cid, nulls ...in var _ EventAPI = &fakeCS{} func TestAt(t *testing.T) { + //stm: @EVENTS_HEIGHT_CHAIN_AT_001, @EVENTS_HEIGHT_REVERT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) require.NoError(t, err) @@ -418,6 +420,7 @@ func TestAt(t *testing.T) { } func TestAtNullTrigger(t *testing.T) { + //stm: @EVENTS_HEIGHT_CHAIN_AT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) require.NoError(t, err) @@ -447,6 +450,7 @@ func TestAtNullTrigger(t *testing.T) { } func TestAtNullConf(t *testing.T) { + //stm: @EVENTS_HEIGHT_CHAIN_AT_001, @EVENTS_HEIGHT_REVERT_001 ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -485,6 +489,7 @@ func TestAtNullConf(t *testing.T) { } func TestAtStart(t *testing.T) { + //stm: @EVENTS_HEIGHT_CHAIN_AT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -515,6 +520,7 @@ func TestAtStart(t *testing.T) { } func TestAtStartConfidence(t *testing.T) { + //stm: @EVENTS_HEIGHT_CHAIN_AT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -541,6 +547,7 @@ func TestAtStartConfidence(t *testing.T) { } func TestAtChained(t *testing.T) { + //stm: @EVENTS_HEIGHT_CHAIN_AT_001 fcs := newFakeCS(t) events, err := 
NewEvents(context.Background(), fcs) @@ -571,6 +578,7 @@ func TestAtChained(t *testing.T) { } func TestAtChainedConfidence(t *testing.T) { + //stm: @EVENTS_HEIGHT_CHAIN_AT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -601,6 +609,7 @@ func TestAtChainedConfidence(t *testing.T) { } func TestAtChainedConfidenceNull(t *testing.T) { + //stm: @EVENTS_HEIGHT_CHAIN_AT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -632,6 +641,7 @@ func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Messag } func TestCalled(t *testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -837,6 +847,7 @@ func TestCalled(t *testing.T) { } func TestCalledTimeout(t *testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -897,6 +908,7 @@ func TestCalledTimeout(t *testing.T) { } func TestCalledOrder(t *testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -953,6 +965,7 @@ func TestCalledOrder(t *testing.T) { } func TestCalledNull(t *testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -1011,6 +1024,7 @@ func TestCalledNull(t *testing.T) { } func TestRemoveTriggersOnMessage(t *testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -1094,6 +1108,7 @@ type testStateChange struct { } func TestStateChanged(t *testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -1179,6 +1194,7 @@ func TestStateChanged(t *testing.T) { } func TestStateChangedRevert(t 
*testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -1255,6 +1271,7 @@ func TestStateChangedRevert(t *testing.T) { } func TestStateChangedTimeout(t *testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 timeoutHeight := abi.ChainEpoch(20) confidence := 3 @@ -1332,6 +1349,7 @@ func TestStateChangedTimeout(t *testing.T) { } func TestCalledMultiplePerEpoch(t *testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 fcs := newFakeCS(t) events, err := NewEvents(context.Background(), fcs) @@ -1384,6 +1402,7 @@ func TestCalledMultiplePerEpoch(t *testing.T) { } func TestCachedSameBlock(t *testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 fcs := newFakeCS(t) _, err := NewEvents(context.Background(), fcs) @@ -1418,6 +1437,7 @@ func (t *testObserver) Revert(_ context.Context, from, to *types.TipSet) error { } func TestReconnect(t *testing.T) { + //stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001 ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/chain/events/state/predicates_test.go b/chain/events/state/predicates_test.go index bdc7523dc..949d67dd4 100644 --- a/chain/events/state/predicates_test.go +++ b/chain/events/state/predicates_test.go @@ -1,3 +1,4 @@ +//stm: #unit package state import ( @@ -35,6 +36,12 @@ func init() { } func TestMarketPredicates(t *testing.T) { + //stm: @EVENTS_PREDICATES_ON_ACTOR_STATE_CHANGED_001, @EVENTS_PREDICATES_DEAL_STATE_CHANGED_001 + //stm: @EVENTS_PREDICATES_DEAL_CHANGED_FOR_IDS + + //stm: @EVENTS_PREDICATES_ON_BALANCE_CHANGED_001, @EVENTS_PREDICATES_BALANCE_CHANGED_FOR_ADDRESS_001 + //stm: @EVENTS_PREDICATES_ON_DEAL_PROPOSAL_CHANGED_001, @EVENTS_PREDICATES_PROPOSAL_AMT_CHANGED_001 + //stm: @EVENTS_PREDICATES_DEAL_STATE_CHANGED_001, @EVENTS_PREDICATES_DEAL_AMT_CHANGED_001 ctx := context.Background() bs := bstore.NewMemorySync() store := 
adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) @@ -333,6 +340,8 @@ func TestMarketPredicates(t *testing.T) { } func TestMinerSectorChange(t *testing.T) { + //stm: @EVENTS_PREDICATES_ON_ACTOR_STATE_CHANGED_001, @EVENTS_PREDICATES_MINER_ACTOR_CHANGE_001 + //stm: @EVENTS_PREDICATES_MINER_SECTOR_CHANGE_001 ctx := context.Background() bs := bstore.NewMemorySync() store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) diff --git a/chain/events/tscache_test.go b/chain/events/tscache_test.go index c3779eb9e..dad59f185 100644 --- a/chain/events/tscache_test.go +++ b/chain/events/tscache_test.go @@ -1,3 +1,4 @@ +//stm: #unit package events import ( @@ -92,6 +93,7 @@ func (h *cacheHarness) skip(n abi.ChainEpoch) { } func TestTsCache(t *testing.T) { + //stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001, @EVENTS_CACHE_GET_001, @EVENTS_CACHE_ADD_001 h := newCacheharness(t) for i := 0; i < 9000; i++ { @@ -104,6 +106,8 @@ func TestTsCache(t *testing.T) { } func TestTsCacheNulls(t *testing.T) { + //stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001, @EVENTS_CACHE_GET_CHAIN_TIPSET_BEFORE_001, @EVENTS_CACHE_GET_CHAIN_TIPSET_AFTER_001 + //stm: @EVENTS_CACHE_GET_001, @EVENTS_CACHE_ADD_001 ctx := context.Background() h := newCacheharness(t) @@ -182,6 +186,7 @@ func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSet(ctx context.Context, tsk } func TestTsCacheEmpty(t *testing.T) { + //stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001 // Calling best on an empty cache should just call out to the chain API callCounter := &tsCacheAPIStorageCallCounter{t: t} tsc := newTSCache(callCounter, 50) @@ -191,6 +196,7 @@ func TestTsCacheEmpty(t *testing.T) { } func TestTsCacheSkip(t *testing.T) { + //stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001, @EVENTS_CACHE_GET_001, @EVENTS_CACHE_ADD_001 h := newCacheharness(t) ts, err := types.NewTipSet([]*types.BlockHeader{{ diff --git a/chain/gen/gen_test.go b/chain/gen/gen_test.go index 8c38328d0..e0637fbd8 100644 --- a/chain/gen/gen_test.go +++ b/chain/gen/gen_test.go @@ -1,3 +1,4 @@ +//stm: #unit 
package gen import ( @@ -34,6 +35,7 @@ func testGeneration(t testing.TB, n int, msgs int, sectors int) { } func TestChainGeneration(t *testing.T) { + //stm: @CHAIN_GEN_NEW_GEN_WITH_SECTORS_001, @CHAIN_GEN_NEXT_TIPSET_001 t.Run("10-20-1", func(t *testing.T) { testGeneration(t, 10, 20, 1) }) t.Run("10-20-25", func(t *testing.T) { testGeneration(t, 10, 20, 25) }) } diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index 6ab101e78..a1d1d01b8 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -491,12 +491,13 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca Actors: filcns.NewActorRegistry(), Syscalls: mkFakedSigSyscalls(sys), CircSupplyCalc: csc, + FilVested: big.Zero(), NetworkVersion: nv, - BaseFee: types.NewInt(0), + BaseFee: big.Zero(), } - vm, err := vm.NewVM(ctx, &vmopt) + vm, err := vm.NewLegacyVM(ctx, &vmopt) if err != nil { - return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err) + return cid.Undef, xerrors.Errorf("failed to create NewLegacyVM: %w", err) } for mi, m := range template.Miners { diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index 274918147..fd83a7640 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -95,12 +95,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal Syscalls: mkFakedSigSyscalls(sys), CircSupplyCalc: csc, NetworkVersion: nv, - BaseFee: types.NewInt(0), + BaseFee: big.Zero(), + FilVested: big.Zero(), } - vm, err := vm.NewVM(ctx, vmopt) + vm, err := vm.NewLegacyVM(ctx, vmopt) if err != nil { - return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err) + return cid.Undef, xerrors.Errorf("failed to create NewLegacyVM: %w", err) } if len(miners) == 0 { @@ -520,7 +521,7 @@ func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization cry return out, nil } -func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) 
(*power0.CurrentTotalPowerReturn, error) { +func currentTotalPower(ctx context.Context, vm *vm.LegacyVM, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) { pwret, err := doExecValue(ctx, vm, power.Address, maddr, big.Zero(), builtin0.MethodsPower.CurrentTotalPower, nil) if err != nil { return nil, err @@ -533,7 +534,7 @@ func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (* return &pwr, nil } -func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch, av actors.Version) (abi.DealWeight, abi.DealWeight, error) { +func dealWeight(ctx context.Context, vm *vm.LegacyVM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch, av actors.Version) (abi.DealWeight, abi.DealWeight, error) { // TODO: This hack should move to market actor wrapper if av <= actors.Version2 { params := &market0.VerifyDealsForActivationParams{ @@ -593,7 +594,7 @@ func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs [ return dealWeights.Sectors[0].DealWeight, dealWeights.Sectors[0].VerifiedDealWeight, nil } -func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address, av actors.Version) (abi.StoragePower, builtin.FilterEstimate, error) { +func currentEpochBlockReward(ctx context.Context, vm *vm.LegacyVM, maddr address.Address, av actors.Version) (abi.StoragePower, builtin.FilterEstimate, error) { rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), reward.Methods.ThisEpochReward, nil) if err != nil { return big.Zero(), builtin.FilterEstimate{}, err @@ -628,7 +629,7 @@ func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Addre return epochReward.ThisEpochBaselinePower, builtin.FilterEstimate(epochReward.ThisEpochRewardSmoothed), nil } -func circSupply(ctx context.Context, vmi *vm.VM, maddr address.Address) abi.TokenAmount { +func circSupply(ctx context.Context, vmi 
*vm.LegacyVM, maddr address.Address) abi.TokenAmount { unsafeVM := &vm.UnsafeVM{VM: vmi} rt := unsafeVM.MakeRuntime(ctx, &types.Message{ GasLimit: 1_000_000_000, diff --git a/chain/gen/genesis/util.go b/chain/gen/genesis/util.go index 67a4e9579..452bc835b 100644 --- a/chain/gen/genesis/util.go +++ b/chain/gen/genesis/util.go @@ -21,7 +21,7 @@ func mustEnc(i cbg.CBORMarshaler) []byte { return enc } -func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) { +func doExecValue(ctx context.Context, vm *vm.LegacyVM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) { act, err := vm.StateTree().GetActor(from) if err != nil { return nil, xerrors.Errorf("doExec failed to get from actor (%s): %w", from, err) diff --git a/chain/market/fundmanager_test.go b/chain/market/fundmanager_test.go index 125304343..8f2d5a2f9 100644 --- a/chain/market/fundmanager_test.go +++ b/chain/market/fundmanager_test.go @@ -1,3 +1,4 @@ +//stm: #unit package market import ( @@ -22,6 +23,7 @@ import ( // TestFundManagerBasic verifies that the basic fund manager operations work func TestFundManagerBasic(t *testing.T) { + //stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001 s := setup(t) defer s.fm.Stop() @@ -106,6 +108,7 @@ func TestFundManagerBasic(t *testing.T) { // TestFundManagerParallel verifies that operations can be run in parallel func TestFundManagerParallel(t *testing.T) { + //stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001 s := setup(t) defer s.fm.Stop() @@ -197,6 +200,7 @@ func TestFundManagerParallel(t *testing.T) { // TestFundManagerReserveByWallet verifies that reserve requests are grouped by wallet func TestFundManagerReserveByWallet(t *testing.T) { + //stm: @MARKET_RESERVE_FUNDS_001 s := setup(t) defer s.fm.Stop() @@ -290,6 +294,7 @@ func TestFundManagerReserveByWallet(t 
*testing.T) { // TestFundManagerWithdrawal verifies that as many withdraw operations as // possible are processed func TestFundManagerWithdrawalLimit(t *testing.T) { + //stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001 s := setup(t) defer s.fm.Stop() @@ -384,6 +389,7 @@ func TestFundManagerWithdrawalLimit(t *testing.T) { // TestFundManagerWithdrawByWallet verifies that withdraw requests are grouped by wallet func TestFundManagerWithdrawByWallet(t *testing.T) { + //stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001 s := setup(t) defer s.fm.Stop() @@ -493,6 +499,7 @@ func TestFundManagerWithdrawByWallet(t *testing.T) { // TestFundManagerRestart verifies that waiting for incomplete requests resumes // on restart func TestFundManagerRestart(t *testing.T) { + //stm: @MARKET_RESERVE_FUNDS_001 s := setup(t) defer s.fm.Stop() @@ -559,6 +566,7 @@ func TestFundManagerRestart(t *testing.T) { // 3. Deal B completes, reducing addr1 by 7: reserved 12 available 12 -> 5 // 4. 
Deal A releases 5 from addr1: reserved 12 -> 7 available 5 func TestFundManagerReleaseAfterPublish(t *testing.T) { + //stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001 s := setup(t) defer s.fm.Stop() diff --git a/chain/messagepool/block_proba_test.go b/chain/messagepool/block_proba_test.go index 93f51e887..c33691e2b 100644 --- a/chain/messagepool/block_proba_test.go +++ b/chain/messagepool/block_proba_test.go @@ -1,3 +1,4 @@ +//stm: #unit package messagepool import ( @@ -8,6 +9,7 @@ import ( ) func TestBlockProbability(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_BLOCK_PROB_001 mp := &MessagePool{} bp := mp.blockProbabilities(1 - 0.15) t.Logf("%+v\n", bp) @@ -20,6 +22,7 @@ func TestBlockProbability(t *testing.T) { } func TestWinnerProba(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_BLOCK_PROB_002 rand.Seed(time.Now().UnixNano()) const N = 1000000 winnerProba := noWinnersProb() diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index d7f075aab..4916fc71e 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -854,7 +854,6 @@ func TestMessageValueTooHigh(t *testing.T) { Message: *msg, Signature: *sig, } - err = mp.Add(context.TODO(), sm) assert.Error(t, err) } @@ -901,8 +900,7 @@ func TestMessageSignatureInvalid(t *testing.T) { } err = mp.Add(context.TODO(), sm) assert.Error(t, err) - // assert.Contains(t, err.Error(), "invalid signature length") - assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid signature length") } } @@ -926,14 +924,29 @@ func TestAddMessageTwice(t *testing.T) { to := mock.Address(1001) { - // create a valid messages - sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64()) + msg := &types.Message{ + To: to, + From: from, + Value: types.NewInt(1), + Nonce: 0, + GasLimit: 50000000, + GasFeeCap: types.NewInt(minimumBaseFee.Uint64()), + GasPremium: types.NewInt(1), + Params: make([]byte, 32<<10), + } + + sig, err := 
w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{}) + if err != nil { + panic(err) + } + sm := &types.SignedMessage{ + Message: *msg, + Signature: *sig, + } mustAdd(t, mp, sm) - // try to add it twice err = mp.Add(context.TODO(), sm) - // assert.Contains(t, err.Error(), "with nonce 0 already in mpool") - assert.Error(t, err) + assert.Contains(t, err.Error(), "with nonce 0 already in mpool") } } @@ -963,8 +976,7 @@ func TestAddMessageTwiceNonceGap(t *testing.T) { // then try to add message again err = mp.Add(context.TODO(), sm) - // assert.Contains(t, err.Error(), "unfulfilled nonce gap") - assert.Error(t, err) + assert.Contains(t, err.Error(), "unfulfilled nonce gap") } } diff --git a/chain/state/statetree_test.go b/chain/state/statetree_test.go index 9177af312..b08de8a3c 100644 --- a/chain/state/statetree_test.go +++ b/chain/state/statetree_test.go @@ -1,3 +1,4 @@ +//stm: #unit package state import ( @@ -18,6 +19,7 @@ import ( ) func BenchmarkStateTreeSet(b *testing.B) { + //stm: @CHAIN_STATETREE_SET_ACTOR_001 cst := cbor.NewMemCborStore() st, err := NewStateTree(cst, types.StateTreeVersion1) if err != nil { @@ -45,6 +47,7 @@ func BenchmarkStateTreeSet(b *testing.B) { } func BenchmarkStateTreeSetFlush(b *testing.B) { + //stm: @CHAIN_STATETREE_SET_ACTOR_001 cst := cbor.NewMemCborStore() sv, err := VersionForNetwork(build.NewestNetworkVersion) if err != nil { @@ -80,6 +83,8 @@ func BenchmarkStateTreeSetFlush(b *testing.B) { } func TestResolveCache(t *testing.T) { + //stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001 + //stm: @CHAIN_STATETREE_SNAPSHOT_001, @CHAIN_STATETREE_SNAPSHOT_CLEAR_001 cst := cbor.NewMemCborStore() sv, err := VersionForNetwork(build.NewestNetworkVersion) if err != nil { @@ -182,6 +187,8 @@ func TestResolveCache(t *testing.T) { } func BenchmarkStateTree10kGetActor(b *testing.B) { + //stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, 
@CHAIN_STATETREE_VERSION_FOR_NETWORK_001 + //stm: @CHAIN_STATETREE_FLUSH_001 cst := cbor.NewMemCborStore() sv, err := VersionForNetwork(build.NewestNetworkVersion) if err != nil { @@ -229,6 +236,7 @@ func BenchmarkStateTree10kGetActor(b *testing.B) { } func TestSetCache(t *testing.T) { + //stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001 cst := cbor.NewMemCborStore() sv, err := VersionForNetwork(build.NewestNetworkVersion) if err != nil { @@ -270,6 +278,8 @@ func TestSetCache(t *testing.T) { } func TestSnapshots(t *testing.T) { + //stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001 + //stm: @CHAIN_STATETREE_FLUSH_001, @CHAIN_STATETREE_SNAPSHOT_REVERT_001, CHAIN_STATETREE_SNAPSHOT_CLEAR_001 ctx := context.Background() cst := cbor.NewMemCborStore() @@ -360,6 +370,7 @@ func assertNotHas(t *testing.T, st *StateTree, addr address.Address) { } func TestStateTreeConsistency(t *testing.T) { + //stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001, @CHAIN_STATETREE_FLUSH_001 cst := cbor.NewMemCborStore() // TODO: ActorUpgrade: this test tests pre actors v2 diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index 31639701d..5db508008 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -5,6 +5,12 @@ import ( "errors" "fmt" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/lotus/chain/state" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/go-address" @@ -64,6 +70,8 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. pheight = ts.Height() - 1 } + // Since we're simulating a future message, pretend we're applying it in the "next" tipset + vmHeight := pheight + 1 bstate := ts.ParentState() // Run the (not expensive) migration. 
@@ -72,9 +80,14 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. return nil, fmt.Errorf("failed to handle fork: %w", err) } + filVested, err := sm.GetFilVested(ctx, vmHeight) + if err != nil { + return nil, err + } + vmopt := &vm.VMOpts{ StateBase: bstate, - Epoch: pheight + 1, + Epoch: vmHeight, Rand: rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion), Bstore: sm.cs.StateBlockstore(), Actors: sm.tsExec.NewActorRegistry(), @@ -82,6 +95,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. CircSupplyCalc: sm.GetVMCirculatingSupply, NetworkVersion: sm.GetNetworkVersion(ctx, pheight+1), BaseFee: types.NewInt(0), + FilVested: filVested, LookbackState: LookbackStateGetterForTipset(sm, ts), } @@ -112,7 +126,12 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. ) } - fromActor, err := vmi.StateTree().GetActor(msg.From) + stTree, err := sm.StateTree(bstate) + if err != nil { + return nil, xerrors.Errorf("failed to load state tree: %w", err) + } + + fromActor, err := stTree.GetActor(msg.From) if err != nil { return nil, xerrors.Errorf("call raw get actor: %s", err) } @@ -175,13 +194,16 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri } } - state, _, err := sm.TipSetState(ctx, ts) + // Since we're simulating a future message, pretend we're applying it in the "next" tipset + vmHeight := ts.Height() + 1 + + stateCid, _, err := sm.TipSetState(ctx, ts) if err != nil { return nil, xerrors.Errorf("computing tipset state: %w", err) } // Technically, the tipset we're passing in here should be ts+1, but that may not exist. 
- state, err = sm.HandleStateForks(ctx, state, ts.Height(), nil, ts) + stateCid, err = sm.HandleStateForks(ctx, stateCid, ts.Height(), nil, ts) if err != nil { return nil, fmt.Errorf("failed to handle fork: %w", err) } @@ -196,16 +218,23 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri ) } + filVested, err := sm.GetFilVested(ctx, vmHeight) + if err != nil { + return nil, err + } + + buffStore := blockstore.NewBuffered(sm.cs.StateBlockstore()) vmopt := &vm.VMOpts{ - StateBase: state, - Epoch: ts.Height() + 1, + StateBase: stateCid, + Epoch: vmHeight, Rand: r, - Bstore: sm.cs.StateBlockstore(), + Bstore: buffStore, Actors: sm.tsExec.NewActorRegistry(), Syscalls: sm.Syscalls, CircSupplyCalc: sm.GetVMCirculatingSupply, NetworkVersion: sm.GetNetworkVersion(ctx, ts.Height()+1), BaseFee: ts.Blocks()[0].ParentBaseFee, + FilVested: filVested, LookbackState: LookbackStateGetterForTipset(sm, ts), } vmi, err := sm.newVM(ctx, vmopt) @@ -219,7 +248,19 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri } } - fromActor, err := vmi.StateTree().GetActor(msg.From) + // We flush to get the VM's view of the state tree after applying the above messages + // This is needed to get the correct nonce from the actor state to match the VM + stateCid, err = vmi.Flush(ctx) + if err != nil { + return nil, xerrors.Errorf("flushing vm: %w", err) + } + + stTree, err := state.LoadStateTree(cbor.NewCborStore(buffStore), stateCid) + if err != nil { + return nil, xerrors.Errorf("loading state tree: %w", err) + } + + fromActor, err := stTree.GetActor(msg.From) if err != nil { return nil, xerrors.Errorf("call raw get actor: %s", err) } diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index 4fad1e4fc..9a22716ba 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -1,3 +1,4 @@ +//stm: #integration package stmgr_test import ( @@ -106,6 +107,9 @@ func (ta *testActor) TestMethod(rt rt2.Runtime, params 
*abi.EmptyValue) *abi.Emp } func TestForkHeightTriggers(t *testing.T) { + //stm: @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_FLUSH_001, @TOKEN_WALLET_SIGN_001 + //stm: @CHAIN_GEN_NEXT_TIPSET_001 + //stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001 logging.SetAllLoggers(logging.LevelInfo) ctx := context.TODO() @@ -166,8 +170,8 @@ func TestForkHeightTriggers(t *testing.T) { inv := filcns.NewActorRegistry() inv.Register(nil, testActor{}) - sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { - nvm, err := vm.NewVM(ctx, vmopt) + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { + nvm, err := vm.NewLegacyVM(ctx, vmopt) if err != nil { return nil, err } @@ -241,6 +245,8 @@ func TestForkHeightTriggers(t *testing.T) { } func TestForkRefuseCall(t *testing.T) { + //stm: @CHAIN_GEN_NEXT_TIPSET_001, @CHAIN_GEN_NEXT_TIPSET_FROM_MINERS_001 + //stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001, @CHAIN_STATE_CALL_001 logging.SetAllLoggers(logging.LevelInfo) for after := 0; after < 3; after++ { @@ -281,8 +287,8 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { inv := filcns.NewActorRegistry() inv.Register(nil, testActor{}) - sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { - nvm, err := vm.NewVM(ctx, vmopt) + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { + nvm, err := vm.NewLegacyVM(ctx, vmopt) if err != nil { return nil, err } @@ -360,6 +366,8 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { } func TestForkPreMigration(t *testing.T) { + //stm: @CHAIN_GEN_NEXT_TIPSET_001, + //stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001 logging.SetAllLoggers(logging.LevelInfo) cg, err := gen.NewGenerator() @@ -500,8 +508,8 @@ func TestForkPreMigration(t *testing.T) { inv := filcns.NewActorRegistry() inv.Register(nil, 
testActor{}) - sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { - nvm, err := vm.NewVM(ctx, vmopt) + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { + nvm, err := vm.NewLegacyVM(ctx, vmopt) if err != nil { return nil, err } diff --git a/chain/stmgr/searchwait_test.go b/chain/stmgr/searchwait_test.go index b8cd7ddcf..73635cdea 100644 --- a/chain/stmgr/searchwait_test.go +++ b/chain/stmgr/searchwait_test.go @@ -1,3 +1,4 @@ +//stm: #unit package stmgr_test import ( @@ -12,6 +13,8 @@ import ( ) func TestSearchForMessageReplacements(t *testing.T) { + //stm: @CHAIN_GEN_NEXT_TIPSET_001 + //stm: @CHAIN_STATE_SEARCH_MSG_001 ctx := context.Background() cg, err := gen.NewGenerator() if err != nil { diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 45dd52ec8..d0bdd73e9 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -84,7 +84,7 @@ type StateManager struct { compWait map[string]chan struct{} stlk sync.Mutex genesisMsigLk sync.Mutex - newVM func(context.Context, *vm.VMOpts) (*vm.VM, error) + newVM func(context.Context, *vm.VMOpts) (vm.Interface, error) Syscalls vm.SyscallBuilder preIgnitionVesting []msig0.State postIgnitionVesting []msig0.State @@ -347,12 +347,12 @@ func (sm *StateManager) ValidateChain(ctx context.Context, ts *types.TipSet) err return nil } -func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (*vm.VM, error)) { +func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (vm.Interface, error)) { sm.newVM = nvm } -func (sm *StateManager) VMConstructor() func(context.Context, *vm.VMOpts) (*vm.VM, error) { - return func(ctx context.Context, opts *vm.VMOpts) (*vm.VM, error) { +func (sm *StateManager) VMConstructor() func(context.Context, *vm.VMOpts) (vm.Interface, error) { + return func(ctx context.Context, opts *vm.VMOpts) (vm.Interface, error) { return sm.newVM(ctx, opts) } } diff --git a/chain/stmgr/supply.go 
b/chain/stmgr/supply.go index 0744c02aa..7c55a1a0d 100644 --- a/chain/stmgr/supply.go +++ b/chain/stmgr/supply.go @@ -196,8 +196,32 @@ func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error { // GetVestedFunds returns all funds that have "left" actors that are in the genesis state: // - For Multisigs, it counts the actual amounts that have vested at the given epoch // - For Accounts, it counts max(currentBalance - genesisBalance, 0). -func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) { +func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) { vf := big.Zero() + + sm.genesisMsigLk.Lock() + defer sm.genesisMsigLk.Unlock() + + // TODO: combine all this? + if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() { + err := sm.setupGenesisVestingSchedule(ctx) + if err != nil { + return vf, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err) + } + } + if sm.postIgnitionVesting == nil { + err := sm.setupPostIgnitionVesting(ctx) + if err != nil { + return vf, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err) + } + } + if sm.postCalicoVesting == nil { + err := sm.setupPostCalicoVesting(ctx) + if err != nil { + return vf, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err) + } + } + if height <= build.UpgradeIgnitionHeight { for _, v := range sm.preIgnitionVesting { au := big.Sub(v.InitialBalance, v.AmountLocked(height)) @@ -282,7 +306,7 @@ func getFilPowerLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmoun return pst.TotalLocked() } -func (sm *StateManager) GetFilLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { +func GetFilLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { filMarketLocked, err := getFilMarketLocked(ctx, st) if err != nil { @@ -316,28 +340,7 @@ func 
(sm *StateManager) GetVMCirculatingSupply(ctx context.Context, height abi.C } func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) { - sm.genesisMsigLk.Lock() - defer sm.genesisMsigLk.Unlock() - if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() { - err := sm.setupGenesisVestingSchedule(ctx) - if err != nil { - return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err) - } - } - if sm.postIgnitionVesting == nil { - err := sm.setupPostIgnitionVesting(ctx) - if err != nil { - return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err) - } - } - if sm.postCalicoVesting == nil { - err := sm.setupPostCalicoVesting(ctx) - if err != nil { - return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err) - } - } - - filVested, err := sm.GetFilVested(ctx, height, st) + filVested, err := sm.GetFilVested(ctx, height) if err != nil { return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filVested: %w", err) } @@ -360,7 +363,7 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filBurnt: %w", err) } - filLocked, err := sm.GetFilLocked(ctx, st) + filLocked, err := GetFilLocked(ctx, st) if err != nil { return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filLocked: %w", err) } diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index 2a84c777b..49dd4700a 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -79,6 +79,11 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, // future. It's not guaranteed to be accurate... but that's fine. 
} + filVested, err := sm.GetFilVested(ctx, height) + if err != nil { + return cid.Undef, nil, err + } + r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion) vmopt := &vm.VMOpts{ StateBase: base, @@ -90,6 +95,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, CircSupplyCalc: sm.GetVMCirculatingSupply, NetworkVersion: sm.GetNetworkVersion(ctx, height), BaseFee: ts.Blocks()[0].ParentBaseFee, + FilVested: filVested, LookbackState: LookbackStateGetterForTipset(sm, ts), } vmi, err := sm.newVM(ctx, vmopt) diff --git a/chain/store/basefee_test.go b/chain/store/basefee_test.go index b3d414cf5..58a465356 100644 --- a/chain/store/basefee_test.go +++ b/chain/store/basefee_test.go @@ -1,3 +1,5 @@ +//stm: #unit + package store import ( @@ -10,6 +12,7 @@ import ( ) func TestBaseFee(t *testing.T) { + //stm: @CHAIN_STORE_COMPUTE_NEXT_BASE_FEE_001 tests := []struct { basefee uint64 limitUsed int64 diff --git a/chain/store/checkpoint_test.go b/chain/store/checkpoint_test.go index 73b45f3ad..acf61298d 100644 --- a/chain/store/checkpoint_test.go +++ b/chain/store/checkpoint_test.go @@ -1,3 +1,4 @@ +//stm: #unit package store_test import ( @@ -10,6 +11,9 @@ import ( ) func TestChainCheckpoint(t *testing.T) { + //stm: @CHAIN_GEN_NEXT_TIPSET_FROM_MINERS_001 + //stm: @CHAIN_STORE_GET_TIPSET_FROM_KEY_001, @CHAIN_STORE_SET_HEAD_001, @CHAIN_STORE_GET_HEAVIEST_TIPSET_001 + //stm: @CHAIN_STORE_SET_CHECKPOINT_001, @CHAIN_STORE_MAYBE_TAKE_HEAVIER_TIPSET_001, @CHAIN_STORE_REMOVE_CHECKPOINT_001 ctx := context.Background() cg, err := gen.NewGenerator() diff --git a/chain/store/coalescer_test.go b/chain/store/coalescer_test.go index d46285108..463eb5c79 100644 --- a/chain/store/coalescer_test.go +++ b/chain/store/coalescer_test.go @@ -1,3 +1,4 @@ +//stm: #unit package store import ( @@ -9,6 +10,7 @@ import ( ) func TestHeadChangeCoalescer(t *testing.T) { + //stm: @CHAIN_STORE_COALESCE_HEAD_CHANGE_001 notif := make(chan headChange, 1) c := 
NewHeadChangeCoalescer(func(revert, apply []*types.TipSet) error { notif <- headChange{apply: apply, revert: revert} diff --git a/chain/store/index_test.go b/chain/store/index_test.go index b7f1d570f..7b05dd068 100644 --- a/chain/store/index_test.go +++ b/chain/store/index_test.go @@ -1,3 +1,4 @@ +//stm: #unit package store_test import ( @@ -17,6 +18,9 @@ import ( ) func TestIndexSeeks(t *testing.T) { + //stm: @CHAIN_STORE_IMPORT_001 + //stm: @CHAIN_STORE_GET_TIPSET_BY_HEIGHT_001, @CHAIN_STORE_PUT_TIPSET_001, @CHAIN_STORE_SET_GENESIS_BLOCK_001 + //stm: @CHAIN_STORE_CLOSE_001 cg, err := gen.NewGenerator() if err != nil { t.Fatal(err) diff --git a/chain/store/snapshot.go b/chain/store/snapshot.go index 61fa8bdc8..b9630bcbd 100644 --- a/chain/store/snapshot.go +++ b/chain/store/snapshot.go @@ -18,6 +18,10 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) +func (cs *ChainStore) UnionStore() bstore.Blockstore { + return bstore.Union(cs.stateBlockstore, cs.chainBlockstore) +} + func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error { h := &car.CarHeader{ Roots: ts.Cids(), @@ -28,7 +32,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo return xerrors.Errorf("failed to write car header: %s", err) } - unionBs := bstore.Union(cs.stateBlockstore, cs.chainBlockstore) + unionBs := cs.UnionStore() return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error { blk, err := unionBs.Get(ctx, c) if err != nil { diff --git a/chain/store/store_test.go b/chain/store/store_test.go index a759a48a8..cf0d6e16c 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -1,3 +1,4 @@ +//stm: #unit package store_test import ( @@ -28,6 +29,8 @@ func init() { } func BenchmarkGetRandomness(b *testing.B) { + //stm: @CHAIN_GEN_NEXT_TIPSET_001 + //stm: @CHAIN_STATE_GET_RANDOMNESS_FROM_TICKETS_001 cg, err := gen.NewGenerator() if 
err != nil { b.Fatal(err) @@ -85,6 +88,8 @@ func BenchmarkGetRandomness(b *testing.B) { } func TestChainExportImport(t *testing.T) { + //stm: @CHAIN_GEN_NEXT_TIPSET_001 + //stm: @CHAIN_STORE_IMPORT_001 cg, err := gen.NewGenerator() if err != nil { t.Fatal(err) @@ -120,6 +125,9 @@ func TestChainExportImport(t *testing.T) { } func TestChainExportImportFull(t *testing.T) { + //stm: @CHAIN_GEN_NEXT_TIPSET_001 + //stm: @CHAIN_STORE_IMPORT_001, @CHAIN_STORE_EXPORT_001, @CHAIN_STORE_SET_HEAD_001 + //stm: @CHAIN_STORE_GET_TIPSET_BY_HEIGHT_001 cg, err := gen.NewGenerator() if err != nil { t.Fatal(err) diff --git a/chain/sync_manager_test.go b/chain/sync_manager_test.go index bbd690d23..50fedeed6 100644 --- a/chain/sync_manager_test.go +++ b/chain/sync_manager_test.go @@ -1,3 +1,4 @@ +//stm: #unit package chain import ( @@ -78,6 +79,7 @@ func assertGetSyncOp(t *testing.T, c chan *syncOp, ts *types.TipSet) { } func TestSyncManagerEdgeCase(t *testing.T) { + //stm: @CHAIN_SYNCER_SET_PEER_HEAD_001 ctx := context.Background() a := mock.TipSet(mock.MkBlock(genTs, 1, 1)) @@ -161,6 +163,7 @@ func TestSyncManagerEdgeCase(t *testing.T) { } func TestSyncManager(t *testing.T) { + //stm: @CHAIN_SYNCER_SET_PEER_HEAD_001 ctx := context.Background() a := mock.TipSet(mock.MkBlock(genTs, 1, 1)) diff --git a/chain/types/bigint_test.go b/chain/types/bigint_test.go index b66528db3..be03d5c39 100644 --- a/chain/types/bigint_test.go +++ b/chain/types/bigint_test.go @@ -1,3 +1,4 @@ +//stm: #unit package types import ( @@ -14,6 +15,7 @@ import ( ) func TestBigIntSerializationRoundTrip(t *testing.T) { + //stm: @CHAIN_TYPES_PARSE_BIGINT_001 testValues := []string{ "0", "1", "10", "-10", "9999", "12345678901234567891234567890123456789012345678901234567890", } @@ -42,6 +44,7 @@ func TestBigIntSerializationRoundTrip(t *testing.T) { } func TestFilRoundTrip(t *testing.T) { + //stm: @TYPES_FIL_PARSE_001 testValues := []string{ "0 FIL", "1 FIL", "1.001 FIL", "100.10001 FIL", "101100 FIL", "5000.01 FIL", 
"5000 FIL", } @@ -59,6 +62,7 @@ func TestFilRoundTrip(t *testing.T) { } func TestSizeStr(t *testing.T) { + //stm: @CHAIN_TYPES_SIZE_BIGINT_001 cases := []struct { in uint64 out string @@ -79,6 +83,7 @@ func TestSizeStr(t *testing.T) { } func TestSizeStrUnitsSymmetry(t *testing.T) { + //stm: @CHAIN_TYPES_SIZE_BIGINT_001 s := rand.NewSource(time.Now().UnixNano()) r := rand.New(s) @@ -95,6 +100,7 @@ func TestSizeStrUnitsSymmetry(t *testing.T) { } func TestSizeStrBig(t *testing.T) { + //stm: @CHAIN_TYPES_SIZE_BIGINT_001 ZiB := big.NewInt(50000) ZiB = ZiB.Lsh(ZiB, 70) diff --git a/chain/types/blockheader_test.go b/chain/types/blockheader_test.go index 6674f1205..e386277df 100644 --- a/chain/types/blockheader_test.go +++ b/chain/types/blockheader_test.go @@ -1,3 +1,4 @@ +//stm: #unit package types import ( @@ -51,6 +52,7 @@ func testBlockHeader(t testing.TB) *BlockHeader { } func TestBlockHeaderSerialization(t *testing.T) { + //stm: @CHAIN_TYPES_BLOCK_HEADER_FROM_CBOR_001, @CHAIN_TYPES_BLOCK_HEADER_TO_CBOR_001 bh := testBlockHeader(t) buf := new(bytes.Buffer) @@ -71,6 +73,7 @@ func TestBlockHeaderSerialization(t *testing.T) { } func TestInteropBH(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_BLOCK_INTEROP_001 newAddr, err := address.NewSecp256k1Address([]byte("address0")) if err != nil { diff --git a/chain/types/electionproof_test.go b/chain/types/electionproof_test.go index 21385868c..1eba21138 100644 --- a/chain/types/electionproof_test.go +++ b/chain/types/electionproof_test.go @@ -1,3 +1,4 @@ +//stm: #unit package types import ( @@ -11,6 +12,7 @@ import ( ) func TestPoissonFunction(t *testing.T) { + //stm: @CHAIN_TYPES_POISSON_001 tests := []struct { lambdaBase uint64 lambdaShift uint @@ -47,6 +49,7 @@ func TestPoissonFunction(t *testing.T) { } func TestLambdaFunction(t *testing.T) { + //stm: @CHAIN_TYPES_LAMBDA_001 tests := []struct { power string totalPower string @@ -72,6 +75,7 @@ func TestLambdaFunction(t *testing.T) { } func TestExpFunction(t *testing.T) { + 
//stm: @CHAIN_TYPES_NEGATIVE_EXP_001 const N = 256 step := big.NewInt(5) @@ -100,6 +104,7 @@ func q256ToF(x *big.Int) float64 { } func TestElectionLam(t *testing.T) { + //stm: @CHAIN_TYPES_LAMBDA_001 p := big.NewInt(64) tot := big.NewInt(128) lam := lambda(p, tot) @@ -128,6 +133,7 @@ func BenchmarkWinCounts(b *testing.B) { } func TestWinCounts(t *testing.T) { + //stm: @TYPES_ELECTION_PROOF_COMPUTE_WIN_COUNT_001 totalPower := NewInt(100) power := NewInt(20) diff --git a/chain/types/fil_test.go b/chain/types/fil_test.go index 7bf2a802e..5cbe22904 100644 --- a/chain/types/fil_test.go +++ b/chain/types/fil_test.go @@ -1,3 +1,4 @@ +//stm: #unit package types import ( @@ -7,6 +8,7 @@ import ( ) func TestFilShort(t *testing.T) { + //stm: @TYPES_FIL_PARSE_001 for _, s := range []struct { fil string expect string diff --git a/chain/types/message_test.go b/chain/types/message_test.go index a5a00f66b..637288374 100644 --- a/chain/types/message_test.go +++ b/chain/types/message_test.go @@ -1,3 +1,4 @@ +//stm: #unit package types import ( @@ -71,6 +72,7 @@ func TestEqualCall(t *testing.T) { Params: []byte("hai"), } + //stm: @TYPES_MESSAGE_EQUAL_CALL_001 require.True(t, m1.EqualCall(m2)) require.True(t, m1.EqualCall(m3)) require.False(t, m1.EqualCall(m4)) @@ -97,11 +99,13 @@ func TestMessageJson(t *testing.T) { exp := []byte("{\"Version\":0,\"To\":\"f04\",\"From\":\"f00\",\"Nonce\":34,\"Value\":\"0\",\"GasLimit\":123,\"GasFeeCap\":\"234\",\"GasPremium\":\"234\",\"Method\":6,\"Params\":\"aGFp\",\"CID\":{\"/\":\"bafy2bzaced5rdpz57e64sc7mdwjn3blicglhpialnrph2dlbufhf6iha63dmc\"}}") fmt.Println(string(b)) + //stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_001 require.Equal(t, exp, b) var um Message require.NoError(t, json.Unmarshal(b, &um)) + //stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_002 require.EqualValues(t, *m, um) } @@ -131,10 +135,12 @@ func TestSignedMessageJson(t *testing.T) { exp := 
[]byte("{\"Message\":{\"Version\":0,\"To\":\"f04\",\"From\":\"f00\",\"Nonce\":34,\"Value\":\"0\",\"GasLimit\":123,\"GasFeeCap\":\"234\",\"GasPremium\":\"234\",\"Method\":6,\"Params\":\"aGFp\",\"CID\":{\"/\":\"bafy2bzaced5rdpz57e64sc7mdwjn3blicglhpialnrph2dlbufhf6iha63dmc\"}},\"Signature\":{\"Type\":0,\"Data\":null},\"CID\":{\"/\":\"bafy2bzacea5ainifngxj3rygaw2hppnyz2cw72x5pysqty2x6dxmjs5qg2uus\"}}") fmt.Println(string(b)) + //stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_001 require.Equal(t, exp, b) var um SignedMessage require.NoError(t, json.Unmarshal(b, &um)) + //stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_002 require.EqualValues(t, *sm, um) } diff --git a/chain/types/signature_test.go b/chain/types/signature_test.go index 9ade3c046..85b27fea9 100644 --- a/chain/types/signature_test.go +++ b/chain/types/signature_test.go @@ -1,3 +1,4 @@ +//stm: #unit package types import ( @@ -8,6 +9,7 @@ import ( ) func TestSignatureSerializeRoundTrip(t *testing.T) { + //stm: @CHAIN_TYPES_SIGNATURE_SERIALIZATION_001 s := &crypto.Signature{ Data: []byte("foo bar cat dog"), Type: crypto.SigTypeBLS, diff --git a/chain/types/tipset_key_test.go b/chain/types/tipset_key_test.go index 73c1ca9df..6e59ad68b 100644 --- a/chain/types/tipset_key_test.go +++ b/chain/types/tipset_key_test.go @@ -1,3 +1,4 @@ +//stm: #unit package types import ( @@ -12,6 +13,7 @@ import ( ) func TestTipSetKey(t *testing.T) { + //stm: @TYPES_TIPSETKEY_FROM_BYTES_001, @TYPES_TIPSETKEY_NEW_001 cb := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.BLAKE2B_MIN + 31} c1, _ := cb.Sum([]byte("a")) c2, _ := cb.Sum([]byte("b")) diff --git a/chain/types/types_test.go b/chain/types/types_test.go index 1056fc430..1083cdc38 100644 --- a/chain/types/types_test.go +++ b/chain/types/types_test.go @@ -1,3 +1,4 @@ +//stm: #unit package types import ( diff --git a/chain/types_test.go b/chain/types_test.go index b47471c9d..bf5dff4cc 100644 --- a/chain/types_test.go +++ b/chain/types_test.go @@ -1,3 +1,4 @@ +//stm: #unit package chain import ( 
@@ -12,6 +13,7 @@ import ( ) func TestSignedMessageJsonRoundtrip(t *testing.T) { + //stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_002 to, _ := address.NewIDAddress(5234623) from, _ := address.NewIDAddress(603911192) smsg := &types.SignedMessage{ @@ -40,6 +42,7 @@ func TestSignedMessageJsonRoundtrip(t *testing.T) { } func TestAddressType(t *testing.T) { + //stm: @CHAIN_TYPES_ADDRESS_PREFIX_001 build.SetAddressNetwork(address.Testnet) addr, err := makeRandomAddress() if err != nil { diff --git a/chain/vectors/vectors_test.go b/chain/vectors/vectors_test.go index 974a2c8de..248950787 100644 --- a/chain/vectors/vectors_test.go +++ b/chain/vectors/vectors_test.go @@ -1,3 +1,4 @@ +//stm: #unit package vectors import ( @@ -26,6 +27,7 @@ func LoadVector(t *testing.T, f string, out interface{}) { } func TestBlockHeaderVectors(t *testing.T) { + //stm: @CHAIN_TYPES_SERIALIZATION_BLOCK_001 var headers []HeaderVector LoadVector(t, "block_headers.json", &headers) @@ -46,6 +48,7 @@ func TestBlockHeaderVectors(t *testing.T) { } func TestMessageSigningVectors(t *testing.T) { + //stm: @CHAIN_TYPES_SERIALIZATION_SIGNED_MESSAGE_001 var msvs []MessageSigningVector LoadVector(t, "message_signing.json", &msvs) @@ -64,6 +67,7 @@ func TestMessageSigningVectors(t *testing.T) { } func TestUnsignedMessageVectors(t *testing.T) { + //stm: @CHAIN_TYPES_SERIALIZATION_MESSAGE_001 var msvs []UnsignedMessageVector LoadVector(t, "unsigned_messages.json", &msvs) diff --git a/chain/vm/burn_test.go b/chain/vm/burn_test.go index e4fc69aff..8690b7ca1 100644 --- a/chain/vm/burn_test.go +++ b/chain/vm/burn_test.go @@ -1,3 +1,4 @@ +//stm: #unit package vm import ( @@ -9,6 +10,7 @@ import ( ) func TestGasBurn(t *testing.T) { + //stm: @BURN_ESTIMATE_GAS_OVERESTIMATION_BURN_001 tests := []struct { used int64 limit int64 @@ -40,6 +42,7 @@ func TestGasBurn(t *testing.T) { } func TestGasOutputs(t *testing.T) { + //stm: @BURN_ESTIMATE_GAS_OUTPUTS_001 baseFee := types.NewInt(10) tests := []struct { used int64 diff --git 
a/chain/vm/fvm.go b/chain/vm/fvm.go new file mode 100644 index 000000000..922eb77c5 --- /dev/null +++ b/chain/vm/fvm.go @@ -0,0 +1,312 @@ +package vm + +import ( + "bytes" + "context" + "time" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/state" + cbor "github.com/ipfs/go-ipld-cbor" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/lib/sigs" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/blockstore" + + ffi "github.com/filecoin-project/filecoin-ffi" + ffi_cgo "github.com/filecoin-project/filecoin-ffi/cgo" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" +) + +var _ Interface = (*FVM)(nil) +var _ ffi_cgo.Externs = (*FvmExtern)(nil) + +type FvmExtern struct { + Rand + blockstore.Blockstore + epoch abi.ChainEpoch + lbState LookbackStateGetter + base cid.Cid +} + +// VerifyConsensusFault is similar to the one in syscalls.go used by the LegacyVM, except it never errors +// Errors are logged and "no fault" is returned, which is functionally what go-actors does anyway +func (x *FvmExtern) VerifyConsensusFault(ctx context.Context, a, b, extra []byte) (*ffi_cgo.ConsensusFault, int64) { + totalGas := int64(0) + ret := &ffi_cgo.ConsensusFault{ + Type: ffi_cgo.ConsensusFaultNone, + } + + // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. + // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. 
+ // for that reason when checking block parent relationships, rather than instantiating a Tipset to do so + // (which runs a syntactic check), we do it directly on the CIDs. + + // (0) cheap preliminary checks + + // can blocks be decoded properly? + var blockA, blockB types.BlockHeader + if decodeErr := blockA.UnmarshalCBOR(bytes.NewReader(a)); decodeErr != nil { + log.Info("invalid consensus fault: cannot decode first block header: %w", decodeErr) + return ret, totalGas + } + + if decodeErr := blockB.UnmarshalCBOR(bytes.NewReader(b)); decodeErr != nil { + log.Info("invalid consensus fault: cannot decode second block header: %w", decodeErr) + return ret, totalGas + } + + // are blocks the same? + if blockA.Cid().Equals(blockB.Cid()) { + log.Info("invalid consensus fault: submitted blocks are the same") + return ret, totalGas + } + // (1) check conditions necessary to any consensus fault + + // were blocks mined by same miner? + if blockA.Miner != blockB.Miner { + log.Info("invalid consensus fault: blocks not mined by the same miner") + return ret, totalGas + } + + // block a must be earlier or equal to block b, epoch wise (ie at least as early in the chain). + if blockB.Height < blockA.Height { + log.Info("invalid consensus fault: first block must not be of higher height than second") + return ret, totalGas + } + + ret.Epoch = blockB.Height + + faultType := ffi_cgo.ConsensusFaultNone + + // (2) check for the consensus faults themselves + // (a) double-fork mining fault + if blockA.Height == blockB.Height { + faultType = ffi_cgo.ConsensusFaultDoubleForkMining + } + + // (b) time-offset mining fault + // strictly speaking no need to compare heights based on double fork mining check above, + // but at same height this would be a different fault. 
+ if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { + faultType = ffi_cgo.ConsensusFaultTimeOffsetMining + } + + // (c) parent-grinding fault + // Here extra is the "witness", a third block that shows the connection between A and B as + // A's sibling and B's parent. + // Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset + // + // B + // | + // [A, C] + var blockC types.BlockHeader + if len(extra) > 0 { + if decodeErr := blockC.UnmarshalCBOR(bytes.NewReader(extra)); decodeErr != nil { + log.Info("invalid consensus fault: cannot decode extra: %w", decodeErr) + return ret, totalGas + } + + if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && + types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { + faultType = ffi_cgo.ConsensusFaultParentGrinding + } + } + + // (3) return if no consensus fault by now + if faultType == ffi_cgo.ConsensusFaultNone { + log.Info("invalid consensus fault: no fault detected") + return ret, totalGas + } + + // else + // (4) expensive final checks + + // check blocks are properly signed by their respective miner + // note we do not need to check extra's: it is a parent to block b + // which itself is signed, so it was willingly included by the miner + gasA, sigErr := x.VerifyBlockSig(ctx, &blockA) + totalGas += gasA + if sigErr != nil { + log.Info("invalid consensus fault: cannot verify first block sig: %w", sigErr) + return ret, totalGas + } + + gas2, sigErr := x.VerifyBlockSig(ctx, &blockB) + totalGas += gas2 + if sigErr != nil { + log.Info("invalid consensus fault: cannot verify second block sig: %w", sigErr) + return ret, totalGas + } + + ret.Type = faultType + ret.Target = blockA.Miner + + return ret, totalGas +} + +func (x *FvmExtern) VerifyBlockSig(ctx context.Context, blk *types.BlockHeader) (int64, error) { + waddr, gasUsed, err := 
x.workerKeyAtLookback(ctx, blk.Miner, blk.Height) + if err != nil { + return gasUsed, err + } + + return gasUsed, sigs.CheckBlockSignature(ctx, blk, waddr) +} + +func (x *FvmExtern) workerKeyAtLookback(ctx context.Context, minerId address.Address, height abi.ChainEpoch) (address.Address, int64, error) { + gasUsed := int64(0) + gasAdder := func(gc GasCharge) { + // technically not overflow safe, but that's fine + gasUsed += gc.Total() + } + + cstWithoutGas := cbor.NewCborStore(x.Blockstore) + cbb := &gasChargingBlocks{gasAdder, PricelistByEpoch(x.epoch), x.Blockstore} + cstWithGas := cbor.NewCborStore(cbb) + + lbState, err := x.lbState(ctx, height) + if err != nil { + return address.Undef, gasUsed, err + } + // get appropriate miner actor + act, err := lbState.GetActor(minerId) + if err != nil { + return address.Undef, gasUsed, err + } + + // use that to get the miner state + mas, err := miner.Load(adt.WrapStore(ctx, cstWithGas), act) + if err != nil { + return address.Undef, gasUsed, err + } + + info, err := mas.Info() + if err != nil { + return address.Undef, gasUsed, err + } + + stateTree, err := state.LoadStateTree(cstWithoutGas, x.base) + if err != nil { + return address.Undef, gasUsed, err + } + + raddr, err := ResolveToKeyAddr(stateTree, cstWithGas, info.Worker) + if err != nil { + return address.Undef, gasUsed, err + } + + return raddr, gasUsed, nil +} + +type FVM struct { + fvm *ffi.FVM +} + +func NewFVM(ctx context.Context, opts *VMOpts) (*FVM, error) { + circToReport := opts.FilVested + // For v14 (and earlier), we perform the FilVested portion of the calculation, and let the FVM dynamically do the rest + // v15 and after, the circ supply is always constant per epoch, so we calculate the base and report it at creation + if opts.NetworkVersion >= network.Version15 { + state, err := state.LoadStateTree(cbor.NewCborStore(opts.Bstore), opts.StateBase) + if err != nil { + return nil, err + } + + circToReport, err = opts.CircSupplyCalc(ctx, opts.Epoch, state) + 
if err != nil { + return nil, err + } + } + + fvm, err := ffi.CreateFVM(0, + &FvmExtern{Rand: opts.Rand, Blockstore: opts.Bstore, lbState: opts.LookbackState, base: opts.StateBase, epoch: opts.Epoch}, + opts.Epoch, opts.BaseFee, circToReport, opts.NetworkVersion, opts.StateBase, + ) + if err != nil { + return nil, err + } + + return &FVM{ + fvm: fvm, + }, nil +} + +func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) { + start := build.Clock.Now() + msgBytes, err := cmsg.VMMessage().Serialize() + if err != nil { + return nil, xerrors.Errorf("serializing msg: %w", err) + } + + ret, err := vm.fvm.ApplyMessage(msgBytes, uint(cmsg.ChainLength())) + if err != nil { + return nil, xerrors.Errorf("applying msg: %w", err) + } + + return &ApplyRet{ + MessageReceipt: types.MessageReceipt{ + Return: ret.Return, + ExitCode: exitcode.ExitCode(ret.ExitCode), + GasUsed: ret.GasUsed, + }, + GasCosts: &GasOutputs{ + // TODO: do the other optional fields eventually + BaseFeeBurn: big.Zero(), + OverEstimationBurn: big.Zero(), + MinerPenalty: ret.MinerPenalty, + MinerTip: ret.MinerTip, + Refund: big.Zero(), + GasRefund: 0, + GasBurned: 0, + }, + // TODO: do these eventually, not consensus critical + // https://github.com/filecoin-project/ref-fvm/issues/318 + ActorErr: nil, + ExecutionTrace: types.ExecutionTrace{}, + Duration: time.Since(start), + }, nil +} + +func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (*ApplyRet, error) { + start := build.Clock.Now() + msgBytes, err := cmsg.VMMessage().Serialize() + if err != nil { + return nil, xerrors.Errorf("serializing msg: %w", err) + } + ret, err := vm.fvm.ApplyImplicitMessage(msgBytes) + if err != nil { + return nil, xerrors.Errorf("applying msg: %w", err) + } + + return &ApplyRet{ + MessageReceipt: types.MessageReceipt{ + Return: ret.Return, + ExitCode: exitcode.ExitCode(ret.ExitCode), + GasUsed: ret.GasUsed, + }, + GasCosts: nil, + // TODO: do these eventually, not 
consensus critical + // https://github.com/filecoin-project/ref-fvm/issues/318 + ActorErr: nil, + ExecutionTrace: types.ExecutionTrace{}, + Duration: time.Since(start), + }, nil +} + +func (vm *FVM) Flush(ctx context.Context) (cid.Cid, error) { + return vm.fvm.Flush() +} diff --git a/chain/vm/gas.go b/chain/vm/gas.go index e75c86b9f..5beaae40b 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -50,7 +50,7 @@ func newGasCharge(name string, computeGas int64, storageGas int64) GasCharge { } } -// Pricelist provides prices for operations in the VM. +// Pricelist provides prices for operations in the LegacyVM. // // Note: this interface should be APPEND ONLY since last chain checkpoint type Pricelist interface { diff --git a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go index 1bda6dfae..7e0ece769 100644 --- a/chain/vm/gas_v0.go +++ b/chain/vm/gas_v0.go @@ -50,7 +50,7 @@ type pricelistV0 struct { // whether it succeeds or fails in application) is given by: // OnChainMessageBase + len(serialized message)*OnChainMessagePerByte // Together, these account for the cost of message propagation and validation, - // up to but excluding any actual processing by the VM. + // up to but excluding any actual processing by the LegacyVM. // This is the cost a block producer burns when including an invalid message. onChainMessageComputeBase int64 onChainMessageStorageBase int64 @@ -83,11 +83,11 @@ type pricelistV0 struct { sendInvokeMethod int64 // Gas cost for any Get operation to the IPLD store - // in the runtime VM context. + // in the runtime LegacyVM context. ipldGetBase int64 // Gas cost (Base + len*PerByte) for any Put operation to the IPLD store - // in the runtime VM context. + // in the runtime LegacyVM context. 
// // Note: these costs should be significantly higher than the costs for Get // operations, since they reflect not only serialization/deserialization diff --git a/chain/vm/gas_v0_test.go b/chain/vm/gas_v0_test.go index 447e4f70c..0e657cb2c 100644 --- a/chain/vm/gas_v0_test.go +++ b/chain/vm/gas_v0_test.go @@ -1,3 +1,4 @@ +//stm: #unit package vm import ( diff --git a/chain/vm/invoker_test.go b/chain/vm/invoker_test.go index fb9910ecd..1b6fcc482 100644 --- a/chain/vm/invoker_test.go +++ b/chain/vm/invoker_test.go @@ -1,3 +1,4 @@ +//stm: #unit package vm import ( @@ -106,6 +107,7 @@ func (*basicRtMessage) ValueReceived() abi.TokenAmount { } func TestInvokerBasic(t *testing.T) { + //stm: @INVOKER_TRANSFORM_001 inv := ActorRegistry{} code, err := inv.transform(basicContract{}) assert.NoError(t, err) @@ -135,7 +137,7 @@ func TestInvokerBasic(t *testing.T) { { _, aerr := code[1](&Runtime{ - vm: &VM{networkVersion: network.Version0}, + vm: &LegacyVM{networkVersion: network.Version0}, Message: &basicRtMessage{}, }, []byte{99}) if aerrors.IsFatal(aerr) { @@ -146,7 +148,7 @@ func TestInvokerBasic(t *testing.T) { { _, aerr := code[1](&Runtime{ - vm: &VM{networkVersion: network.Version7}, + vm: &LegacyVM{networkVersion: network.Version7}, Message: &basicRtMessage{}, }, []byte{99}) if aerrors.IsFatal(aerr) { diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index 0e2adc879..c27c45371 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -65,7 +65,7 @@ type Runtime struct { ctx context.Context - vm *VM + vm *LegacyVM state *state.StateTree height abi.ChainEpoch cst ipldcbor.IpldStore @@ -158,7 +158,7 @@ func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act defer func() { if r := recover(); r != nil { if ar, ok := r.(aerrors.ActorError); ok { - log.Warnf("VM.Call failure in call from: %s to %s: %+v", rt.Caller(), rt.Receiver(), ar) + log.Warnf("LegacyVM.Call failure in call from: %s to %s: %+v", rt.Caller(), rt.Receiver(), ar) aerr = ar 
return } diff --git a/chain/vm/runtime_test.go b/chain/vm/runtime_test.go index 9fc87f7c5..816835cb9 100644 --- a/chain/vm/runtime_test.go +++ b/chain/vm/runtime_test.go @@ -1,3 +1,4 @@ +//stm: #unit package vm import ( @@ -22,6 +23,7 @@ func (*NotAVeryGoodMarshaler) MarshalCBOR(writer io.Writer) error { var _ cbg.CBORMarshaler = &NotAVeryGoodMarshaler{} func TestRuntimePutErrors(t *testing.T) { + //stm: @CHAIN_VM_STORE_PUT_002 defer func() { err := recover() if err == nil { diff --git a/chain/vm/vm.go b/chain/vm/vm.go index 1ab97bc33..a0ca446a7 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -122,7 +122,7 @@ func (bs *gasChargingBlocks) Put(ctx context.Context, blk block.Block) error { return nil } -func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime { +func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime { rt := &Runtime{ ctx: ctx, vm: vm, @@ -188,7 +188,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti } type UnsafeVM struct { - VM *VM + VM *LegacyVM } func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtime { @@ -201,7 +201,9 @@ type ( LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error) ) -type VM struct { +var _ Interface = (*LegacyVM)(nil) + +type LegacyVM struct { cstate *state.StateTree cst *cbor.BasicIpldStore buf *blockstore.BufferedBlockstore @@ -225,12 +227,14 @@ type VMOpts struct { Actors *ActorRegistry Syscalls SyscallBuilder CircSupplyCalc CircSupplyCalculator + // Amount of FIL vested from genesis actors. 
+ FilVested abi.TokenAmount NetworkVersion network.Version BaseFee abi.TokenAmount LookbackState LookbackStateGetter } -func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { +func NewLegacyVM(ctx context.Context, opts *VMOpts) (*LegacyVM, error) { buf := blockstore.NewBuffered(opts.Bstore) cst := cbor.NewCborStore(buf) state, err := state.LoadStateTree(cst, opts.StateBase) @@ -243,7 +247,7 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { return nil, err } - return &VM{ + return &LegacyVM{ cstate: state, cst: cst, buf: buf, @@ -272,7 +276,7 @@ type ApplyRet struct { GasCosts *GasOutputs } -func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, +func (vm *LegacyVM) send(ctx context.Context, msg *types.Message, parent *Runtime, gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) { defer atomic.AddUint64(&StatSends, 1) @@ -391,7 +395,7 @@ func checkMessage(msg *types.Message) error { return nil } -func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) { +func (vm *LegacyVM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) { start := build.Clock.Now() defer atomic.AddUint64(&StatApplied, 1) ret, actorErr, rt := vm.send(ctx, msg, nil, nil, start) @@ -409,7 +413,7 @@ func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*Ap }, actorErr } -func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) { +func (vm *LegacyVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) { start := build.Clock.Now() ctx, span := trace.StartSpan(ctx, "vm.ApplyMessage") defer span.End() @@ -616,7 +620,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, }, nil } -func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) { +func (vm *LegacyVM) ShouldBurn(ctx context.Context, st 
*state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) { if vm.networkVersion <= network.Version12 { // Check to see if we should burn funds. We avoid burning on successful // window post. This won't catch _indirect_ window post calls, but this @@ -646,7 +650,7 @@ func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Me type vmFlushKey struct{} -func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { +func (vm *LegacyVM) Flush(ctx context.Context) (cid.Cid, error) { _, span := trace.StartSpan(ctx, "vm.Flush") defer span.End() @@ -665,9 +669,9 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { return root, nil } -// Get the buffered blockstore associated with the VM. This includes any temporary blocks produced -// during this VM's execution. -func (vm *VM) ActorStore(ctx context.Context) adt.Store { +// Get the buffered blockstore associated with the LegacyVM. This includes any temporary blocks produced +// during this LegacyVM's execution. 
+func (vm *LegacyVM) ActorStore(ctx context.Context) adt.Store { return adt.WrapStore(ctx, vm.cst) } @@ -820,11 +824,11 @@ func copyRec(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid, return nil } -func (vm *VM) StateTree() types.StateTree { +func (vm *LegacyVM) StateTree() types.StateTree { return vm.cstate } -func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { +func (vm *LegacyVM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { ctx, span := trace.StartSpan(rt.ctx, "vm.Invoke") defer span.End() if span.IsRecordingEvents() { @@ -847,11 +851,11 @@ func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params return ret, nil } -func (vm *VM) SetInvoker(i *ActorRegistry) { +func (vm *LegacyVM) SetInvoker(i *ActorRegistry) { vm.areg = i } -func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) { +func (vm *LegacyVM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) { // Before v15, this was recalculated on each invocation as the state tree was mutated if vm.networkVersion <= network.Version14 { return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) @@ -860,14 +864,14 @@ func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) { return vm.baseCircSupply, nil } -func (vm *VM) incrementNonce(addr address.Address) error { +func (vm *LegacyVM) incrementNonce(addr address.Address) error { return vm.cstate.MutateActor(addr, func(a *types.Actor) error { a.Nonce++ return nil }) } -func (vm *VM) transfer(from, to address.Address, amt types.BigInt, networkVersion network.Version) aerrors.ActorError { +func (vm *LegacyVM) transfer(from, to address.Address, amt types.BigInt, networkVersion network.Version) aerrors.ActorError { var f *types.Actor var fromID, toID address.Address var err error @@ -955,7 +959,7 @@ func (vm *VM) transfer(from, to address.Address, amt 
types.BigInt, networkVersio return nil } -func (vm *VM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { +func (vm *LegacyVM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { if amt.LessThan(types.NewInt(0)) { return xerrors.Errorf("attempted to transfer negative value to gas holder") } @@ -969,7 +973,7 @@ func (vm *VM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, }) } -func (vm *VM) transferFromGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { +func (vm *LegacyVM) transferFromGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { if amt.LessThan(types.NewInt(0)) { return xerrors.Errorf("attempted to transfer negative value from gas holder") } diff --git a/chain/vm/vmi.go b/chain/vm/vmi.go new file mode 100644 index 000000000..9ffd8d830 --- /dev/null +++ b/chain/vm/vmi.go @@ -0,0 +1,27 @@ +package vm + +import ( + "context" + "os" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" +) + +type Interface interface { + // Applies the given message onto the VM's current state, returning the result of the execution + ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) + // Same as above but for system messages (the Cron invocation and block reward payments). + // Must NEVER fail. + ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) + // Flush all buffered objects into the state store provided to the VM at construction. 
+ Flush(ctx context.Context) (cid.Cid, error) +} + +func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) { + if os.Getenv("LOTUS_USE_FVM_EXPERIMENTAL") == "1" { + return NewFVM(ctx, opts) + } + + return NewLegacyVM(ctx, opts) +} diff --git a/cli/chain_test.go b/cli/chain_test.go index 0b3cce728..fa7d9d0bb 100644 --- a/cli/chain_test.go +++ b/cli/chain_test.go @@ -1,4 +1,4 @@ -//stm: #cli +//stm: #unit package cli import ( diff --git a/cli/client.go b/cli/client.go index da725a7be..ead520a48 100644 --- a/cli/client.go +++ b/cli/client.go @@ -1913,8 +1913,9 @@ type deal struct { } var clientGetDealCmd = &cli.Command{ - Name: "get-deal", - Usage: "Print detailed deal information", + Name: "get-deal", + Usage: "Print detailed deal information", + ArgsUsage: "[proposalCID]", Action: func(cctx *cli.Context) error { if !cctx.Args().Present() { return cli.ShowCommandHelp(cctx, cctx.Command.Name) diff --git a/cli/mpool_test.go b/cli/mpool_test.go index d9eef452c..2ddd85999 100644 --- a/cli/mpool_test.go +++ b/cli/mpool_test.go @@ -1,4 +1,4 @@ -//stm: #cli +//stm: #unit package cli import ( diff --git a/cli/send_test.go b/cli/send_test.go index 52eafda67..a9829d4f3 100644 --- a/cli/send_test.go +++ b/cli/send_test.go @@ -1,3 +1,5 @@ +//stm: ignore +//stm: #unit package cli import ( diff --git a/cli/services_send_test.go b/cli/services_send_test.go index b7ed78f80..feebd58a1 100644 --- a/cli/services_send_test.go +++ b/cli/services_send_test.go @@ -1,3 +1,5 @@ +//stm: ignore +//stm: #unit package cli import ( diff --git a/cli/state.go b/cli/state.go index bac7efae8..18d9a12bd 100644 --- a/cli/state.go +++ b/cli/state.go @@ -1768,6 +1768,9 @@ var StateSectorCmd = &cli.Command{ fmt.Println("SectorNumber: ", si.SectorNumber) fmt.Println("SealProof: ", si.SealProof) fmt.Println("SealedCID: ", si.SealedCID) + if si.SectorKeyCID != nil { + fmt.Println("SectorKeyCID: ", si.SectorKeyCID) + } fmt.Println("DealIDs: ", si.DealIDs) fmt.Println() fmt.Println("Activation: ", 
EpochTime(ts.Height(), si.Activation)) diff --git a/cli/sync_test.go b/cli/sync_test.go index 90f20a029..fd0d834bf 100644 --- a/cli/sync_test.go +++ b/cli/sync_test.go @@ -1,3 +1,4 @@ +//stm: #unit package cli import ( diff --git a/cli/wallet_test.go b/cli/wallet_test.go index 22b3bc3e4..f73937019 100644 --- a/cli/wallet_test.go +++ b/cli/wallet_test.go @@ -1,4 +1,4 @@ -//stm: #cli +//stm: #unit package cli import ( diff --git a/cmd/lotus-bench/stats_test.go b/cmd/lotus-bench/stats_test.go index 16caf3f7e..ff2e3dce8 100644 --- a/cmd/lotus-bench/stats_test.go +++ b/cmd/lotus-bench/stats_test.go @@ -1,3 +1,4 @@ +//stm: #unit package main import ( diff --git a/cmd/lotus-fountain/rate_limiter_test.go b/cmd/lotus-fountain/rate_limiter_test.go index 03590de50..eefb07c8a 100644 --- a/cmd/lotus-fountain/rate_limiter_test.go +++ b/cmd/lotus-fountain/rate_limiter_test.go @@ -1,3 +1,4 @@ +//stm: #unit package main import ( @@ -8,6 +9,7 @@ import ( ) func TestRateLimit(t *testing.T) { + //stm: @CMD_LIMITER_GET_IP_LIMITER_001, @CMD_LIMITER_GET_WALLET_LIMITER_001 limiter := NewLimiter(LimiterConfig{ TotalRate: time.Second, TotalBurst: 20, diff --git a/cmd/lotus-health/main_test.go b/cmd/lotus-health/main_test.go index 346376167..f22642279 100644 --- a/cmd/lotus-health/main_test.go +++ b/cmd/lotus-health/main_test.go @@ -1,3 +1,4 @@ +//stm: #unit package main import ( @@ -9,6 +10,7 @@ import ( ) func TestAppendCIDsToWindow(t *testing.T) { + //stm: @CMD_HEALTH_APPEND_CIDS_001 assert := assert.New(t) var window CidWindow threshold := 3 @@ -27,6 +29,7 @@ func TestAppendCIDsToWindow(t *testing.T) { } func TestCheckWindow(t *testing.T) { + //stm: @CMD_HEALTH_APPEND_CIDS_001, @CMD_HEALTH_CHECK_WINDOW_001 assert := assert.New(t) threshold := 3 diff --git a/cmd/lotus-miner/actor_test.go b/cmd/lotus-miner/actor_test.go index 5650a9ac5..9df553cd6 100644 --- a/cmd/lotus-miner/actor_test.go +++ b/cmd/lotus-miner/actor_test.go @@ -1,3 +1,4 @@ +//stm: #unit package main import ( @@ -23,6 +24,7 
@@ import ( ) func TestWorkerKeyChange(t *testing.T) { + //stm: @OTHER_WORKER_KEY_CHANGE_001 if testing.Short() { t.Skip("skipping test in short mode") } diff --git a/cmd/lotus-miner/allinfo_test.go b/cmd/lotus-miner/allinfo_test.go index 5f30b4fec..f64a4ab72 100644 --- a/cmd/lotus-miner/allinfo_test.go +++ b/cmd/lotus-miner/allinfo_test.go @@ -1,3 +1,4 @@ +//stm: #integration package main import ( @@ -49,6 +50,7 @@ func TestMinerAllInfo(t *testing.T) { t.Run("pre-info-all", run) + //stm: @CLIENT_DATA_IMPORT_001, @CLIENT_STORAGE_DEALS_GET_001 dh := kit.NewDealHarness(t, client, miner, miner) deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6}) outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false) diff --git a/cmd/lotus-miner/info.go b/cmd/lotus-miner/info.go index 32219e73b..f6629fcf4 100644 --- a/cmd/lotus-miner/info.go +++ b/cmd/lotus-miner/info.go @@ -466,6 +466,7 @@ var stateOrder = map[sealing.SectorState]stateMeta{} var stateList = []stateMeta{ {col: 39, state: "Total"}, {col: color.FgGreen, state: sealing.Proving}, + {col: color.FgGreen, state: sealing.Available}, {col: color.FgGreen, state: sealing.UpdateActivating}, {col: color.FgBlue, state: sealing.Empty}, diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index 24098b558..1aa964f7e 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -11,9 +11,6 @@ import ( "strings" "time" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin" - "github.com/docker/go-units" "github.com/fatih/color" cbor "github.com/ipfs/go-ipld-cbor" @@ -56,7 +53,6 @@ var sectorsCmd = &cli.Command{ sectorsRemoveCmd, sectorsSnapUpCmd, sectorsSnapAbortCmd, - sectorsMarkForUpgradeCmd, sectorsStartSealCmd, sectorsSealDelayCmd, sectorsCapacityCollateralCmd, @@ -351,7 +347,7 @@ var sectorsListCmd = &cli.Command{ if cctx.Bool("unproven") { for state := range sealing.ExistSectorStateList { - 
if state == sealing.Proving { + if state == sealing.Proving || state == sealing.Available { continue } states = append(states, api.SectorState(state)) @@ -437,7 +433,7 @@ var sectorsListCmd = &cli.Command{ const verifiedPowerGainMul = 9 dw, vp := .0, .0 - estimate := st.Expiration-st.Activation <= 0 + estimate := (st.Expiration-st.Activation <= 0) || sealing.IsUpgradeState(sealing.SectorState(st.State)) if !estimate { rdw := big.Add(st.DealWeight, st.VerifiedDealWeight) dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64()) @@ -1568,57 +1564,6 @@ var sectorsSnapAbortCmd = &cli.Command{ }, } -var sectorsMarkForUpgradeCmd = &cli.Command{ - Name: "mark-for-upgrade", - Usage: "Mark a committed capacity sector for replacement by a sector with deals", - ArgsUsage: "", - Action: func(cctx *cli.Context) error { - if cctx.Args().Len() != 1 { - return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number")) - } - - nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) - if err != nil { - return err - } - defer closer() - - api, nCloser, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer nCloser() - ctx := lcli.ReqContext(cctx) - - nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) - if err != nil { - return xerrors.Errorf("failed to get network version: %w", err) - } - if nv >= network.Version15 { - return xerrors.Errorf("classic cc upgrades disabled v15 and beyond, use `snap-up`") - } - - // disable mark for upgrade two days before the ntwk v15 upgrade - // TODO: remove the following block in v1.15.1 - head, err := api.ChainHead(ctx) - if err != nil { - return xerrors.Errorf("failed to get chain head: %w", err) - } - twoDays := abi.ChainEpoch(2 * builtin.EpochsInDay) - if head.Height() > (build.UpgradeOhSnapHeight - twoDays) { - return xerrors.Errorf("OhSnap is coming soon, " + - "please use `snap-up` to upgrade your cc sectors after the network v15 upgrade!") - } - - id, err := 
strconv.ParseUint(cctx.Args().Get(0), 10, 64) - if err != nil { - return xerrors.Errorf("could not parse sector number: %w", err) - } - - return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), false) - }, -} - var sectorsStartSealCmd = &cli.Command{ Name: "seal", Usage: "Manually start sealing a sector (filling any unused space with junk)", diff --git a/cmd/lotus-miner/storage.go b/cmd/lotus-miner/storage.go index 0fea2a3a5..ba2e6841c 100644 --- a/cmd/lotus-miner/storage.go +++ b/cmd/lotus-miner/storage.go @@ -598,7 +598,7 @@ var storageListSectorsCmd = &cli.Command{ ft storiface.SectorFileType urls string - primary, seal, store bool + primary, copy, main, seal, store bool state api.SectorState } @@ -626,8 +626,11 @@ var storageListSectorsCmd = &cli.Command{ urls: strings.Join(info.URLs, ";"), primary: info.Primary, - seal: info.CanSeal, - store: info.CanStore, + copy: !info.Primary && len(si) > 1, + main: !info.Primary && len(si) == 1, // only copy, but not primary + + seal: info.CanSeal, + store: info.CanStore, state: st.State, }) @@ -680,7 +683,7 @@ var storageListSectorsCmd = &cli.Command{ "Sector": e.id, "Type": e.ft.String(), "State": color.New(stateOrder[sealing.SectorState(e.state)].col).Sprint(e.state), - "Primary": maybeStr(e.seal, color.FgGreen, "primary"), + "Primary": maybeStr(e.primary, color.FgGreen, "primary") + maybeStr(e.copy, color.FgBlue, "copy") + maybeStr(e.main, color.FgRed, "main"), "Path use": maybeStr(e.seal, color.FgMagenta, "seal ") + maybeStr(e.store, color.FgCyan, "store"), "URLs": e.urls, } diff --git a/cmd/lotus-shed/datastore-vlog.go b/cmd/lotus-shed/datastore-vlog.go new file mode 100644 index 000000000..7f0c708a5 --- /dev/null +++ b/cmd/lotus-shed/datastore-vlog.go @@ -0,0 +1,342 @@ +package main + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "os" + "strings" + + "github.com/dgraph-io/badger/v2/y" + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + 
"github.com/multiformats/go-base32" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var datastoreVlog2CarCmd = &cli.Command{ + Name: "vlog2car", + Usage: "convert badger blockstore .vlog to .car", + Flags: []cli.Flag{ + &cli.PathFlag{ + Name: "vlog", + Usage: "vlog file", + Required: true, + }, + &cli.PathFlag{ + Name: "car", + Usage: "out car file name (no .car)", + Required: true, + }, + &cli.StringFlag{ + Name: "key-prefix", + Usage: "datastore prefix", + Value: "/blocks/", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := cctx.Context + + maxSz := uint64(1 << 20) + + carb := &rawCarb{ + max: maxSz, + blocks: map[cid.Cid]block.Block{}, + } + cars := 0 + + pref := cctx.String("key-prefix") + plen := len(pref) + + { + // NOTE: Some bits of code in this code block come from https://github.com/dgraph-io/badger, which is licensed + // under Apache 2.0; See https://github.com/dgraph-io/badger/blob/master/LICENSE + + vf, err := os.Open(cctx.Path("vlog")) + if err != nil { + return xerrors.Errorf("open vlog file: %w", err) + } + + if _, err := vf.Seek(20, io.SeekStart); err != nil { + return xerrors.Errorf("seek past vlog start: %w", err) + } + + reader := bufio.NewReader(vf) + read := &safeRead{ + k: make([]byte, 10), + v: make([]byte, 10), + recordOffset: 20, + } + + loop: + for { + e, err := read.Entry(reader) + switch { + case err == io.EOF: + break loop + case err == io.ErrUnexpectedEOF || err == errTruncate: + break loop + case err != nil: + return xerrors.Errorf("entry read error: %w", err) + case e == nil: + continue + } + + if e.meta&0x40 > 0 { + e.Key = e.Key[:len(e.Key)-8] + } else if e.meta > 0 { + if e.meta&0x3f > 0 { + log.Infof("unk meta m:%x; k:%x, v:%60x", e.meta, e.Key, e.Value) + } + continue + } + + { + if plen > 0 && !strings.HasPrefix(string(e.Key), pref) { + log.Infow("no blocks prefix", "key", string(e.Key)) + continue + } + + h, err := base32.RawStdEncoding.DecodeString(string(e.Key[plen:])) + if err != nil { + return 
xerrors.Errorf("decode b32 ds key %x: %w", e.Key, err) + } + + c := cid.NewCidV1(cid.Raw, h) + + b, err := block.NewBlockWithCid(e.Value, c) + if err != nil { + return xerrors.Errorf("readblk: %w", err) + } + + err = carb.consume(c, b) + switch err { + case nil: + case errFullCar: + root, err := carb.finalize() + if err != nil { + return xerrors.Errorf("carb finalize: %w", err) + } + + if err := carb.writeCar(ctx, fmt.Sprintf("%s%d.car", cctx.Path("car"), cars), root); err != nil { + return xerrors.Errorf("writeCar: %w", err) + } + + cars++ + + carb = &rawCarb{ + max: maxSz, + blocks: map[cid.Cid]block.Block{}, + } + + default: + return xerrors.Errorf("carb consume: %w", err) + } + } + } + + if err := vf.Close(); err != nil { + return err + } + } + + root, err := carb.finalize() + if err != nil { + return xerrors.Errorf("carb finalize: %w", err) + } + + if err := carb.writeCar(ctx, fmt.Sprintf("%s%d.car", cctx.Path("car"), cars), root); err != nil { + return xerrors.Errorf("writeCar: %w", err) + } + + return nil + + }, +} + +// NOTE: Code below comes (with slight modifications) from https://github.com/dgraph-io/badger/blob/master/value.go +// Apache 2.0; See https://github.com/dgraph-io/badger/blob/master/LICENSE + +var errTruncate = errors.New("do truncate") + +// hashReader implements io.Reader, io.ByteReader interfaces. It also keeps track of the number +// bytes read. The hashReader writes to h (hash) what it reads from r. +type hashReader struct { + r io.Reader + h hash.Hash32 + bytesRead int // Number of bytes read. +} + +func newHashReader(r io.Reader) *hashReader { + hash := crc32.New(y.CastagnoliCrcTable) + return &hashReader{ + r: r, + h: hash, + } +} + +// Read reads len(p) bytes from the reader. Returns the number of bytes read, error on failure. 
+func (t *hashReader) Read(p []byte) (int, error) { + n, err := t.r.Read(p) + if err != nil { + return n, err + } + t.bytesRead += n + return t.h.Write(p[:n]) +} + +// ReadByte reads exactly one byte from the reader. Returns error on failure. +func (t *hashReader) ReadByte() (byte, error) { + b := make([]byte, 1) + _, err := t.Read(b) + return b[0], err +} + +// Sum32 returns the sum32 of the underlying hash. +func (t *hashReader) Sum32() uint32 { + return t.h.Sum32() +} + +type safeRead struct { + k []byte + v []byte + + recordOffset uint32 +} + +// Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by +// the user to set data. +type Entry struct { + Key []byte + Value []byte + UserMeta byte + ExpiresAt uint64 // time.Unix + meta byte + + // Fields maintained internally. + offset uint32 + hlen int // Length of the header. +} + +// Entry reads an entry from the provided reader. It also validates the checksum for every entry +// read. Returns error on failure. +func (r *safeRead) Entry(reader io.Reader) (*Entry, error) { + tee := newHashReader(reader) + var h header + hlen, err := h.DecodeFrom(tee) + if err != nil { + return nil, err + } + if h.klen > uint32(1<<16) { // Key length must be below uint16. 
+ return nil, errTruncate + } + kl := int(h.klen) + if cap(r.k) < kl { + r.k = make([]byte, 2*kl) + } + vl := int(h.vlen) + if cap(r.v) < vl { + r.v = make([]byte, 2*vl) + } + + e := &Entry{} + e.offset = r.recordOffset + e.hlen = hlen + buf := make([]byte, h.klen+h.vlen) + if _, err := io.ReadFull(tee, buf[:]); err != nil { + if err == io.EOF { + err = errTruncate + } + return nil, err + } + e.Key = buf[:h.klen] + e.Value = buf[h.klen:] + var crcBuf [crc32.Size]byte + if _, err := io.ReadFull(reader, crcBuf[:]); err != nil { + if err == io.EOF { + err = errTruncate + } + return nil, err + } + crc := y.BytesToU32(crcBuf[:]) + if crc != tee.Sum32() { + return nil, errTruncate + } + e.meta = h.meta + e.UserMeta = h.userMeta + e.ExpiresAt = h.expiresAt + return e, nil +} + +// header is used in value log as a header before Entry. +type header struct { + klen uint32 + vlen uint32 + expiresAt uint64 + meta byte + userMeta byte +} + +// Encode encodes the header into []byte. The provided []byte should be atleast 5 bytes. The +// function will panic if out []byte isn't large enough to hold all the values. +// The encoded header looks like +// +------+----------+------------+--------------+-----------+ +// | Meta | UserMeta | Key Length | Value Length | ExpiresAt | +// +------+----------+------------+--------------+-----------+ +func (h header) Encode(out []byte) int { + out[0], out[1] = h.meta, h.userMeta + index := 2 + index += binary.PutUvarint(out[index:], uint64(h.klen)) + index += binary.PutUvarint(out[index:], uint64(h.vlen)) + index += binary.PutUvarint(out[index:], h.expiresAt) + return index +} + +// Decode decodes the given header from the provided byte slice. +// Returns the number of bytes read. 
+func (h *header) Decode(buf []byte) int { + h.meta, h.userMeta = buf[0], buf[1] + index := 2 + klen, count := binary.Uvarint(buf[index:]) + h.klen = uint32(klen) + index += count + vlen, count := binary.Uvarint(buf[index:]) + h.vlen = uint32(vlen) + index += count + h.expiresAt, count = binary.Uvarint(buf[index:]) + return index + count +} + +// DecodeFrom reads the header from the hashReader. +// Returns the number of bytes read. +func (h *header) DecodeFrom(reader *hashReader) (int, error) { + var err error + h.meta, err = reader.ReadByte() + if err != nil { + return 0, err + } + h.userMeta, err = reader.ReadByte() + if err != nil { + return 0, err + } + klen, err := binary.ReadUvarint(reader) + if err != nil { + return 0, err + } + h.klen = uint32(klen) + vlen, err := binary.ReadUvarint(reader) + if err != nil { + return 0, err + } + h.vlen = uint32(vlen) + h.expiresAt, err = binary.ReadUvarint(reader) + if err != nil { + return 0, err + } + return reader.bytesRead, nil +} diff --git a/cmd/lotus-shed/datastore.go b/cmd/lotus-shed/datastore.go index 5c5567654..7cdb2b1e6 100644 --- a/cmd/lotus-shed/datastore.go +++ b/cmd/lotus-shed/datastore.go @@ -32,6 +32,7 @@ var datastoreCmd = &cli.Command{ datastoreListCmd, datastoreGetCmd, datastoreRewriteCmd, + datastoreVlog2CarCmd, }, } diff --git a/cmd/lotus-shed/deal-label.go b/cmd/lotus-shed/deal-label.go new file mode 100644 index 000000000..483415987 --- /dev/null +++ b/cmd/lotus-shed/deal-label.go @@ -0,0 +1,118 @@ +package main + +import ( + "context" + "fmt" + "io" + "unicode/utf8" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + + "github.com/filecoin-project/lotus/chain/consensus/filcns" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/specs-actors/v4/actors/util/adt" + "github.com/ipfs/go-cid" + cbor 
"github.com/ipfs/go-ipld-cbor" + "github.com/urfave/cli/v2" +) + +var dealLabelCmd = &cli.Command{ + Name: "deal-label", + Usage: "Scrape state to report on how many deals have non UTF-8 labels", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.TODO() + + if !cctx.Args().Present() { + return fmt.Errorf("must pass state root") + } + + sroot, err := cid.Decode(cctx.Args().First()) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + fsrepo, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + lkrepo, err := fsrepo.Lock(repo.FullNode) + if err != nil { + return err + } + + defer lkrepo.Close() //nolint:errcheck + + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return fmt.Errorf("failed to open blockstore: %w", err) + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lkrepo.Datastore(context.Background(), "/metadata") + if err != nil { + return err + } + + cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) + defer cs.Close() //nolint:errcheck + + cst := cbor.NewCborStore(bs) + store := adt.WrapStore(ctx, cst) + + tree, err := state.LoadStateTree(cst, sroot) + if err != nil { + return err + } + + ma, err := tree.GetActor(market.Address) + if err != nil { + return err + } + + ms, err := market.Load(store, ma) + if err != nil { + return err + } + + ps, err := ms.Proposals() + if err != nil { + return err + } + + var deals []abi.DealID + if err = ps.ForEach(func(id abi.DealID, dp market.DealProposal) error { + if !utf8.Valid([]byte(dp.Label)) { + deals = append(deals, id) + } + + return nil + }); err != nil { + return err + } + + fmt.Println("there are ", len(deals), " bad labels") + for _, d := range deals { + fmt.Print(d, " ") + } + + return nil + }, +} diff 
--git a/cmd/lotus-shed/export.go b/cmd/lotus-shed/export.go index 3851e4922..fec4d575a 100644 --- a/cmd/lotus-shed/export.go +++ b/cmd/lotus-shed/export.go @@ -1,19 +1,38 @@ package main import ( + "bytes" "context" + "errors" "fmt" "io" "os" + "path/filepath" + "runtime" + "strings" + "sync" + "github.com/dgraph-io/badger/v2" + "github.com/dgraph-io/badger/v2/pb" + "github.com/dustin/go-humanize" + "github.com/filecoin-project/go-state-types/abi" + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + offline "github.com/ipfs/go-ipfs-exchange-offline" + "github.com/ipfs/go-merkledag" + "github.com/ipld/go-car" + "github.com/multiformats/go-base32" + mh "github.com/multiformats/go-multihash" "github.com/urfave/cli/v2" + "go.uber.org/zap" "golang.org/x/xerrors" - "github.com/filecoin-project/go-state-types/abi" - + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/cmd/lotus-shed/shedgen" "github.com/filecoin-project/lotus/node/repo" ) @@ -39,6 +58,9 @@ var exportChainCmd = &cli.Command{ Name: "skip-old-msgs", }, }, + Subcommands: []*cli.Command{ + exportRawCmd, + }, Action: func(cctx *cli.Context) error { if !cctx.Args().Present() { return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name to write export to")) @@ -130,3 +152,351 @@ var exportChainCmd = &cli.Command{ return nil }, } + +var exportRawCmd = &cli.Command{ + Name: "raw", + Description: "Export raw blocks from repo (requires node to be offline)", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + &cli.StringFlag{ + Name: "car-size", + Value: "50M", + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name to write export to")) + } + + ctx := context.TODO() + + 
r, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return xerrors.Errorf("opening fs repo: %w", err) + } + + exists, err := r.Exists() + if err != nil { + return err + } + if !exists { + return xerrors.Errorf("lotus repo doesn't exist") + } + + lr, err := r.LockRO(repo.FullNode) + if err != nil { + return err + } + defer lr.Close() //nolint:errcheck + + out := cctx.Args().First() + err = os.Mkdir(out, 0755) + if err != nil { + return xerrors.Errorf("creating output dir: %w", err) + } + + maxSz, err := humanize.ParseBytes(cctx.String("car-size")) + if err != nil { + return xerrors.Errorf("parse --car-size: %w", err) + } + + cars := 0 + + carb := &rawCarb{ + max: maxSz, + blocks: map[cid.Cid]block.Block{}, + } + + { + consume := func(c cid.Cid, b block.Block) error { + err = carb.consume(c, b) + switch err { + case nil: + case errFullCar: + root, err := carb.finalize() + if err != nil { + return xerrors.Errorf("carb finalize: %w", err) + } + + if err := carb.writeCar(ctx, filepath.Join(out, fmt.Sprintf("chain%d.car", cars)), root); err != nil { + return xerrors.Errorf("writeCar: %w", err) + } + + cars++ + + if cars > 10 { + return xerrors.Errorf("enough") + } + + carb = &rawCarb{ + max: maxSz, + blocks: map[cid.Cid]block.Block{}, + } + + log.Infow("gc") + go runtime.GC() + + default: + return xerrors.Errorf("carb consume: %w", err) + } + return nil + } + + { + path := filepath.Join(lr.Path(), "datastore", "chain") + opts, err := repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, path, false) + if err != nil { + return err + } + + opts.Logger = &badgerLog{ + SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(), + skip2: log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(), + } + + log.Infow("open db") + + db, err := badger.Open(opts.Options) + if err != nil { + return fmt.Errorf("failed to open badger blockstore: %w", err) + } + defer db.Close() // nolint:errcheck + + log.Infow("new stream") + + var wlk sync.Mutex + + str := 
db.NewStream() + str.NumGo = 16 + str.LogPrefix = "bstream" + str.Send = func(list *pb.KVList) (err error) { + defer func() { + if err != nil { + log.Errorw("send error", "err", err) + } + }() + + for _, kv := range list.Kv { + if kv.Key == nil || kv.Value == nil { + continue + } + if !strings.HasPrefix(string(kv.Key), "/blocks/") { + log.Infow("no blocks prefix", "key", string(kv.Key)) + continue + } + + h, err := base32.RawStdEncoding.DecodeString(string(kv.Key[len("/blocks/"):])) + if err != nil { + return xerrors.Errorf("decode b32 ds key %x: %w", kv.Key, err) + } + + c := cid.NewCidV1(cid.Raw, h) + + b, err := block.NewBlockWithCid(kv.Value, c) + if err != nil { + return xerrors.Errorf("readblk: %w", err) + } + + wlk.Lock() + err = consume(c, b) + wlk.Unlock() + if err != nil { + return xerrors.Errorf("consume stream block: %w", err) + } + } + + return nil + } + + if err := str.Orchestrate(ctx); err != nil { + return xerrors.Errorf("orchestrate stream: %w", err) + } + } + } + + log.Infow("write last") + + root, err := carb.finalize() + if err != nil { + return xerrors.Errorf("carb finalize: %w", err) + } + + if err := carb.writeCar(ctx, filepath.Join(out, fmt.Sprintf("chain%d.car", cars)), root); err != nil { + return xerrors.Errorf("writeCar: %w", err) + } + + return nil + }, +} + +var errFullCar = errors.New("full") + +const maxlinks = 16 + +type rawCarb struct { + blockstore.Blockstore + + max, cur uint64 + + nodes []*shedgen.CarbNode + + blocks map[cid.Cid]block.Block +} + +func (rc *rawCarb) Has(ctx context.Context, c cid.Cid) (bool, error) { + _, has := rc.blocks[c] + return has, nil +} + +func (rc *rawCarb) Get(ctx context.Context, c cid.Cid) (block.Block, error) { + b, has := rc.blocks[c] + if !has { + return nil, blockstore.ErrNotFound + } + return b, nil +} + +func (rc *rawCarb) GetSize(ctx context.Context, c cid.Cid) (int, error) { + b, has := rc.blocks[c] + if !has { + return 0, blockstore.ErrNotFound + } + return len(b.RawData()), nil +} + +func 
(rc *rawCarb) checkNodes(maxl int) error { + if len(rc.nodes) == 0 { + log.Infow("add level", "l", 0) + rc.nodes = append(rc.nodes, new(shedgen.CarbNode)) + } + for i := 0; i < len(rc.nodes); i++ { + if len(rc.nodes[i].Sub) <= maxl { + break + } + if len(rc.nodes) <= i+1 { + log.Infow("add level", "l", i+1) + rc.nodes = append(rc.nodes, new(shedgen.CarbNode)) + } + + var bb bytes.Buffer + if err := rc.nodes[i].MarshalCBOR(&bb); err != nil { + return err + } + c, err := cid.Prefix{ + Version: 1, + Codec: cid.DagCBOR, + MhType: mh.SHA2_256, + MhLength: -1, + }.Sum(bb.Bytes()) + if err != nil { + return xerrors.Errorf("gen cid: %w", err) + } + + b, err := block.NewBlockWithCid(bb.Bytes(), c) + if err != nil { + return xerrors.Errorf("new block: %w", err) + } + + if i > 1 { + log.Infow("compact", "from", i, "to", i+1, "sub", c.String()) + } + + rc.nodes[i+1].Sub = append(rc.nodes[i+1].Sub, c) + rc.blocks[c] = b + rc.nodes[i] = new(shedgen.CarbNode) + rc.cur += uint64(bb.Len()) + } + + return nil +} + +func (rc *rawCarb) consume(c cid.Cid, b block.Block) error { + if err := rc.checkNodes(maxlinks); err != nil { + return err + } + if rc.cur+uint64(len(b.RawData())) > rc.max { + return errFullCar + } + + rc.cur += uint64(len(b.RawData())) + + b, err := block.NewBlockWithCid(b.RawData(), c) + if err != nil { + return xerrors.Errorf("create raw block: %w", err) + } + + rc.blocks[c] = b + rc.nodes[0].Sub = append(rc.nodes[0].Sub, c) + + return nil +} + +func (rc *rawCarb) finalize() (cid.Cid, error) { + if len(rc.nodes) == 0 { + rc.nodes = append(rc.nodes, new(shedgen.CarbNode)) + } + + for i := 0; i < len(rc.nodes); i++ { + var bb bytes.Buffer + if err := rc.nodes[i].MarshalCBOR(&bb); err != nil { + return cid.Undef, err + } + c, err := cid.Prefix{ + Version: 1, + Codec: cid.DagCBOR, + MhType: mh.SHA2_256, + MhLength: -1, + }.Sum(bb.Bytes()) + if err != nil { + return cid.Undef, xerrors.Errorf("gen cid: %w", err) + } + + b, err := block.NewBlockWithCid(bb.Bytes(), c) + if 
err != nil { + return cid.Undef, xerrors.Errorf("new block: %w", err) + } + + log.Infow("fin", "level", i, "cid", c.String()) + + rc.blocks[c] = b + rc.nodes[i] = new(shedgen.CarbNode) + rc.cur += uint64(bb.Len()) + + if len(rc.nodes[i].Sub) <= 1 && i == len(rc.nodes)-1 { + return c, err + } + if len(rc.nodes) <= i+1 { + rc.nodes = append(rc.nodes, new(shedgen.CarbNode)) + } + rc.nodes[i+1].Sub = append(rc.nodes[i+1].Sub, c) + } + return cid.Undef, xerrors.Errorf("failed to finalize") +} + +func (rc *rawCarb) writeCar(ctx context.Context, path string, root cid.Cid) error { + f, err := os.Create(path) + if err != nil { + return xerrors.Errorf("create out car: %w", err) + } + + bs := rc + ds := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) + + log.Infow("write car", "path", path, "root", root.String(), "blocks", len(rc.blocks)) + + return car.WriteCar(ctx, ds, []cid.Cid{root}, f) +} + +var _ blockstore.Blockstore = &rawCarb{} + +type badgerLog struct { + *zap.SugaredLogger + skip2 *zap.SugaredLogger +} + +func (b *badgerLog) Warningf(format string, args ...interface{}) { + b.skip2.Warnf(format, args...) 
+} diff --git a/cmd/lotus-shed/itestd.go b/cmd/lotus-shed/itestd.go new file mode 100644 index 000000000..3ac542d27 --- /dev/null +++ b/cmd/lotus-shed/itestd.go @@ -0,0 +1,104 @@ +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + + "github.com/chzyer/readline" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/itests/kit" +) + +var itestdCmd = &cli.Command{ + Name: "itestd", + Description: "Integration test debug env", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "listen", + Value: "127.0.0.1:5674", + }, + }, + Action: func(cctx *cli.Context) error { + var nodes []kit.ItestdNotif + + m := http.NewServeMux() + m.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + var notif kit.ItestdNotif + if err := json.NewDecoder(r.Body).Decode(¬if); err != nil { + fmt.Printf("!! Decode itest notif: %s\n", err) + return + } + + fmt.Printf("%d @%s '%s=%s'\n", len(nodes), notif.TestName, notif.NodeType, notif.Api) + nodes = append(nodes, notif) + }) + l, err := net.Listen("tcp", cctx.String("listen")) + if err != nil { + return xerrors.Errorf("net listen: %w", err) + } + s := &httptest.Server{ + Listener: l, + Config: &http.Server{Handler: m}, + } + s.Start() + fmt.Printf("ITest env:\n\nLOTUS_ITESTD=%s\n\nSay 'sh' to spawn a shell connected to test nodes\n--- waiting for clients\n", s.URL) + + cs := readline.NewCancelableStdin(os.Stdin) + go func() { + <-cctx.Done() + cs.Close() // nolint:errcheck + }() + + rl := bufio.NewReader(cs) + + for { + cmd, _, err := rl.ReadLine() + if err != nil { + return xerrors.Errorf("readline: %w", err) + } + + switch string(cmd) { + case "sh": + shell := "/bin/sh" + if os.Getenv("SHELL") != "" { + shell = os.Getenv("SHELL") + } + + p := exec.Command(shell, "-i") + p.Env = append(p.Env, os.Environ()...) 
+ lastNodes := map[string]string{} + for _, node := range nodes { + lastNodes[node.NodeType] = node.Api + } + if _, found := lastNodes["MARKETS_API_INFO"]; !found { + lastNodes["MARKETS_API_INFO"] = lastNodes["MINER_API_INFO"] + } + for typ, api := range lastNodes { + p.Env = append(p.Env, fmt.Sprintf("%s=%s", typ, api)) + } + + p.Stdout = os.Stdout + p.Stderr = os.Stderr + p.Stdin = os.Stdin + if err := p.Start(); err != nil { + return xerrors.Errorf("start shell: %w", err) + } + if err := p.Wait(); err != nil { + fmt.Printf("wait for shell: %s\n", err) + } + fmt.Println("\n--- shell quit") + + default: + fmt.Println("!! Unknown command") + } + } + }, +} diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index 45fd24e18..cfbc73a14 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -22,6 +22,7 @@ func main() { bitFieldCmd, cronWcCmd, frozenMinersCmd, + dealLabelCmd, keyinfoCmd, jwtCmd, noncefix, @@ -69,6 +70,7 @@ func main() { terminationsCmd, migrationsCmd, diffCmd, + itestdCmd, } app := &cli.App{ diff --git a/cmd/lotus-shed/miner-multisig.go b/cmd/lotus-shed/miner-multisig.go index d9f158090..712e45ee7 100644 --- a/cmd/lotus-shed/miner-multisig.go +++ b/cmd/lotus-shed/miner-multisig.go @@ -8,6 +8,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig" "github.com/filecoin-project/go-address" @@ -29,6 +31,9 @@ var minerMultisigsCmd = &cli.Command{ mmApproveWithdrawBalance, mmProposeChangeOwner, mmApproveChangeOwner, + mmProposeChangeWorker, + mmConfirmChangeWorker, + mmProposeControlSet, }, Flags: []cli.Flag{ &cli.StringFlag{ @@ -368,6 +373,301 @@ var mmApproveChangeOwner = &cli.Command{ }, } +var mmProposeChangeWorker = &cli.Command{ + Name: "propose-change-worker", + Usage: "Propose an worker address 
change", + ArgsUsage: "[newWorker]", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass new worker address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + if mi.Worker == newAddr { + return fmt.Errorf("worker address already set to %s", na) + } + } else { + if mi.NewWorker == newAddr { + fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na) + fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch) + return fmt.Errorf("change to worker address %s already pending", na) + } + } + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: newAddr, + NewControlAddrs: mi.ControlAddresses, + } + + fmt.Fprintf(cctx.App.Writer, "newAddr: %s\n", newAddr) + fmt.Fprintf(cctx.App.Writer, "NewControlAddrs: %s\n", mi.ControlAddresses) + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeWorkerAddress), sp) + if err != nil { + return xerrors.Errorf("proposing message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + 
fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!") + return err + } + + var retval msig5.ProposeReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal propose return value: %w", err) + } + + fmt.Printf("Transaction ID: %d\n", retval.TxnID) + if retval.Applied { + fmt.Printf("Transaction was executed during propose\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } + + return nil + }, +} + +var mmConfirmChangeWorker = &cli.Command{ + Name: "confirm-change-worker", + Usage: "Confirm an worker address change", + ArgsUsage: "[newWorker]", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass new worker address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + na, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK) + if err != nil { + return err + } + + if mi.NewWorker.Empty() { + return xerrors.Errorf("no worker key change proposed") + } else if mi.NewWorker != newAddr { + return xerrors.Errorf("worker key %s does not match current worker key proposal %s", newAddr, mi.NewWorker) + } + + if head, err := api.ChainHead(ctx); err != nil { + return xerrors.Errorf("failed to get the chain head: %w", err) + } else if head.Height() < mi.WorkerChangeEpoch { + return xerrors.Errorf("worker key change cannot be confirmed until %d, current height is %d", mi.WorkerChangeEpoch, head.Height()) + } + + pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, 
uint64(miner.Methods.ConfirmUpdateWorkerKey), nil) + if err != nil { + return xerrors.Errorf("proposing message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!") + return err + } + + var retval msig5.ProposeReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal propose return value: %w", err) + } + + fmt.Printf("Transaction ID: %d\n", retval.TxnID) + if retval.Applied { + fmt.Printf("Transaction was executed during propose\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } + return nil + }, +} + +var mmProposeControlSet = &cli.Command{ + Name: "propose-control-set", + Usage: "Set control address(-es)", + ArgsUsage: "[...address]", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass new owner address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + multisigAddr, sender, minerAddr, err := getInputs(cctx) + if err != nil { + return err + } + + mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK) + if err != nil { + return err + } + + del := map[address.Address]struct{}{} + existing := map[address.Address]struct{}{} + for _, controlAddress := range mi.ControlAddresses { + ka, err := api.StateAccountKey(ctx, controlAddress, types.EmptyTSK) + if err != nil { + return err + } + + del[ka] = struct{}{} + existing[ka] = struct{}{} + } + + var toSet []address.Address + + for i, as := range cctx.Args().Slice() { + a, err := address.NewFromString(as) + if err != nil { + return 
xerrors.Errorf("parsing address %d: %w", i, err) + } + + ka, err := api.StateAccountKey(ctx, a, types.EmptyTSK) + if err != nil { + return err + } + + // make sure the address exists on chain + _, err = api.StateLookupID(ctx, ka, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("looking up %s: %w", ka, err) + } + + delete(del, ka) + toSet = append(toSet, ka) + } + + for a := range del { + fmt.Println("Remove", a) + } + for _, a := range toSet { + if _, exists := existing[a]; !exists { + fmt.Println("Add", a) + } + } + + cwp := &miner2.ChangeWorkerAddressParams{ + NewWorker: mi.Worker, + NewControlAddrs: toSet, + } + + sp, err := actors.SerializeParams(cwp) + if err != nil { + return xerrors.Errorf("serializing params: %w", err) + } + + pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeWorkerAddress), sp) + if err != nil { + return xerrors.Errorf("proposing message: %w", err) + } + + fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid) + + // wait for it to get mined into a block + wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence) + if err != nil { + return err + } + + // check it executed successfully + if wait.Receipt.ExitCode != 0 { + fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!") + return err + } + + var retval msig5.ProposeReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return fmt.Errorf("failed to unmarshal propose return value: %w", err) + } + + fmt.Printf("Transaction ID: %d\n", retval.TxnID) + if retval.Applied { + fmt.Printf("Transaction was executed during propose\n") + fmt.Printf("Exit Code: %d\n", retval.Code) + fmt.Printf("Return Value: %x\n", retval.Ret) + } + return nil + }, +} + func getInputs(cctx *cli.Context) (address.Address, address.Address, address.Address, error) { multisigAddr, err := address.NewFromString(cctx.String("multisig")) if err != nil { diff --git 
a/cmd/lotus-shed/shedgen/cbor_gen.go b/cmd/lotus-shed/shedgen/cbor_gen.go new file mode 100644 index 000000000..37ed95539 --- /dev/null +++ b/cmd/lotus-shed/shedgen/cbor_gen.go @@ -0,0 +1,128 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package shedgen + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +func (t *CarbNode) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write([]byte{161}); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Sub ([]cid.Cid) (slice) + if len("Sub") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Sub\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sub"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Sub")); err != nil { + return err + } + + if len(t.Sub) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Sub was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Sub))); err != nil { + return err + } + for _, v := range t.Sub { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.Sub: %w", err) + } + } + return nil +} + +func (t *CarbNode) UnmarshalCBOR(r io.Reader) error { + *t = CarbNode{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") + } + + if extra > cbg.MaxLength { + return fmt.Errorf("CarbNode: map struct too large (%d)", extra) + } + + var name string + n := extra + + for i := uint64(0); i < n; i++ { + + { + sval, err := 
cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) + } + + switch name { + // t.Sub ([]cid.Cid) (slice) + case "Sub": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Sub: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Sub = make([]cid.Cid, extra) + } + + for i := 0; i < int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("reading cid field t.Sub failed: %w", err) + } + t.Sub[i] = c + } + + default: + // Field doesn't exist on this type, so ignore it + cbg.ScanForLinks(r, func(cid.Cid) {}) + } + } + + return nil +} diff --git a/cmd/lotus-shed/shedgen/rawexport.go b/cmd/lotus-shed/shedgen/rawexport.go new file mode 100644 index 000000000..ca430c5e6 --- /dev/null +++ b/cmd/lotus-shed/shedgen/rawexport.go @@ -0,0 +1,7 @@ +package shedgen + +import "github.com/ipfs/go-cid" + +type CarbNode struct { + Sub []cid.Cid +} diff --git a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go index fb822eb6e..392eaa7c8 100644 --- a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go +++ b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go @@ -44,7 +44,7 @@ type BlockBuilder struct { parentTs *types.TipSet parentSt *state.StateTree - vm *vm.VM + vm *vm.LegacyVM sm *stmgr.StateManager gasTotal int64 @@ -73,9 +73,9 @@ func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.S parentSt: parentSt, } - // Then we construct a VM to execute messages for gas estimation. + // Then we construct a LegacyVM to execute messages for gas estimation. // - // Most parts of this VM are "real" except: + // Most parts of this LegacyVM are "real" except: // 1. We don't charge a fee. // 2. The runtime has "fake" proof logic. // 3. 
We don't actually save any of the results. @@ -92,7 +92,7 @@ func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.S BaseFee: abi.NewTokenAmount(0), LookbackState: stmgr.LookbackStateGetterForTipset(sm, parentTs), } - bb.vm, err = vm.NewVM(bb.ctx, vmopt) + bb.vm, err = vm.NewLegacyVM(bb.ctx, vmopt) if err != nil { return nil, err } @@ -190,12 +190,12 @@ func (bb *BlockBuilder) PushMessage(msg *types.Message) (*types.MessageReceipt, return &ret.MessageReceipt, nil } -// ActorStore returns the VM's current (pending) blockstore. +// ActorStore returns the LegacyVM's current (pending) blockstore. func (bb *BlockBuilder) ActorStore() adt.Store { return bb.vm.ActorStore(bb.ctx) } -// StateTree returns the VM's current (pending) state-tree. This includes any changes made by +// StateTree returns the LegacyVM's current (pending) state-tree. This includes any changes made by // successfully pushed messages. // // You probably want ParentStateTree diff --git a/cmd/lotus-sim/simulation/stages/commit_queue_test.go b/cmd/lotus-sim/simulation/stages/commit_queue_test.go index 8ab05250e..503228d38 100644 --- a/cmd/lotus-sim/simulation/stages/commit_queue_test.go +++ b/cmd/lotus-sim/simulation/stages/commit_queue_test.go @@ -1,3 +1,4 @@ +//stm: #unit package stages import ( @@ -13,6 +14,7 @@ import ( ) func TestCommitQueue(t *testing.T) { + //stm: @CMD_COMMIT_Q_ENQUEUE_COMMIT_001 var q commitQueue addr1, err := address.NewIDAddress(1000) require.NoError(t, err) @@ -46,6 +48,7 @@ func TestCommitQueue(t *testing.T) { SectorNumber: 6, })) + //stm: @CMD_COMMIT_Q_ADVANCE_EPOCH_001, @CMD_COMMIT_Q_NEXT_MINER_001 epoch := abi.ChainEpoch(0) q.advanceEpoch(epoch) _, _, ok := q.nextMiner() diff --git a/cmd/tvx/codenames_test.go b/cmd/tvx/codenames_test.go index e7136d6cc..9fd6870cb 100644 --- a/cmd/tvx/codenames_test.go +++ b/cmd/tvx/codenames_test.go @@ -1,3 +1,4 @@ +//stm: #unit package main import ( @@ -10,6 +11,7 @@ import ( ) func TestProtocolCodenames(t 
*testing.T) { + //stm: @OTHER_IMPLEMENTATION_EPOCH_CODENAMES_001 if height := abi.ChainEpoch(100); GetProtocolCodename(height) != "genesis" { t.Fatal("expected genesis codename") } diff --git a/conformance/chaos/actor_test.go b/conformance/chaos/actor_test.go index e68b9a4df..b21b97ec7 100644 --- a/conformance/chaos/actor_test.go +++ b/conformance/chaos/actor_test.go @@ -1,3 +1,4 @@ +//stm: #chaos package chaos import ( @@ -15,6 +16,7 @@ import ( ) func TestSingleton(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_BUILDER_001 receiver := atesting2.NewIDAddr(t, 100) builder := mock2.NewBuilder(context.Background(), receiver) @@ -29,6 +31,7 @@ func TestSingleton(t *testing.T) { } func TestCallerValidationNone(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_CALLER_VALIDATION_001 receiver := atesting2.NewIDAddr(t, 100) builder := mock2.NewBuilder(context.Background(), receiver) @@ -40,6 +43,7 @@ func TestCallerValidationNone(t *testing.T) { } func TestCallerValidationIs(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_CALLER_VALIDATION_001 caller := atesting2.NewIDAddr(t, 100) receiver := atesting2.NewIDAddr(t, 101) builder := mock2.NewBuilder(context.Background(), receiver) @@ -69,6 +73,7 @@ func TestCallerValidationIs(t *testing.T) { } func TestCallerValidationType(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_CALLER_VALIDATION_001 caller := atesting2.NewIDAddr(t, 100) receiver := atesting2.NewIDAddr(t, 101) builder := mock2.NewBuilder(context.Background(), receiver) @@ -95,6 +100,7 @@ func TestCallerValidationType(t *testing.T) { } func TestCallerValidationInvalidBranch(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_CALLER_VALIDATION_001 receiver := atesting2.NewIDAddr(t, 100) builder := mock2.NewBuilder(context.Background(), receiver) @@ -108,6 +114,7 @@ func TestCallerValidationInvalidBranch(t *testing.T) { } func TestDeleteActor(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_CREATE_ACTOR_001 receiver := atesting2.NewIDAddr(t, 100) beneficiary := atesting2.NewIDAddr(t, 101) builder := 
mock2.NewBuilder(context.Background(), receiver) @@ -122,6 +129,7 @@ func TestDeleteActor(t *testing.T) { } func TestMutateStateInTransaction(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_CREATE_STATE_001, @CHAIN_ACTOR_CHAOS_MUTATE_STATE_001 receiver := atesting2.NewIDAddr(t, 100) builder := mock2.NewBuilder(context.Background(), receiver) @@ -149,6 +157,7 @@ func TestMutateStateInTransaction(t *testing.T) { } func TestMutateStateAfterTransaction(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_CREATE_STATE_001, @CHAIN_ACTOR_CHAOS_MUTATE_STATE_001 receiver := atesting2.NewIDAddr(t, 100) builder := mock2.NewBuilder(context.Background(), receiver) @@ -183,6 +192,7 @@ func TestMutateStateAfterTransaction(t *testing.T) { } func TestMutateStateReadonly(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_CREATE_STATE_001, @CHAIN_ACTOR_CHAOS_MUTATE_STATE_001 receiver := atesting2.NewIDAddr(t, 100) builder := mock2.NewBuilder(context.Background(), receiver) @@ -217,6 +227,7 @@ func TestMutateStateReadonly(t *testing.T) { } func TestMutateStateInvalidBranch(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_MUTATE_STATE_001 receiver := atesting2.NewIDAddr(t, 100) builder := mock2.NewBuilder(context.Background(), receiver) @@ -231,6 +242,7 @@ func TestMutateStateInvalidBranch(t *testing.T) { } func TestAbortWith(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_ABORT_WITH_001 receiver := atesting2.NewIDAddr(t, 100) builder := mock2.NewBuilder(context.Background(), receiver) @@ -249,6 +261,7 @@ func TestAbortWith(t *testing.T) { } func TestAbortWithUncontrolled(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_ABORT_WITH_001 receiver := atesting2.NewIDAddr(t, 100) builder := mock2.NewBuilder(context.Background(), receiver) @@ -266,6 +279,7 @@ func TestAbortWithUncontrolled(t *testing.T) { } func TestInspectRuntime(t *testing.T) { + //stm: @CHAIN_ACTOR_CHAOS_INSPECT_RUNTIME_001, @CHAIN_ACTOR_CHAOS_CREATE_STATE_001 caller := atesting2.NewIDAddr(t, 100) receiver := atesting2.NewIDAddr(t, 101) builder := 
mock2.NewBuilder(context.Background(), receiver) diff --git a/conformance/corpus_test.go b/conformance/corpus_test.go index b9ba062cc..55c1cf08e 100644 --- a/conformance/corpus_test.go +++ b/conformance/corpus_test.go @@ -1,3 +1,6 @@ +//stm: ignore +// This file does not test any behaviors by itself; rather, it runs other test files +// Therefore, this file should not be annotated. package conformance import ( diff --git a/conformance/driver.go b/conformance/driver.go index a065d1530..f6ca9f9db 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -155,12 +155,12 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params results: []*vm.ApplyRet{}, } - sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { vmopt.CircSupplyCalc = func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { return big.Zero(), nil } - return vm.NewVM(ctx, vmopt) + return vm.NewLegacyVM(ctx, vmopt) }) postcid, receiptsroot, err := tse.ApplyBlocks(context.Background(), @@ -226,7 +226,7 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP NetworkVersion: params.NetworkVersion, } - lvm, err := vm.NewVM(context.TODO(), vmOpts) + lvm, err := vm.NewLegacyVM(context.TODO(), vmOpts) if err != nil { return nil, cid.Undef, err } diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index c7b04f323..a941d04c0 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -939,7 +939,7 @@ Response: { "ReceivingTransfers": [ { - "RequestID": 4, + "RequestID": {}, "RequestState": "string value", "IsCurrentChannelRequest": true, "ChannelID": { @@ -983,7 +983,7 @@ Response: ], "SendingTransfers": [ { - "RequestID": 4, + "RequestID": {}, "RequestState": "string value", "IsCurrentChannelRequest": true, "ChannelID": { 
diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 382fe4265..6e7bd8eff 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] VERSION: - 1.15.1-dev + 1.15.2-dev COMMANDS: init Initialize a lotus miner repo @@ -1664,7 +1664,6 @@ COMMANDS: remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) snap-up Mark a committed capacity sector to be filled with deals abort-upgrade Abort the attempted (SnapDeals) upgrade of a CC sector, reverting it to as before - mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals seal Manually start sealing a sector (filling any unused space with junk) set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts get-cc-collateral Get the collateral required to pledge a committed capacity sector @@ -1912,19 +1911,6 @@ OPTIONS: ``` -### lotus-miner sectors mark-for-upgrade -``` -NAME: - lotus-miner sectors mark-for-upgrade - Mark a committed capacity sector for replacement by a sector with deals - -USAGE: - lotus-miner sectors mark-for-upgrade [command options] - -OPTIONS: - --help, -h show help (default: false) - -``` - ### lotus-miner sectors seal ``` NAME: diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index f9ca24123..197158302 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] 
VERSION: - 1.15.1-dev + 1.15.2-dev COMMANDS: run Start lotus worker diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index a9697f3af..a08f01039 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.15.1-dev + 1.15.2-dev COMMANDS: daemon Start a lotus daemon process @@ -770,7 +770,7 @@ NAME: lotus client get-deal - Print detailed deal information USAGE: - lotus client get-deal [command options] [arguments...] + lotus client get-deal [command options] [proposalCID] CATEGORY: STORAGE diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index ad917814b..1c9caf3a0 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -23,6 +23,12 @@ #DisableMetadataLog = false +[Logging] + [Logging.SubsystemLevels] + # env var: LOTUS_LOGGING_SUBSYSTEMLEVELS_EXAMPLE-SUBSYSTEM + #example-subsystem = "INFO" + + [Libp2p] # Binding address for the libp2p host - 0 means random port. # Format: multiaddress; see https://multiformats.io/multiaddr/ diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml index 00370a9cc..de45be748 100644 --- a/documentation/en/default-lotus-miner-config.toml +++ b/documentation/en/default-lotus-miner-config.toml @@ -23,6 +23,12 @@ #DisableMetadataLog = false +[Logging] + [Logging.SubsystemLevels] + # env var: LOTUS_LOGGING_SUBSYSTEMLEVELS_EXAMPLE-SUBSYSTEM + #example-subsystem = "INFO" + + [Libp2p] # Binding address for the libp2p host - 0 means random port. 
# Format: multiaddress; see https://multiformats.io/multiaddr/ @@ -359,6 +365,12 @@ # env var: LOTUS_SEALING_FINALIZEEARLY #FinalizeEarly = false + # After sealing CC sectors, make them available for upgrading with deals + # + # type: bool + # env var: LOTUS_SEALING_MAKECCSECTORSAVAILABLE + #MakeCCSectorsAvailable = false + # Whether to use available miner balance for sector collateral instead of sending it with each message # # type: bool diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 5ec5d805c..c2668aa67 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 5ec5d805c01ea85224f6448dd6c6fa0a2a73c028 +Subproject commit c2668aa67ec589a773153022348b9c0ed6ed4d5d diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go index cf8978464..191d8a9d1 100644 --- a/extern/sector-storage/ffiwrapper/sealer_test.go +++ b/extern/sector-storage/ffiwrapper/sealer_test.go @@ -295,7 +295,7 @@ func TestSealAndVerify(t *testing.T) { if err != nil { t.Fatalf("%+v", err) } - cleanup := func() { + t.Cleanup(func() { if t.Failed() { fmt.Printf("not removing %s\n", cdir) return @@ -303,8 +303,7 @@ func TestSealAndVerify(t *testing.T) { if err := os.RemoveAll(cdir); err != nil { t.Error(err) } - } - defer cleanup() + }) si := storage.SectorRef{ ID: abi.SectorID{Miner: miner, Number: 1}, @@ -369,7 +368,7 @@ func TestSealPoStNoCommit(t *testing.T) { t.Fatalf("%+v", err) } - cleanup := func() { + t.Cleanup(func() { if t.Failed() { fmt.Printf("not removing %s\n", dir) return @@ -377,8 +376,7 @@ func TestSealPoStNoCommit(t *testing.T) { if err := os.RemoveAll(dir); err != nil { t.Error(err) } - } - defer cleanup() + }) si := storage.SectorRef{ ID: abi.SectorID{Miner: miner, Number: 1}, @@ -434,13 +432,11 @@ func TestSealAndVerify3(t *testing.T) { t.Fatalf("%+v", err) } - cleanup := func() { + t.Cleanup(func() { if err := os.RemoveAll(dir); err != nil { t.Error(err) } - } - - defer cleanup() + }) 
var wg sync.WaitGroup @@ -512,7 +508,7 @@ func TestSealAndVerifyAggregate(t *testing.T) { if err != nil { t.Fatalf("%+v", err) } - cleanup := func() { + t.Cleanup(func() { if t.Failed() { fmt.Printf("not removing %s\n", cdir) return @@ -520,8 +516,7 @@ func TestSealAndVerifyAggregate(t *testing.T) { if err := os.RemoveAll(cdir); err != nil { t.Error(err) } - } - defer cleanup() + }) avi := proof5.AggregateSealVerifyProofAndInfos{ Miner: miner, @@ -917,7 +912,7 @@ func TestMulticoreSDR(t *testing.T) { t.Fatalf("%+v", err) } - cleanup := func() { + t.Cleanup(func() { if t.Failed() { fmt.Printf("not removing %s\n", dir) return @@ -925,8 +920,7 @@ func TestMulticoreSDR(t *testing.T) { if err := os.RemoveAll(dir); err != nil { t.Error(err) } - } - defer cleanup() + }) si := storage.SectorRef{ ID: abi.SectorID{Miner: miner, Number: 1}, diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index 28e071559..70195d333 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -12,6 +12,7 @@ import ( "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/mitchellh/go-homedir" + "go.uber.org/multierr" "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" @@ -548,10 +549,10 @@ func (m *Manager) FinalizeSector(ctx context.Context, sector storage.SectorRef, } } - selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed, false) + selector := newExistingSelector(m.index, sector.ID, storiface.FTCache, false) err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalize, selector, - m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|unsealed, pathType, storiface.AcquireMove), + m.schedFetch(sector, storiface.FTCache|unsealed, pathType, storiface.AcquireMove), func(ctx context.Context, w Worker) error { _, err := m.waitSimpleCall(ctx)(w.FinalizeSector(ctx, sector, keepUnsealed)) return err @@ -589,7 +590,7 @@ func (m *Manager) FinalizeReplicaUpdate(ctx 
context.Context, sector storage.Sect return xerrors.Errorf("acquiring sector lock: %w", err) } - fts := storiface.FTUnsealed + moveUnsealed := storiface.FTUnsealed { unsealedStores, err := m.index.StorageFindSector(ctx, sector.ID, storiface.FTUnsealed, 0, false) if err != nil { @@ -597,7 +598,7 @@ func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storage.Sect } if len(unsealedStores) == 0 { // Is some edge-cases unsealed sector may not exist already, that's fine - fts = storiface.FTNone + moveUnsealed = storiface.FTNone } } @@ -616,10 +617,10 @@ func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storage.Sect } } - selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache, false) + selector := newExistingSelector(m.index, sector.ID, storiface.FTCache|storiface.FTUpdateCache, false) err := m.sched.Schedule(ctx, sector, sealtasks.TTFinalizeReplicaUpdate, selector, - m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache|fts, pathType, storiface.AcquireMove), + m.schedFetch(sector, storiface.FTCache|storiface.FTUpdateCache|moveUnsealed, pathType, storiface.AcquireMove), func(ctx context.Context, w Worker) error { _, err := m.waitSimpleCall(ctx)(w.FinalizeReplicaUpdate(ctx, sector, keepUnsealed)) return err @@ -628,22 +629,30 @@ func (m *Manager) FinalizeReplicaUpdate(ctx context.Context, sector storage.Sect return err } - fetchSel := newAllocSelector(m.index, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathStorage) - moveUnsealed := fts - { - if len(keepUnsealed) == 0 { - moveUnsealed = storiface.FTNone + move := func(types storiface.SectorFileType) error { + fetchSel := newAllocSelector(m.index, types, storiface.PathStorage) + { + if len(keepUnsealed) == 0 { + moveUnsealed = storiface.FTNone + } } + + err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel, + 
m.schedFetch(sector, types, storiface.PathStorage, storiface.AcquireMove), + func(ctx context.Context, w Worker) error { + _, err := m.waitSimpleCall(ctx)(w.MoveStorage(ctx, sector, types)) + return err + }) + if err != nil { + return xerrors.Errorf("moving sector to storage: %w", err) + } + return nil } - err = m.sched.Schedule(ctx, sector, sealtasks.TTFetch, fetchSel, - m.schedFetch(sector, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache|moveUnsealed, storiface.PathStorage, storiface.AcquireMove), - func(ctx context.Context, w Worker) error { - _, err := m.waitSimpleCall(ctx)(w.MoveStorage(ctx, sector, storiface.FTCache|storiface.FTSealed|storiface.FTUpdate|storiface.FTUpdateCache|moveUnsealed)) - return err - }) - if err != nil { - return xerrors.Errorf("moving sector to storage: %w", err) + err = multierr.Append(move(storiface.FTUpdate|storiface.FTUpdateCache), move(storiface.FTCache)) + err = multierr.Append(err, move(storiface.FTSealed)) // Sealed separate from cache just in case ReleaseSectorKey was already called + if moveUnsealed != storiface.FTNone { + err = multierr.Append(err, move(moveUnsealed)) } return nil diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index 20abad309..ecaeaa168 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -426,11 +426,19 @@ func generateFakePoSt(sectorInfo []proof.SectorInfo, rpt func(abi.RegisteredSeal } func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) { - if uint64(offset) != 0 { - panic("implme") + off := storiface.UnpaddedByteIndex(0) + var piece cid.Cid + for _, c := range mgr.sectors[sector.ID].pieces { + piece = c + if off >= offset { + break + } + off += storiface.UnpaddedByteIndex(len(mgr.pieces[piece])) } - - br := 
bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size]) + if off > offset { + panic("non-aligned offset todo") + } + br := bytes.NewReader(mgr.pieces[piece][:size]) return struct { io.ReadCloser diff --git a/extern/sector-storage/piece_provider.go b/extern/sector-storage/piece_provider.go index 4622289e8..72e09df06 100644 --- a/extern/sector-storage/piece_provider.go +++ b/extern/sector-storage/piece_provider.go @@ -166,7 +166,7 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, r, err := p.tryReadUnsealedPiece(ctx, unsealed, sector, pieceOffset, size) - log.Debugf("result of first tryReadUnsealedPiece: r=%+v, err=%s", r, err) + log.Debugf("result of first tryReadUnsealedPiece: r=%s, err=%s", r, err) if xerrors.Is(err, storiface.ErrSectorNotFound) { log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) diff --git a/extern/sector-storage/stores/http_handler.go b/extern/sector-storage/stores/http_handler.go index 80fa87408..6b7d24904 100644 --- a/extern/sector-storage/stores/http_handler.go +++ b/extern/sector-storage/stores/http_handler.go @@ -172,7 +172,7 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R return } - if err := handler.Local.Remove(r.Context(), id, ft, false, []ID{ID(r.FormValue("keep"))}); err != nil { + if err := handler.Local.Remove(r.Context(), id, ft, false, ParseIDList(r.FormValue("keep"))); err != nil { log.Errorf("%+v", err) w.WriteHeader(500) return diff --git a/extern/sector-storage/stores/http_handler_test.go b/extern/sector-storage/stores/http_handler_test.go index 673aba55d..257807574 100644 --- a/extern/sector-storage/stores/http_handler_test.go +++ b/extern/sector-storage/stores/http_handler_test.go @@ -397,12 +397,7 @@ func TestRemoteGetSector(t *testing.T) { stat, err := os.Stat(tempFile2.Name()) require.NoError(t, err) - tempDir, err := ioutil.TempDir("", "TestRemoteGetSector-") - 
require.NoError(t, err) - - defer func() { - _ = os.RemoveAll(tempDir) - }() + tempDir := t.TempDir() require.NoError(t, os.Rename(tempFile2.Name(), filepath.Join(tempDir, stat.Name()))) diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index a90cdf0b9..35a1da693 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -7,6 +7,7 @@ import ( "net/url" gopath "path" "sort" + "strings" "sync" "time" @@ -29,6 +30,27 @@ var SkippedHeartbeatThresh = HeartbeatInterval * 5 // filesystem, local or networked / shared by multiple machines type ID string +const IDSep = "." + +type IDList []ID + +func (il IDList) String() string { + l := make([]string, len(il)) + for i, id := range il { + l[i] = string(id) + } + return strings.Join(l, IDSep) +} + +func ParseIDList(s string) IDList { + strs := strings.Split(s, IDSep) + out := make([]ID, len(strs)) + for i, str := range strs { + out[i] = ID(str) + } + return out +} + type Group = string type StorageInfo struct { diff --git a/extern/sector-storage/stores/local_test.go b/extern/sector-storage/stores/local_test.go index ac5f6f341..cd8222a93 100644 --- a/extern/sector-storage/stores/local_test.go +++ b/extern/sector-storage/stores/local_test.go @@ -74,8 +74,7 @@ var _ LocalStorage = &TestingLocalStorage{} func TestLocalStorage(t *testing.T) { ctx := context.TODO() - root, err := ioutil.TempDir("", "sector-storage-teststorage-") - require.NoError(t, err) + root := t.TempDir() tstor := &TestingLocalStorage{ root: root, diff --git a/extern/sector-storage/stores/remote.go b/extern/sector-storage/stores/remote.go index bd6b34be3..42a41f788 100644 --- a/extern/sector-storage/stores/remote.go +++ b/extern/sector-storage/stores/remote.go @@ -44,12 +44,36 @@ type Remote struct { pfHandler PartialFileHandler } -func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error { - // TODO: do this on remotes too - // (not 
that we really need to do that since it's always called by the - // worker which pulled the copy) +func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, typ storiface.SectorFileType) error { + if bits.OnesCount(uint(typ)) != 1 { + return xerrors.New("RemoveCopies expects one file type") + } - return r.local.RemoveCopies(ctx, s, types) + if err := r.local.RemoveCopies(ctx, s, typ); err != nil { + return xerrors.Errorf("removing local copies: %w", err) + } + + si, err := r.index.StorageFindSector(ctx, s, typ, 0, false) + if err != nil { + return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", s, typ, err) + } + + var hasPrimary bool + var keep []ID + for _, info := range si { + if info.Primary { + hasPrimary = true + keep = append(keep, info.ID) + break + } + } + + if !hasPrimary { + log.Warnf("remote RemoveCopies: no primary copies of sector %v (%s), not removing anything", s, typ) + return nil + } + + return r.Remove(ctx, s, typ, true, keep) } func NewRemote(local Store, index SectorIndex, auth http.Header, fetchLimit int, pfHandler PartialFileHandler) *Remote { @@ -156,7 +180,7 @@ func (r *Remote) AcquireSector(ctx context.Context, s storage.SectorRef, existin if op == storiface.AcquireMove { id := ID(storageID) - if err := r.deleteFromRemote(ctx, url, &id); err != nil { + if err := r.deleteFromRemote(ctx, url, []ID{id}); err != nil { log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err) } } @@ -355,7 +379,7 @@ storeLoop: } } for _, url := range info.URLs { - if err := r.deleteFromRemote(ctx, url, nil); err != nil { + if err := r.deleteFromRemote(ctx, url, keepIn); err != nil { log.Warnf("remove %s: %+v", url, err) continue } @@ -366,9 +390,9 @@ storeLoop: return nil } -func (r *Remote) deleteFromRemote(ctx context.Context, url string, keepIn *ID) error { +func (r *Remote) deleteFromRemote(ctx context.Context, url string, keepIn IDList) error { if keepIn != nil { - url = url + "?keep=" + string(*keepIn) + url = 
url + "?keep=" + keepIn.String() } log.Infof("Delete %s", url) diff --git a/extern/sector-storage/stores/remote_test.go b/extern/sector-storage/stores/remote_test.go index a7a82a728..239da9879 100644 --- a/extern/sector-storage/stores/remote_test.go +++ b/extern/sector-storage/stores/remote_test.go @@ -64,11 +64,7 @@ func TestMoveShared(t *testing.T) { ctx := context.Background() - dir, err := ioutil.TempDir("", "stores-remote-test-") - require.NoError(t, err) - t.Cleanup(func() { - _ = os.RemoveAll(dir) - }) + dir := t.TempDir() openRepo := func(dir string) repo.LockedRepo { r, err := repo.NewFS(dir) diff --git a/extern/sector-storage/worker_local.go b/extern/sector-storage/worker_local.go index 2ca86f546..4f7ae767d 100644 --- a/extern/sector-storage/worker_local.go +++ b/extern/sector-storage/worker_local.go @@ -516,7 +516,20 @@ func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error { func (l *LocalWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { return l.asyncCall(ctx, sector, MoveStorage, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { - return nil, l.storage.MoveStorage(ctx, sector, types) + if err := l.storage.MoveStorage(ctx, sector, types); err != nil { + return nil, xerrors.Errorf("move to storage: %w", err) + } + + for _, fileType := range storiface.PathTypes { + if fileType&types == 0 { + continue + } + + if err := l.storage.RemoveCopies(ctx, sector.ID, fileType); err != nil { + return nil, xerrors.Errorf("rm copies (t:%s, s:%v): %w", fileType, sector, err) + } + } + return nil, nil }) } diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index 2c50d1885..a02666135 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -111,6 +111,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto Committing: planCommitting, CommitFinalize: planOne( 
on(SectorFinalized{}, SubmitCommit), + on(SectorFinalizedAvailable{}, SubmitCommit), on(SectorFinalizeFailed{}, CommitFinalizeFailed), ), SubmitCommit: planOne( @@ -136,6 +137,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto FinalizeSector: planOne( on(SectorFinalized{}, Proving), + on(SectorFinalizedAvailable{}, Available), on(SectorFinalizeFailed{}, FinalizeFailed), ), @@ -283,7 +285,11 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto Proving: planOne( on(SectorFaultReported{}, FaultReported), on(SectorFaulty{}, Faulty), + on(SectorMarkForUpdate{}, Available), + ), + Available: planOne( on(SectorStartCCUpdate{}, SnapDealsWaitDeals), + on(SectorAbortUpgrade{}, Proving), ), Terminating: planOne( on(SectorTerminating{}, TerminateWait), @@ -393,7 +399,7 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta processed, err := p(events, state) if err != nil { - return nil, 0, xerrors.Errorf("running planner for state %s failed: %w", state.State, err) + return nil, processed, xerrors.Errorf("running planner for state %s failed: %w", state.State, err) } ///// @@ -558,6 +564,8 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta // Post-seal case Proving: return m.handleProvingSector, processed, nil + case Available: + return m.handleAvailableSector, processed, nil case Terminating: return m.handleTerminating, processed, nil case TerminateWait: diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go index fc3b774f9..66497473f 100644 --- a/extern/storage-sealing/fsm_events.go +++ b/extern/storage-sealing/fsm_events.go @@ -286,6 +286,10 @@ type SectorFinalized struct{} func (evt SectorFinalized) apply(*SectorInfo) {} +type SectorFinalizedAvailable struct{} + +func (evt SectorFinalizedAvailable) apply(*SectorInfo) {} + type SectorRetryFinalize struct{} func (evt SectorRetryFinalize) apply(*SectorInfo) {} @@ 
-297,6 +301,10 @@ func (evt SectorFinalizeFailed) apply(*SectorInfo) {} // Snap deals // CC update path +type SectorMarkForUpdate struct{} + +func (evt SectorMarkForUpdate) apply(state *SectorInfo) {} + type SectorStartCCUpdate struct{} func (evt SectorStartCCUpdate) apply(state *SectorInfo) { diff --git a/extern/storage-sealing/fsm_test.go b/extern/storage-sealing/fsm_test.go index 10ee17c6b..f3012a400 100644 --- a/extern/storage-sealing/fsm_test.go +++ b/extern/storage-sealing/fsm_test.go @@ -323,6 +323,33 @@ func TestBrokenState(t *testing.T) { } } +func TestBadEvent(t *testing.T) { + var notif []struct{ before, after SectorInfo } + ma, _ := address.NewIDAddress(55151) + m := test{ + s: &Sealing{ + maddr: ma, + stats: SectorStats{ + bySector: map[abi.SectorID]SectorState{}, + byState: map[SectorState]int64{}, + }, + notifee: func(before, after SectorInfo) { + notif = append(notif, struct{ before, after SectorInfo }{before, after}) + }, + }, + t: t, + state: &SectorInfo{State: Proving}, + } + + _, processed, err := m.s.Plan([]statemachine.Event{{User: SectorPacked{}}}, m.state) + require.NoError(t, err) + require.Equal(t, uint64(1), processed) + require.Equal(m.t, m.state.State, Proving) + + require.Len(t, m.state.Log, 2) + require.Contains(t, m.state.Log[1].Message, "received unexpected event") +} + func TestTicketExpired(t *testing.T) { var notif []struct{ before, after SectorInfo } ma, _ := address.NewIDAddress(55151) diff --git a/extern/storage-sealing/input.go b/extern/storage-sealing/input.go index c999badfd..d2b51edc9 100644 --- a/extern/storage-sealing/input.go +++ b/extern/storage-sealing/input.go @@ -12,11 +12,14 @@ import ( "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/go-padreader" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-statemachine" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/api" 
"github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" @@ -30,8 +33,8 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e m.inputLk.Lock() - if m.creating != nil && *m.creating == sector.SectorNumber { - m.creating = nil + if m.nextDealSector != nil && *m.nextDealSector == sector.SectorNumber { + m.nextDealSector = nil } sid := m.minerSectorID(sector.SectorNumber) @@ -315,25 +318,21 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec m.inputLk.Unlock() // we already have a pre-existing add piece call for this deal, let's wait for it to finish and see if it's successful - for { - res, err := waitAddPieceResp(ctx, pp) - if err != nil { - return api.SectorOffset{}, err - } - // there was an error waiting for a pre-existing add piece call, let's retry - if res.err != nil { - m.inputLk.Lock() - pp = m.addPendingPiece(ctx, size, data, deal, sp) - m.inputLk.Unlock() - continue - } + res, err := waitAddPieceResp(ctx, pp) + if err != nil { + return api.SectorOffset{}, err + } + if res.err == nil { // all good, return the response return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err } + // if there was an error waiting for a pre-existing add piece call, let's retry + m.inputLk.Lock() } + // addPendingPiece takes over m.inputLk pp := m.addPendingPiece(ctx, size, data, deal, sp) - m.inputLk.Unlock() + res, err := waitAddPieceResp(ctx, pp) if err != nil { return api.SectorOffset{}, err @@ -341,6 +340,7 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err } +// called with m.inputLk; 
transfers the lock to another goroutine! func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSize, data storage.Data, deal api.PieceDealInfo, sp abi.RegisteredSealProof) *pendingPiece { doneCh := make(chan struct{}) pp := &pendingPiece{ @@ -357,6 +357,7 @@ func (m *Sealing) addPendingPiece(ctx context.Context, size abi.UnpaddedPieceSiz m.pendingPieces[proposalCID(deal)] = pp go func() { + defer m.inputLk.Unlock() if err := m.updateInput(ctx, sp); err != nil { log.Errorf("%+v", err) } @@ -386,8 +387,29 @@ func (m *Sealing) MatchPendingPiecesToOpenSectors(ctx context.Context) error { return m.updateInput(ctx, sp) } +type expFn func(sn abi.SectorNumber) (abi.ChainEpoch, abi.TokenAmount, error) + // called with m.inputLk func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) error { + memo := make(map[abi.SectorNumber]struct { + e abi.ChainEpoch + p abi.TokenAmount + }) + expF := func(sn abi.SectorNumber) (abi.ChainEpoch, abi.TokenAmount, error) { + if e, ok := memo[sn]; ok { + return e.e, e.p, nil + } + onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, sn, TipSetToken{}) + if err != nil { + return 0, big.Zero(), err + } + memo[sn] = struct { + e abi.ChainEpoch + p abi.TokenAmount + }{e: onChainInfo.Expiration, p: onChainInfo.InitialPledge} + return onChainInfo.Expiration, onChainInfo.InitialPledge, nil + } + ssize, err := sp.SectorSize() if err != nil { return err @@ -413,19 +435,6 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e toAssign[proposalCid] = struct{}{} - memo := make(map[abi.SectorNumber]abi.ChainEpoch) - expF := func(sn abi.SectorNumber) (abi.ChainEpoch, error) { - if exp, ok := memo[sn]; ok { - return exp, nil - } - onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, sn, TipSetToken{}) - if err != nil { - return 0, err - } - memo[sn] = onChainInfo.Expiration - return onChainInfo.Expiration, nil - } - for id, sector := range m.openSectors { avail := 
abi.PaddedPieceSize(ssize).Unpadded() - sector.used // check that sector lifetime is long enough to fit deal using latest expiration from on chain @@ -436,7 +445,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e continue } if !ok { - exp, _ := expF(sector.number) + exp, _, _ := expF(sector.number) log.Infof("CC update sector %d cannot fit deal, expiration %d before deal end epoch %d", id, exp, piece.deal.DealProposal.EndEpoch) continue } @@ -499,7 +508,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e if len(toAssign) > 0 { log.Errorf("we are trying to create a new sector with open sectors %v", m.openSectors) - if err := m.tryCreateDealSector(ctx, sp); err != nil { + if err := m.tryGetDealSector(ctx, sp, expF); err != nil { log.Errorw("Failed to create a new sector for deals", "error", err) } } @@ -507,10 +516,113 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e return nil } -func (m *Sealing) tryCreateDealSector(ctx context.Context, sp abi.RegisteredSealProof) error { +func (m *Sealing) calcTargetExpiration(ctx context.Context, ssize abi.SectorSize) (minTarget, target abi.ChainEpoch, err error) { + var candidates []*pendingPiece + + for _, piece := range m.pendingPieces { + if piece.assigned { + continue // already assigned to a sector, skip + } + candidates = append(candidates, piece) + } + + // earliest expiration first + sort.Slice(candidates, func(i, j int) bool { + return candidates[i].deal.DealProposal.EndEpoch < candidates[j].deal.DealProposal.EndEpoch + }) + + var totalBytes uint64 + for _, candidate := range candidates { + totalBytes += uint64(candidate.size) + + if totalBytes >= uint64(abi.PaddedPieceSize(ssize).Unpadded()) { + return candidates[0].deal.DealProposal.EndEpoch, candidate.deal.DealProposal.EndEpoch, nil + } + } + + _, curEpoch, err := m.Api.ChainHead(ctx) + if err != nil { + return 0, 0, xerrors.Errorf("getting current epoch: %w", err) 
+ } + + minDur, maxDur := policy.DealDurationBounds(0) + + return curEpoch + minDur, curEpoch + maxDur, nil +} + +func (m *Sealing) tryGetUpgradeSector(ctx context.Context, sp abi.RegisteredSealProof, ef expFn) (bool, error) { + if len(m.available) == 0 { + return false, nil + } + + ssize, err := sp.SectorSize() + if err != nil { + return false, xerrors.Errorf("getting sector size: %w", err) + } + minExpiration, targetExpiration, err := m.calcTargetExpiration(ctx, ssize) + if err != nil { + return false, xerrors.Errorf("calculating min target expiration: %w", err) + } + + var candidate abi.SectorID + var bestExpiration abi.ChainEpoch + bestPledge := types.TotalFilecoinInt + + for s := range m.available { + expiration, pledge, err := ef(s.Number) + if err != nil { + log.Errorw("checking sector expiration", "error", err) + continue + } + + slowChecks := func(sid abi.SectorNumber) bool { + active, err := m.sectorActive(ctx, TipSetToken{}, sid) + if err != nil { + log.Errorw("checking sector active", "error", err) + return false + } + if !active { + log.Debugw("skipping available sector", "reason", "not active") + return false + } + return true + } + + // if best is below target, we want larger expirations + // if best is above target, we want lower pledge, but only if still above target + + if bestExpiration < targetExpiration { + if expiration > bestExpiration && slowChecks(s.Number) { + bestExpiration = expiration + bestPledge = pledge + candidate = s + } + continue + } + + if expiration >= targetExpiration && pledge.LessThan(bestPledge) && slowChecks(s.Number) { + bestExpiration = expiration + bestPledge = pledge + candidate = s + } + } + + if bestExpiration < minExpiration { + log.Infow("Not upgrading any sectors", "available", len(m.available), "pieces", len(m.pendingPieces), "bestExp", bestExpiration, "target", targetExpiration, "min", minExpiration, "candidate", candidate) + // didn't find a good sector / no sectors were available + return false, nil + } + + 
log.Infow("Upgrading sector", "number", candidate.Number, "type", "deal", "proofType", sp, "expiration", bestExpiration, "pledge", types.FIL(bestPledge)) + delete(m.available, candidate) + m.nextDealSector = &candidate.Number + return true, m.sectors.Send(uint64(candidate.Number), SectorStartCCUpdate{}) +} + +func (m *Sealing) tryGetDealSector(ctx context.Context, sp abi.RegisteredSealProof, ef expFn) error { m.startupWait.Wait() - if m.creating != nil { + if m.nextDealSector != nil { return nil // new sector is being created right now } @@ -519,10 +631,6 @@ func (m *Sealing) tryCreateDealSector(ctx context.Context, sp abi.RegisteredSeal return xerrors.Errorf("getting storage config: %w", err) } - if !cfg.MakeNewSectorForDeals { - return nil - } - if cfg.MaxSealingSectorsForDeals > 0 && m.stats.curSealing() >= cfg.MaxSealingSectorsForDeals { return nil } @@ -531,12 +639,24 @@ func (m *Sealing) tryCreateDealSector(ctx context.Context, sp abi.RegisteredSeal return nil } + got, err := m.tryGetUpgradeSector(ctx, sp, ef) + if err != nil { + return err + } + if got { + return nil + } + + if !cfg.MakeNewSectorForDeals { + return nil + } + sid, err := m.createSector(ctx, cfg, sp) if err != nil { return err } - m.creating = &sid + m.nextDealSector = &sid log.Infow("Creating sector", "number", sid, "type", "deal", "proofType", sp) return m.sectors.Send(uint64(sid), SectorStart{ @@ -575,6 +695,11 @@ func (m *Sealing) StartPacking(sid abi.SectorNumber) error { func (m *Sealing) AbortUpgrade(sid abi.SectorNumber) error { m.startupWait.Wait() + m.inputLk.Lock() + // always do this early + delete(m.available, m.minerSectorID(sid)) + m.inputLk.Unlock() + log.Infow("aborting upgrade of sector", "sector", sid, "trigger", "user") return m.sectors.Send(uint64(sid), SectorAbortUpgrade{xerrors.New("triggered by user")}) } diff --git a/extern/storage-sealing/mocks/api.go b/extern/storage-sealing/mocks/api.go index 95c222ecd..efe89ff0b 100644 --- a/extern/storage-sealing/mocks/api.go +++ 
b/extern/storage-sealing/mocks/api.go @@ -9,6 +9,7 @@ import ( reflect "reflect" address "github.com/filecoin-project/go-address" + bitfield "github.com/filecoin-project/go-bitfield" abi "github.com/filecoin-project/go-state-types/abi" big "github.com/filecoin-project/go-state-types/big" crypto "github.com/filecoin-project/go-state-types/crypto" @@ -214,10 +215,10 @@ func (mr *MockSealingAPIMockRecorder) StateMarketStorageDealProposal(arg0, arg1, } // StateMinerActiveSectors mocks base method. -func (m *MockSealingAPI) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) ([]*miner.SectorOnChainInfo, error) { +func (m *MockSealingAPI) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (bitfield.BitField, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2) - ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret0, _ := ret[0].(bitfield.BitField) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/extern/storage-sealing/sealiface/config.go b/extern/storage-sealing/sealiface/config.go index 852034aa7..20bd2b564 100644 --- a/extern/storage-sealing/sealiface/config.go +++ b/extern/storage-sealing/sealiface/config.go @@ -20,6 +20,8 @@ type Config struct { MakeNewSectorForDeals bool + MakeCCSectorsAvailable bool + WaitDealsDelay time.Duration CommittedCapacitySectorLifetime time.Duration diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index 907d7cdfd..f2566a8c9 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -13,6 +13,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" @@ -63,7 +64,7 @@ type SealingAPI interface { StateMinerInfo(context.Context, 
address.Address, TipSetToken) (miner.MinerInfo, error) StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error) - StateMinerActiveSectors(context.Context, address.Address, TipSetToken) ([]*miner.SectorOnChainInfo, error) + StateMinerActiveSectors(context.Context, address.Address, TipSetToken) (bitfield.BitField, error) StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error) StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) @@ -104,10 +105,9 @@ type Sealing struct { sectorTimers map[abi.SectorID]*time.Timer pendingPieces map[cid.Cid]*pendingPiece assignedPieces map[abi.SectorID][]cid.Cid - creating *abi.SectorNumber // used to prevent a race where we could create a new sector more than once + nextDealSector *abi.SectorNumber // used to prevent a race where we could create a new sector more than once - upgradeLk sync.Mutex - toUpgrade map[abi.SectorNumber]struct{} + available map[abi.SectorID]struct{} notifee SectorStateNotifee addrSel AddrSel @@ -129,11 +129,11 @@ type openSector struct { maybeAccept func(cid.Cid) error // called with inputLk } -func (o *openSector) dealFitsInLifetime(dealEnd abi.ChainEpoch, expF func(sn abi.SectorNumber) (abi.ChainEpoch, error)) (bool, error) { +func (o *openSector) dealFitsInLifetime(dealEnd abi.ChainEpoch, expF expFn) (bool, error) { if !o.ccUpdate { return true, nil } - expiration, err := expF(o.number) + expiration, _, err := expF(o.number) if err != nil { return false, err } @@ -177,7 +177,8 @@ func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events sectorTimers: map[abi.SectorID]*time.Timer{}, pendingPieces: map[cid.Cid]*pendingPiece{}, assignedPieces: map[abi.SectorID][]cid.Cid{}, - toUpgrade: 
map[abi.SectorNumber]struct{}{}, + + available: map[abi.SectorID]struct{}{}, notifee: notifee, addrSel: as, diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go index 0f8228d02..4f81f5544 100644 --- a/extern/storage-sealing/sector_state.go +++ b/extern/storage-sealing/sector_state.go @@ -25,6 +25,7 @@ var ExistSectorStateList = map[SectorState]struct{}{ CommitAggregateWait: {}, FinalizeSector: {}, Proving: {}, + Available: {}, FailedUnrecoverable: {}, SealPreCommit1Failed: {}, SealPreCommit2Failed: {}, @@ -98,6 +99,7 @@ const ( FinalizeSector SectorState = "FinalizeSector" Proving SectorState = "Proving" + Available SectorState = "Available" // proving CC available for SnapDeals // snap deals / cc update SnapDealsWaitDeals SectorState = "SnapDealsWaitDeals" @@ -161,9 +163,31 @@ func toStatState(st SectorState, finEarly bool) statSectorState { return sstProving } return sstSealing - case Proving, UpdateActivating, ReleaseSectorKey, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: + case Proving, Available, UpdateActivating, ReleaseSectorKey, Removed, Removing, Terminating, TerminateWait, TerminateFinality, TerminateFailed: return sstProving } return sstFailed } + +func IsUpgradeState(st SectorState) bool { + switch st { + case SnapDealsWaitDeals, + SnapDealsAddPiece, + SnapDealsPacking, + UpdateReplica, + ProveReplicaUpdate, + SubmitReplicaUpdate, + + SnapDealsAddPieceFailed, + SnapDealsDealsExpired, + SnapDealsRecoverDealIDs, + AbortUpgrade, + ReplicaUpdateFailed, + ReleaseSectorKeyFailed, + FinalizeReplicaUpdateFailed: + return true + default: + return false + } +} diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index a1c3be460..90fa5090a 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -238,7 +238,7 @@ func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sect } // Abort 
upgrade for sectors that went faulty since being marked for upgrade - active, err := sectorActive(ctx.Context(), m.Api, m.maddr, tok, sector.SectorNumber) + active, err := m.sectorActive(ctx.Context(), tok, sector.SectorNumber) if err != nil { log.Errorf("sector active check: api error, not proceeding: %+v", err) return nil diff --git a/extern/storage-sealing/states_proving.go b/extern/storage-sealing/states_proving.go index e74119976..afb5c54bf 100644 --- a/extern/storage-sealing/states_proving.go +++ b/extern/storage-sealing/states_proving.go @@ -130,6 +130,11 @@ func (m *Sealing) handleRemoving(ctx statemachine.Context, sector SectorInfo) er func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInfo) error { // TODO: track sector health / expiration + m.inputLk.Lock() + // in case we revert into Proving without going into Available + delete(m.available, m.minerSectorID(sector.SectorNumber)) + m.inputLk.Unlock() + cfg, err := m.getConfig() if err != nil { return xerrors.Errorf("getting sealing config: %w", err) @@ -144,3 +149,13 @@ func (m *Sealing) handleProvingSector(ctx statemachine.Context, sector SectorInf return nil } + +func (m *Sealing) handleAvailableSector(ctx statemachine.Context, sector SectorInfo) error { + m.inputLk.Lock() + m.available[m.minerSectorID(sector.SectorNumber)] = struct{}{} + m.inputLk.Unlock() + // TODO: Watch termination + // TODO: Auto-extend if set + + return nil +} diff --git a/extern/storage-sealing/states_replica_update.go b/extern/storage-sealing/states_replica_update.go index bede7a5fa..8a4f05dc4 100644 --- a/extern/storage-sealing/states_replica_update.go +++ b/extern/storage-sealing/states_replica_update.go @@ -41,7 +41,7 @@ func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector Sect log.Errorf("handleProveReplicaUpdate: api error, not proceeding: %+v", err) return nil } - active, err := sectorActive(ctx.Context(), m.Api, m.maddr, tok, sector.SectorNumber) + active, err := 
m.sectorActive(ctx.Context(), tok, sector.SectorNumber) if err != nil { log.Errorf("sector active check: api error, not proceeding: %+v", err) return nil diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index 3dba325ee..058c8316a 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -279,14 +279,6 @@ func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) }) } -// TODO: We should probably invoke this method in most (if not all) state transition failures after handlePreCommitting -func (m *Sealing) remarkForUpgrade(ctx context.Context, sid abi.SectorNumber) { - err := m.MarkForUpgrade(ctx, sid) - if err != nil { - log.Errorf("error re-marking sector %d as for upgrade: %+v", sid, err) - } -} - func (m *Sealing) preCommitParams(ctx statemachine.Context, sector SectorInfo) (*miner.SectorPreCommitInfo, big.Int, TipSetToken, error) { tok, height, err := m.Api.ChainHead(ctx.Context()) if err != nil { @@ -360,16 +352,12 @@ func (m *Sealing) preCommitParams(ctx statemachine.Context, sector SectorInfo) ( DealIDs: sector.dealIDs(), } - depositMinimum := m.tryUpgradeSector(ctx.Context(), params) - collateral, err := m.Api.StateMinerPreCommitDepositForPower(ctx.Context(), m.maddr, *params, tok) if err != nil { return nil, big.Zero(), nil, xerrors.Errorf("getting initial pledge collateral: %w", err) } - deposit := big.Max(depositMinimum, collateral) - - return params, deposit, tok, nil + return params, collateral, tok, nil } func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInfo) error { @@ -423,9 +411,6 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf log.Infof("submitting precommit for sector %d (deposit: %s): ", sector.SectorNumber, deposit) mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes()) if err 
!= nil { - if params.ReplaceCapacity { - m.remarkForUpgrade(ctx.Context(), params.ReplaceSectorNumber) - } return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)}) } @@ -782,5 +767,8 @@ func (m *Sealing) handleFinalizeSector(ctx statemachine.Context, sector SectorIn return ctx.Send(SectorFinalizeFailed{xerrors.Errorf("finalize sector: %w", err)}) } + if cfg.MakeCCSectorsAvailable && !sector.hasDeals() { + return ctx.Send(SectorFinalizedAvailable{}) + } return ctx.Send(SectorFinalized{}) } diff --git a/extern/storage-sealing/upgrade_queue.go b/extern/storage-sealing/upgrade_queue.go index 1e5bef67c..b6fd6e173 100644 --- a/extern/storage-sealing/upgrade_queue.go +++ b/extern/storage-sealing/upgrade_queue.go @@ -3,64 +3,13 @@ package sealing import ( "context" - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" - "golang.org/x/xerrors" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" + market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" ) -func (m *Sealing) IsMarkedForUpgrade(id abi.SectorNumber) bool { - m.upgradeLk.Lock() - _, found := m.toUpgrade[id] - m.upgradeLk.Unlock() - return found -} - -func (m *Sealing) MarkForUpgrade(ctx context.Context, id abi.SectorNumber) error { - - m.upgradeLk.Lock() - defer m.upgradeLk.Unlock() - - _, found := m.toUpgrade[id] - if found { - return xerrors.Errorf("sector %d already marked for upgrade", id) - } - - si, err := m.GetSectorInfo(id) - if err != nil { - return xerrors.Errorf("getting sector info: %w", err) - } - if si.State != Proving { - return xerrors.Errorf("can't mark sectors not in the 'Proving' state for upgrade") - } - if len(si.Pieces) != 1 { - return xerrors.Errorf("not a committed-capacity sector, expected 1 piece") - } - if si.Pieces[0].DealInfo != nil { - return 
xerrors.Errorf("not a committed-capacity sector, has deals") - } - - m.toUpgrade[id] = struct{}{} - - return nil -} - func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) error { - cfg, err := m.getConfig() - if err != nil { - return xerrors.Errorf("getting storage config: %w", err) - } - - curStaging := m.stats.curStaging() - if cfg.MaxWaitDealsSectors > 0 && curStaging >= cfg.MaxWaitDealsSectors { - return xerrors.Errorf("already waiting for deals in %d >= %d (cfg.MaxWaitDealsSectors) sectors, no free resources to wait for deals in another", - curStaging, cfg.MaxWaitDealsSectors) - } - si, err := m.GetSectorInfo(id) if err != nil { return xerrors.Errorf("getting sector info: %w", err) @@ -70,11 +19,7 @@ func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) e return xerrors.Errorf("can't mark sectors not in the 'Proving' state for upgrade") } - if len(si.Pieces) != 1 { - return xerrors.Errorf("not a committed-capacity sector, expected 1 piece") - } - - if si.Pieces[0].DealInfo != nil { + if si.hasDeals() { return xerrors.Errorf("not a committed-capacity sector, has deals") } @@ -87,7 +32,7 @@ func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) e return xerrors.Errorf("failed to read sector on chain info: %w", err) } - active, err := sectorActive(ctx, m.Api, m.maddr, tok, id) + active, err := m.sectorActive(ctx, tok, id) if err != nil { return xerrors.Errorf("failed to check if sector is active") } @@ -100,79 +45,14 @@ func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) e "Upgrade expiration before marking for upgrade", id, onChainInfo.Expiration) } - return m.sectors.Send(uint64(id), SectorStartCCUpdate{}) + return m.sectors.Send(uint64(id), SectorMarkForUpdate{}) } -func sectorActive(ctx context.Context, api SealingAPI, maddr address.Address, tok TipSetToken, sector abi.SectorNumber) (bool, error) { - active, err := api.StateMinerActiveSectors(ctx, maddr, tok) 
+func (m *Sealing) sectorActive(ctx context.Context, tok TipSetToken, sector abi.SectorNumber) (bool, error) { + active, err := m.Api.StateMinerActiveSectors(ctx, m.maddr, tok) if err != nil { return false, xerrors.Errorf("failed to check active sectors: %w", err) } - // Ensure the upgraded sector is active - var found bool - for _, si := range active { - if si.SectorNumber == sector { - found = true - break - } - } - return found, nil -} - -func (m *Sealing) tryUpgradeSector(ctx context.Context, params *miner.SectorPreCommitInfo) big.Int { - if len(params.DealIDs) == 0 { - return big.Zero() - } - replace := m.maybeUpgradableSector() - if replace != nil { - loc, err := m.Api.StateSectorPartition(ctx, m.maddr, *replace, nil) - if err != nil { - log.Errorf("error calling StateSectorPartition for replaced sector: %+v", err) - return big.Zero() - } - - params.ReplaceCapacity = true - params.ReplaceSectorNumber = *replace - params.ReplaceSectorDeadline = loc.Deadline - params.ReplaceSectorPartition = loc.Partition - - log.Infof("replacing sector %d with %d", *replace, params.SectorNumber) - - ri, err := m.Api.StateSectorGetInfo(ctx, m.maddr, *replace, nil) - if err != nil { - log.Errorf("error calling StateSectorGetInfo for replaced sector: %+v", err) - return big.Zero() - } - if ri == nil { - log.Errorf("couldn't find sector info for sector to replace: %+v", replace) - return big.Zero() - } - - if params.Expiration < ri.Expiration { - // TODO: Some limit on this - params.Expiration = ri.Expiration - } - - return ri.InitialPledge - } - - return big.Zero() -} - -func (m *Sealing) maybeUpgradableSector() *abi.SectorNumber { - m.upgradeLk.Lock() - defer m.upgradeLk.Unlock() - for number := range m.toUpgrade { - // TODO: checks to match actor constraints - - // this one looks good - /*if checks */ - { - delete(m.toUpgrade, number) - return &number - } - } - - return nil + return active.IsSet(uint64(sector)) } diff --git a/gateway/node_test.go b/gateway/node_test.go index 
68711cca6..4705f9bd5 100644 --- a/gateway/node_test.go +++ b/gateway/node_test.go @@ -1,3 +1,4 @@ +//stm: #unit package gateway import ( @@ -94,6 +95,7 @@ func TestGatewayAPIChainGetTipSetByHeight(t *testing.T) { // Create tipsets from genesis up to tskh and return the highest ts := mock.createTipSets(tt.args.tskh, tt.args.genesisTS) + //stm: @GATEWAY_NODE_GET_TIPSET_BY_HEIGHT_001 got, err := a.ChainGetTipSetByHeight(ctx, tt.args.h, ts.Key()) if tt.expErr { require.Error(t, err) @@ -241,6 +243,7 @@ func (m *mockGatewayDepsAPI) Version(context.Context) (api.APIVersion, error) { } func TestGatewayVersion(t *testing.T) { + //stm: @GATEWAY_NODE_GET_VERSION_001 ctx := context.Background() mock := &mockGatewayDepsAPI{} a := NewNode(mock, DefaultLookbackCap, DefaultStateWaitLookbackLimit) diff --git a/gen/main.go b/gen/main.go index 0018b241d..f7b96c537 100644 --- a/gen/main.go +++ b/gen/main.go @@ -10,6 +10,7 @@ import ( "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/market" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/cmd/lotus-shed/shedgen" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/node/hello" @@ -106,4 +107,11 @@ func main() { fmt.Println(err) os.Exit(1) } + err = gen.WriteMapEncodersToFile("./cmd/lotus-shed/shedgen/cbor_gen.go", "shedgen", + shedgen.CarbNode{}, + ) + if err != nil { + fmt.Println(err) + os.Exit(1) + } } diff --git a/go.mod b/go.mod index 060613d19..03450afdb 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/filecoin-project/go-cbor-util v0.0.1 github.com/filecoin-project/go-commp-utils v0.1.3 github.com/filecoin-project/go-crypto v0.0.1 - github.com/filecoin-project/go-data-transfer v1.14.0 + github.com/filecoin-project/go-data-transfer v1.15.0 github.com/filecoin-project/go-fil-commcid v0.1.0 
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 github.com/filecoin-project/go-fil-markets v1.20.1 @@ -82,7 +82,7 @@ require ( github.com/ipfs/go-ds-leveldb v0.5.0 github.com/ipfs/go-ds-measure v0.2.0 github.com/ipfs/go-fs-lock v0.0.7 - github.com/ipfs/go-graphsync v0.12.0 + github.com/ipfs/go-graphsync v0.13.0 github.com/ipfs/go-ipfs-blockstore v1.1.2 github.com/ipfs/go-ipfs-blocksutil v0.0.1 github.com/ipfs/go-ipfs-chunker v0.0.5 @@ -101,19 +101,19 @@ require ( github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-metrics-prometheus v0.0.2 github.com/ipfs/go-unixfs v0.3.1 - github.com/ipfs/go-unixfsnode v1.2.0 + github.com/ipfs/go-unixfsnode v1.4.0 github.com/ipfs/interface-go-ipfs-core v0.5.2 github.com/ipld/go-car v0.3.3 github.com/ipld/go-car/v2 v2.1.1 github.com/ipld/go-codec-dagpb v1.3.0 - github.com/ipld/go-ipld-prime v0.14.4 + github.com/ipld/go-ipld-prime v0.16.0 github.com/ipld/go-ipld-selector-text-lite v0.0.1 github.com/jonboulle/clockwork v0.2.2 // indirect github.com/kelseyhightower/envconfig v1.4.0 github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 - github.com/libp2p/go-libp2p v0.18.0-rc5 - github.com/libp2p/go-libp2p-connmgr v0.3.1 // indirect + github.com/libp2p/go-libp2p v0.18.0-rc6 + github.com/libp2p/go-libp2p-connmgr v0.3.1 github.com/libp2p/go-libp2p-core v0.14.0 github.com/libp2p/go-libp2p-discovery v0.6.0 github.com/libp2p/go-libp2p-kad-dht v0.15.0 @@ -122,7 +122,7 @@ require ( github.com/libp2p/go-libp2p-pubsub v0.6.1 github.com/libp2p/go-libp2p-quic-transport v0.16.1 github.com/libp2p/go-libp2p-record v0.1.3 - github.com/libp2p/go-libp2p-resource-manager v0.1.4 + github.com/libp2p/go-libp2p-resource-manager v0.1.5 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 github.com/libp2p/go-libp2p-swarm v0.10.2 github.com/libp2p/go-libp2p-tls v0.3.1 diff --git a/go.sum b/go.sum index d07b83200..79629f1b5 100644 --- a/go.sum +++ b/go.sum @@ -324,12 +324,12 @@ github.com/filecoin-project/go-crypto 
v0.0.0-20191218222705-effae4ea9f03/go.mod github.com/filecoin-project/go-crypto v0.0.1 h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-dagaggregator-unixfs v0.2.0/go.mod h1:WTuJWgBQY0omnQqa8kRPT9O0Uj5wQOgslVMUuTeHdJ8= -github.com/filecoin-project/go-data-transfer v1.14.0 h1:4pnfJk8FYtqcdAg+QRGzaz57seUC/Tz+HJgPuGB7zdg= github.com/filecoin-project/go-data-transfer v1.14.0/go.mod h1:wNJKhaLLYBJDM3VFvgvYi4iUjPa69pz/1Q5Q4HzX2wE= +github.com/filecoin-project/go-data-transfer v1.15.0 h1:gVH7MxEgoj/qXPz+S6ggFlHlDv1mLlRZuJtTvcq8r1o= +github.com/filecoin-project/go-data-transfer v1.15.0/go.mod h1:RaJIYjh6x6z+FXKNvUULOdUZdN+JutKigfcMMbfykWA= github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= github.com/filecoin-project/go-ds-versioning v0.1.1 h1:JiyBqaQlwC+UM0WhcBtVEeT3XrX59mQhT8U3p7nu86o= github.com/filecoin-project/go-ds-versioning v0.1.1/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= @@ -389,8 +389,8 @@ github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4U github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew= github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= 
-github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1.0.20220118005651-2470cb39827e/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M= github.com/filecoin-project/specs-actors/v7 v7.0.0 h1:FQN7tjt3o68hfb3qLFSJBoLMuOFY0REkFVLO/zXj8RU= github.com/filecoin-project/specs-actors/v7 v7.0.0/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M= github.com/filecoin-project/specs-storage v0.2.0 h1:Y4UDv0apRQ3zI2GiPPubi8JblpUZZphEdaJUxCutfyg= @@ -410,8 +410,9 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns= +github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= @@ -551,8 +552,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -754,8 +756,9 @@ github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28 github.com/ipfs/go-fs-lock v0.0.7 h1:6BR3dajORFrFTkb5EpCUFIAypsoxpGpDSVUdFwzgL9U= github.com/ipfs/go-fs-lock v0.0.7/go.mod h1:Js8ka+FNYmgQRLrRXzU3CB/+Csr1BwrRilEcvYrHhhc= github.com/ipfs/go-graphsync v0.11.0/go.mod h1:wC+c8vGVjAHthsVIl8LKr37cUra2GOaMYcQNNmMxDqE= -github.com/ipfs/go-graphsync v0.12.0 h1:QCsVHVzb9FTkcm3NEa8GjXnUeGit1L9s08HcSVQ4m/g= github.com/ipfs/go-graphsync v0.12.0/go.mod h1:nASYWYETgsnMbQ3+DirNImOHQ8TY0a5AhAqyOY55tUg= +github.com/ipfs/go-graphsync v0.13.0 h1:8reYjVKxKocJ9jD471xs9XNuegquPrnBFuGZmCqT8zU= +github.com/ipfs/go-graphsync v0.13.0/go.mod h1:oPBU9JGNlyWHyH9lWYmyl19M++5yiXjBnNC4boh5nVU= github.com/ipfs/go-ipfs v0.11.0/go.mod h1:g68Thu2Ho11AWoHsN34P5fSK7iA6OWWRy3T/g8HLixc= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= @@ -886,8 +889,9 @@ github.com/ipfs/go-unixfs v0.3.1 h1:LrfED0OGfG98ZEegO4/xiprx2O+yS+krCMQSp7zLVv8= github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= github.com/ipfs/go-unixfsnode v1.1.2/go.mod 
h1:5dcE2x03pyjHk4JjamXmunTMzz+VUtqvPwZjIEkfV6s= github.com/ipfs/go-unixfsnode v1.1.3/go.mod h1:ZZxUM5wXBC+G0Co9FjrYTOm+UlhZTjxLfRYdWY9veZ4= -github.com/ipfs/go-unixfsnode v1.2.0 h1:tHHBJftsJyHGa8bS62PpkYNqHy/Sug3c/vxxC8NaGQY= github.com/ipfs/go-unixfsnode v1.2.0/go.mod h1:mQEgLjxkV/1mohkC4p7taRRBYPBeXu97SA3YaerT2q0= +github.com/ipfs/go-unixfsnode v1.4.0 h1:9BUxHBXrbNi8mWHc6j+5C580WJqtVw9uoeEKn4tMhwA= +github.com/ipfs/go-unixfsnode v1.4.0/go.mod h1:qc7YFFZ8tABc58p62HnIYbUMwj9chhUuFWmxSokfePo= github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= github.com/ipfs/interface-go-ipfs-core v0.4.0/go.mod h1:UJBcU6iNennuI05amq3FQ7g0JHUkibHFAfhfUIy927o= @@ -918,8 +922,9 @@ github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD github.com/ipld/go-ipld-prime v0.14.1/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime v0.14.2/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime v0.14.3-0.20211207234443-319145880958/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= -github.com/ipld/go-ipld-prime v0.14.4 h1:bqhmume8+nbNsX4/+J6eohktfZHAI8GKrF3rQ0xgOyc= github.com/ipld/go-ipld-prime v0.14.4/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.16.0 h1:RS5hhjB/mcpeEPJvfyj0qbOj/QL+/j05heZ0qa97dVo= +github.com/ipld/go-ipld-prime v0.16.0/go.mod h1:axSCuOCBPqrH+gvXr2w9uAOulJqBPhHPT2PjoiiU1qA= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= @@ -1060,8 +1065,8 @@ github.com/libp2p/go-libp2p 
v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76f github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8= github.com/libp2p/go-libp2p v0.18.0-rc3/go.mod h1:WYL+Xw1iuwi6rdfzw5VIEpD+HqzYucHZ6fcUuumbI3M= -github.com/libp2p/go-libp2p v0.18.0-rc5 h1:88wWDHb9nNo0vBNCupLde3OTnFAkugOCNkrDfl3ivK4= -github.com/libp2p/go-libp2p v0.18.0-rc5/go.mod h1:aZPS5l84bDvCvP4jkyEUT/J6YOpUq33Fgqrs3K59mpI= +github.com/libp2p/go-libp2p v0.18.0-rc6 h1:IR6TVPYGo1wDY0tY61gyPQVxK1koOkXh49ejVfAnH7A= +github.com/libp2p/go-libp2p v0.18.0-rc6/go.mod h1:oOUOAlBrm1L0+jxT10h2TMUMTDz6pV3EvmkJ3beDYGQ= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E= github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= @@ -1173,8 +1178,9 @@ github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxW github.com/libp2p/go-libp2p-mplex v0.3.0/go.mod h1:l9QWxRbbb5/hQMECEb908GbS9Sm2UAR2KFZKUJEynEs= github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= -github.com/libp2p/go-libp2p-mplex v0.5.0 h1:vt3k4E4HSND9XH4Z8rUpacPJFSAgLOv6HDvG8W9Ks9E= github.com/libp2p/go-libp2p-mplex v0.5.0/go.mod h1:eLImPJLkj3iG5t5lq68w3Vm5NAQ5BcKwrrb2VmOYb3M= +github.com/libp2p/go-libp2p-mplex v0.6.0 h1:5ubK4/vLE2JkogKlJ2JLeXcSfA6qY6mE2HMJV9ve/Sk= +github.com/libp2p/go-libp2p-mplex v0.6.0/go.mod h1:i3usuPrBbh9FD2fLZjGpotyNkwr42KStYZQY7BeTiu4= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= 
github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= @@ -1232,8 +1238,8 @@ github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs2 github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= github.com/libp2p/go-libp2p-resource-manager v0.1.0/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= github.com/libp2p/go-libp2p-resource-manager v0.1.3/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= -github.com/libp2p/go-libp2p-resource-manager v0.1.4 h1:RcxMD0pytOUimx3BqTVs6IqItb3H5Qg44SD7XyT68lw= -github.com/libp2p/go-libp2p-resource-manager v0.1.4/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= +github.com/libp2p/go-libp2p-resource-manager v0.1.5 h1:7J6t9KLFS0MxXDTfqA6rwfVCZl/yLQnXW5LpZjHAANI= +github.com/libp2p/go-libp2p-resource-manager v0.1.5/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY= @@ -1272,8 +1278,9 @@ github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotl github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= github.com/libp2p/go-libp2p-testing v0.6.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= -github.com/libp2p/go-libp2p-testing v0.7.0 h1:9bfyhNINizxuLrKsenzGaZalXRXIaAEmx1BP/PzF1gM= github.com/libp2p/go-libp2p-testing v0.7.0/go.mod h1:OLbdn9DbgdMwv00v+tlp1l3oe2Cl+FAjoWIA2pa0X6E= +github.com/libp2p/go-libp2p-testing v0.8.0 h1:/te8SOIyj5sGH5Jr1Uoo+qYB00aK8O4+yHGzLgfE3kc= +github.com/libp2p/go-libp2p-testing v0.8.0/go.mod h1:gRdsNxQSxAZowTgcLY7CC33xPmleZzoBpqSYbWenqPc= 
github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-tls v0.3.1 h1:lsE2zYte+rZCEOHF72J1Fg3XK3dGQyKvI6i5ehJfEp0= @@ -1325,8 +1332,9 @@ github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3 github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= -github.com/libp2p/go-mplex v0.4.0 h1:Ukkez9/4EOX5rTw4sHefNJp10dksftAA05ZgyjplUbM= github.com/libp2p/go-mplex v0.4.0/go.mod h1:y26Lx+wNVtMYMaPu300Cbot5LkEZ4tJaNYeHeT9dh6E= +github.com/libp2p/go-mplex v0.6.0 h1:5kKp029zrsLVJT5q6ASt4LwuZFxj3B13wXXaGmFrWg0= +github.com/libp2p/go-mplex v0.6.0/go.mod h1:y26Lx+wNVtMYMaPu300Cbot5LkEZ4tJaNYeHeT9dh6E= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= @@ -1934,6 +1942,7 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:f github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20220224212727-7a699437a831/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= 
github.com/whyrusleeping/cbor-gen v0.0.0-20220302191723-37c43cae8e14 h1:vo2wkP2ceHyGyZwFFtAabpot03EeSxxwAe57pOI9E/4= github.com/whyrusleeping/cbor-gen v0.0.0-20220302191723-37c43cae8e14/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= @@ -2438,7 +2447,6 @@ golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= diff --git a/itests/batch_deal_test.go b/itests/batch_deal_test.go index 93d338e53..b3c77fcba 100644 --- a/itests/batch_deal_test.go +++ b/itests/batch_deal_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -9,16 +10,19 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/stretchr/testify/require" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/markets/storageadapter" "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/stretchr/testify/require" ) func TestBatchDealInput(t *testing.T) { - t.Skip("this test is disabled as it's flaky: #4611") + //stm: 
@MINER_SECTOR_STATUS_001, @MINER_SECTOR_LIST_001 kit.QuietMiningLogs() var ( @@ -47,17 +51,21 @@ func TestBatchDealInput(t *testing.T) { })), node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) { return func() (sealiface.Config, error) { - return sealiface.Config{ - MaxWaitDealsSectors: 2, - MaxSealingSectors: 1, - MaxSealingSectorsForDeals: 3, - AlwaysKeepUnsealedCopy: true, - WaitDealsDelay: time.Hour, - }, nil + cfg := config.DefaultStorageMiner() + sc := modules.ToSealingConfig(cfg.Dealmaking, cfg.Sealing) + sc.MaxWaitDealsSectors = 2 + sc.MaxSealingSectors = 1 + sc.MaxSealingSectorsForDeals = 3 + sc.AlwaysKeepUnsealedCopy = true + sc.WaitDealsDelay = time.Hour + sc.BatchPreCommits = false + sc.AggregateCommits = false + + return sc, nil }, nil }), )) - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts) + client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts, kit.ThroughRPC()) ens.InterconnectAll().BeginMining(blockTime) dh := kit.NewDealHarness(t, client, miner, miner) @@ -126,9 +134,9 @@ func TestBatchDealInput(t *testing.T) { t.Run("4-p513B", run(513, 4, 2)) if !testing.Short() { t.Run("32-p257B", run(257, 32, 8)) - t.Run("32-p10B", run(10, 32, 2)) // fixme: this appears to break data-transfer / markets in some really creative ways + //t.Run("32-p10B", run(10, 32, 2)) // t.Run("128-p10B", run(10, 128, 8)) } } diff --git a/itests/ccupgrade_test.go b/itests/ccupgrade_test.go index 6e7a5d090..ac89088c2 100644 --- a/itests/ccupgrade_test.go +++ b/itests/ccupgrade_test.go @@ -10,12 +10,14 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/itests/kit" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + 
"github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + sealing "github.com/filecoin-project/lotus/extern/storage-sealing" + "github.com/filecoin-project/lotus/itests/kit" ) func TestCCUpgrade(t *testing.T) { @@ -48,9 +50,6 @@ func runTestCCUpgrade(t *testing.T) *kit.TestFullNode { CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1) fmt.Printf("CCUpgrade: %d\n", CCUpgrade) - // wait for deadline 0 to pass so that committing starts after post on preseals - // this gives max time for post to complete minimizing chances of timeout - // waitForDeadline(ctx, t, 1, client, maddr) miner.PledgeSectors(ctx, 1, 0, nil) sl, err := miner.SectorsList(ctx) require.NoError(t, err) @@ -63,6 +62,7 @@ func runTestCCUpgrade(t *testing.T) *kit.TestFullNode { } waitForSectorActive(ctx, t, CCUpgrade, client, maddr) + //stm: @SECTOR_CC_UPGRADE_001 err = miner.SectorMarkForUpgrade(ctx, sl[0], true) require.NoError(t, err) @@ -89,18 +89,6 @@ func runTestCCUpgrade(t *testing.T) *kit.TestFullNode { return client } -func waitForDeadline(ctx context.Context, t *testing.T, waitIdx uint64, node *kit.TestFullNode, maddr address.Address) { - for { - ts, err := node.ChainHead(ctx) - require.NoError(t, err) - dl, err := node.StateMinerProvingDeadline(ctx, maddr, ts.Key()) - require.NoError(t, err) - if dl.Index == waitIdx { - return - } - } -} - func waitForSectorActive(ctx context.Context, t *testing.T, sn abi.SectorNumber, node *kit.TestFullNode, maddr address.Address) { for { active, err := node.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) @@ -116,18 +104,6 @@ func waitForSectorActive(ctx context.Context, t *testing.T, sn abi.SectorNumber, } } -func waitForSectorStartUpgrade(ctx context.Context, t *testing.T, sn abi.SectorNumber, miner *kit.TestMiner) { - for { - si, err := miner.StorageMiner.SectorsStatus(ctx, sn, false) - require.NoError(t, err) - if si.State != api.SectorState("Proving") { - t.Logf("Done proving sector in state: 
%s", si.State) - return - } - - } -} - func TestCCUpgradeAndPoSt(t *testing.T) { kit.QuietMiningLogs() t.Run("upgrade and then post", func(t *testing.T) { @@ -148,13 +124,13 @@ func TestCCUpgradeAndPoSt(t *testing.T) { }) } -func TestTooManyMarkedForUpgrade(t *testing.T) { +func TestAbortUpgradeAvailable(t *testing.T) { kit.QuietMiningLogs() ctx := context.Background() blockTime := 1 * time.Millisecond - client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15)) + client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15), kit.ThroughRPC()) ens.InterconnectAll().BeginMiningMustPost(blockTime) maddr, err := miner.ActorAddress(ctx) @@ -163,32 +139,53 @@ func TestTooManyMarkedForUpgrade(t *testing.T) { } CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1) - waitForDeadline(ctx, t, 1, client, maddr) - miner.PledgeSectors(ctx, 3, 0, nil) + fmt.Printf("CCUpgrade: %d\n", CCUpgrade) + miner.PledgeSectors(ctx, 1, 0, nil) sl, err := miner.SectorsList(ctx) require.NoError(t, err) - require.Len(t, sl, 3, "expected 3 sectors") - + require.Len(t, sl, 1, "expected 1 sector") + require.Equal(t, CCUpgrade, sl[0], "unexpected sector number") { si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK) require.NoError(t, err) require.Less(t, 50000, int(si.Expiration)) } - waitForSectorActive(ctx, t, CCUpgrade, client, maddr) - waitForSectorActive(ctx, t, CCUpgrade+1, client, maddr) - waitForSectorActive(ctx, t, CCUpgrade+2, client, maddr) - err = miner.SectorMarkForUpgrade(ctx, CCUpgrade, true) - require.NoError(t, err) - err = miner.SectorMarkForUpgrade(ctx, CCUpgrade+1, true) + err = miner.SectorMarkForUpgrade(ctx, sl[0], true) require.NoError(t, err) - waitForSectorStartUpgrade(ctx, t, CCUpgrade, miner) - waitForSectorStartUpgrade(ctx, t, CCUpgrade+1, miner) + sl, err = miner.SectorsList(ctx) + require.NoError(t, err) + require.Len(t, sl, 1, "expected 1 sector") - err = 
miner.SectorMarkForUpgrade(ctx, CCUpgrade+2, true) - require.Error(t, err) - assert.Contains(t, err.Error(), "no free resources to wait for deals") + ss, err := miner.SectorsStatus(ctx, sl[0], false) + require.NoError(t, err) + + for i := 0; i < 100; i++ { + ss, err = miner.SectorsStatus(ctx, sl[0], false) + require.NoError(t, err) + if ss.State == api.SectorState(sealing.Proving) { + time.Sleep(50 * time.Millisecond) + continue + } + + require.Equal(t, api.SectorState(sealing.Available), ss.State) + break + } + + require.NoError(t, miner.SectorAbortUpgrade(ctx, sl[0])) + + for i := 0; i < 100; i++ { + ss, err = miner.SectorsStatus(ctx, sl[0], false) + require.NoError(t, err) + if ss.State == api.SectorState(sealing.Available) { + time.Sleep(50 * time.Millisecond) + continue + } + + require.Equal(t, api.SectorState(sealing.Proving), ss.State) + break + } } diff --git a/itests/deals_concurrent_test.go b/itests/deals_concurrent_test.go index f8343415c..015cf03a1 100644 --- a/itests/deals_concurrent_test.go +++ b/itests/deals_concurrent_test.go @@ -27,6 +27,10 @@ import ( // TestDealWithMarketAndMinerNode is running concurrently a number of storage and retrieval deals towards a miner // architecture where the `mining/sealing/proving` node is a separate process from the `markets` node func TestDealWithMarketAndMinerNode(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 if testing.Short() { t.Skip("skipping test in short mode") } @@ -126,6 +130,10 @@ func TestDealCyclesConcurrent(t *testing.T) { } func TestSimultanenousTransferLimit(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, 
@CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 t.Skip("skipping as flaky #7152") if testing.Short() { diff --git a/itests/deals_partial_retrieval_dm-level_test.go b/itests/deals_partial_retrieval_dm-level_test.go index fd289a0ac..4b1a5b0e3 100644 --- a/itests/deals_partial_retrieval_dm-level_test.go +++ b/itests/deals_partial_retrieval_dm-level_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -36,7 +37,13 @@ var ( ) func TestDMLevelPartialRetrieval(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_RETRIEVAL_RETRIEVE_001, @CLIENT_RETRIEVAL_FIND_001 ctx := context.Background() policy.SetPreCommitChallengeDelay(2) diff --git a/itests/kit/blockminer.go b/itests/kit/blockminer.go index a232d82e0..03f5d6e34 100644 --- a/itests/kit/blockminer.go +++ b/itests/kit/blockminer.go @@ -192,7 +192,11 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur InjectNulls: abi.ChainEpoch(nulls + i), Done: reportSuccessFn, }) - success = <-wait + select { + case success = <-wait: + case <-ctx.Done(): + return + } if !success { // if we are mining a new null block and it brings us past deadline boundary we need to wait for miner to 
post if ts.Height()+1+abi.ChainEpoch(nulls+i) >= dlinfo.Last() { diff --git a/itests/kit/itestd.go b/itests/kit/itestd.go new file mode 100644 index 000000000..5911a2031 --- /dev/null +++ b/itests/kit/itestd.go @@ -0,0 +1,36 @@ +package kit + +import ( + "bytes" + "encoding/json" + "net/http" + "os" +) + +type ItestdNotif struct { + NodeType string // api env var name + TestName string + Api string +} + +func sendItestdNotif(nodeType, testName, apiAddr string) { + td := os.Getenv("LOTUS_ITESTD") + if td == "" { + // not running + return + } + + notif := ItestdNotif{ + NodeType: nodeType, + TestName: testName, + Api: apiAddr, + } + nb, err := json.Marshal(¬if) + if err != nil { + return + } + + if _, err := http.Post(td, "application/json", bytes.NewReader(nb)); err != nil { // nolint:gosec + return + } +} diff --git a/itests/kit/log.go b/itests/kit/log.go index 3dce3af9d..4ec610baf 100644 --- a/itests/kit/log.go +++ b/itests/kit/log.go @@ -15,5 +15,6 @@ func QuietMiningLogs() { _ = logging.SetLogLevel("storageminer", "ERROR") _ = logging.SetLogLevel("pubsub", "ERROR") _ = logging.SetLogLevel("gen", "ERROR") + _ = logging.SetLogLevel("rpc", "ERROR") _ = logging.SetLogLevel("dht/RtRefreshManager", "ERROR") } diff --git a/itests/kit/rpc.go b/itests/kit/rpc.go index 61c8a7b23..1abab8005 100644 --- a/itests/kit/rpc.go +++ b/itests/kit/rpc.go @@ -40,6 +40,7 @@ func fullRpc(t *testing.T, f *TestFullNode) *TestFullNode { srv, maddr := CreateRPCServer(t, handler, l) fmt.Printf("FULLNODE RPC ENV FOR CLI DEBUGGING `export FULLNODE_API_INFO=%s`\n", "ws://"+srv.Listener.Addr().String()) + sendItestdNotif("FULLNODE_API_INFO", t.Name(), "ws://"+srv.Listener.Addr().String()) cl, stop, err := client.NewFullNodeRPCV1(context.Background(), "ws://"+srv.Listener.Addr().String()+"/rpc/v1", nil) require.NoError(t, err) @@ -57,6 +58,7 @@ func minerRpc(t *testing.T, m *TestMiner) *TestMiner { fmt.Printf("creating RPC server for %s at %s\n", m.ActorAddr, srv.Listener.Addr().String()) 
fmt.Printf("SP RPC ENV FOR CLI DEBUGGING `export MINER_API_INFO=%s`\n", "ws://"+srv.Listener.Addr().String()) + sendItestdNotif("MINER_API_INFO", t.Name(), "ws://"+srv.Listener.Addr().String()) url := "ws://" + srv.Listener.Addr().String() + "/rpc/v0" cl, stop, err := client.NewStorageMinerRPCV0(context.Background(), url, nil) diff --git a/itests/sector_miner_collateral_test.go b/itests/sector_miner_collateral_test.go index af67b132b..22c04b5ea 100644 --- a/itests/sector_miner_collateral_test.go +++ b/itests/sector_miner_collateral_test.go @@ -17,6 +17,8 @@ import ( "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo" ) @@ -40,29 +42,32 @@ func TestMinerBalanceCollateral(t *testing.T) { opts := kit.ConstructorOpts( node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) { return func() (sealiface.Config, error) { - return sealiface.Config{ - MaxWaitDealsSectors: 4, - MaxSealingSectors: 4, - MaxSealingSectorsForDeals: 4, - AlwaysKeepUnsealedCopy: true, - WaitDealsDelay: time.Hour, + cfg := config.DefaultStorageMiner() + sc := modules.ToSealingConfig(cfg.Dealmaking, cfg.Sealing) - BatchPreCommits: batching, - AggregateCommits: batching, + sc.MaxWaitDealsSectors = 4 + sc.MaxSealingSectors = 4 + sc.MaxSealingSectorsForDeals = 4 + sc.AlwaysKeepUnsealedCopy = true + sc.WaitDealsDelay = time.Hour - PreCommitBatchWait: time.Hour, - CommitBatchWait: time.Hour, + sc.BatchPreCommits = batching + sc.AggregateCommits = batching - MinCommitBatch: nSectors, - MaxPreCommitBatch: nSectors, - MaxCommitBatch: nSectors, + sc.PreCommitBatchWait = time.Hour + sc.CommitBatchWait = time.Hour - 
CollateralFromMinerBalance: enabled, - AvailableBalanceBuffer: big.Zero(), - DisableCollateralFallback: false, - AggregateAboveBaseFee: big.Zero(), - BatchPreCommitAboveBaseFee: big.Zero(), - }, nil + sc.MinCommitBatch = nSectors + sc.MaxPreCommitBatch = nSectors + sc.MaxCommitBatch = nSectors + + sc.CollateralFromMinerBalance = enabled + sc.AvailableBalanceBuffer = big.Zero() + sc.DisableCollateralFallback = false + sc.AggregateAboveBaseFee = big.Zero() + sc.BatchPreCommitAboveBaseFee = big.Zero() + + return sc, nil }, nil })), ) diff --git a/itests/self_sent_txn_test.go b/itests/self_sent_txn_test.go index b5ec2c0dc..8d608ba95 100644 --- a/itests/self_sent_txn_test.go +++ b/itests/self_sent_txn_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -19,6 +20,7 @@ import ( // we reordered the checks to make sure that a transaction with too much money in it sent to yourself will fail instead of succeeding as a noop // more info in this PR! https://github.com/filecoin-project/lotus/pull/7637 func TestSelfSentTxnV15(t *testing.T) { + //stm: @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_PUSH_001 ctx := context.Background() kit.QuietMiningLogs() @@ -60,6 +62,7 @@ func TestSelfSentTxnV15(t *testing.T) { } func TestSelfSentTxnV14(t *testing.T) { + //stm: @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_PUSH_001 ctx := context.Background() kit.QuietMiningLogs() diff --git a/journal/alerting/alerts_test.go b/journal/alerting/alerts_test.go index 46ab4bbbf..8147ac5e8 100644 --- a/journal/alerting/alerts_test.go +++ b/journal/alerting/alerts_test.go @@ -1,3 +1,4 @@ +//stm: #unit package alerting import ( @@ -12,6 +13,7 @@ import ( ) func TestAlerting(t *testing.T) { + //stm: @JOURNAL_ALERTS_ADD_ALERT_TYPE_001, @JOURNAL_ALERTS_RAISE_001, @JOURNAL_ALERTS_GET_ALERTS_001 mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() j := mockjournal.NewMockJournal(mockCtrl) diff --git a/journal/registry_test.go b/journal/registry_test.go index bce3d3f17..cb83be6e9 100644 --- 
a/journal/registry_test.go +++ b/journal/registry_test.go @@ -1,3 +1,4 @@ +//stm: #unit package journal import ( @@ -7,6 +8,7 @@ import ( ) func TestDisabledEvents(t *testing.T) { + //stm: @JOURNAL_REGISTRY_NEW_EVENT_TYPE_001, @JOURNAL_REGISTRY_PARSE_DISABLED_001 req := require.New(t) test := func(dis DisabledEvents) func(*testing.T) { @@ -44,6 +46,7 @@ func TestDisabledEvents(t *testing.T) { } func TestParseDisableEvents(t *testing.T) { + //stm: @JOURNAL_REGISTRY_PARSE_DISABLED_002 _, err := ParseDisabledEvents("system1:disabled1:failed,system1:disabled2") require.Error(t, err) } diff --git a/lib/backupds/backupds_test.go b/lib/backupds/backupds_test.go index c681491e3..bbbbdcd2a 100644 --- a/lib/backupds/backupds_test.go +++ b/lib/backupds/backupds_test.go @@ -1,3 +1,4 @@ +//stm: #unit package backupds import ( @@ -5,7 +6,6 @@ import ( "context" "fmt" "io/ioutil" - "os" "path/filepath" "strings" "testing" @@ -37,6 +37,7 @@ func checkVals(t *testing.T, ds datastore.Datastore, start, end int, exist bool) } func TestNoLogRestore(t *testing.T) { + //stm: @OTHER_DATASTORE_RESTORE_002 ds1 := datastore.NewMapDatastore() putVals(t, ds1, 0, 10) @@ -57,9 +58,8 @@ func TestNoLogRestore(t *testing.T) { } func TestLogRestore(t *testing.T) { - logdir, err := ioutil.TempDir("", "backupds-test-") - require.NoError(t, err) - defer os.RemoveAll(logdir) // nolint + //stm: @OTHER_DATASTORE_RESTORE_001 + logdir := t.TempDir() ds1 := datastore.NewMapDatastore() diff --git a/lib/lotuslog/config.go b/lib/lotuslog/config.go new file mode 100644 index 000000000..bf6ceb63f --- /dev/null +++ b/lib/lotuslog/config.go @@ -0,0 +1,11 @@ +package lotuslog + +import logging "github.com/ipfs/go-log/v2" + +func SetLevelsFromConfig(l map[string]string) { + for sys, level := range l { + if err := logging.SetLogLevel(sys, level); err != nil { + continue + } + } +} diff --git a/lib/rpcenc/reader_test.go b/lib/rpcenc/reader_test.go index 87296e1e5..302908df0 100644 --- a/lib/rpcenc/reader_test.go +++ 
b/lib/rpcenc/reader_test.go @@ -1,3 +1,4 @@ +//stm: #unit package rpcenc import ( diff --git a/lib/sigs/bls/bls_bench_test.go b/lib/sigs/bls/bls_bench_test.go index 118d25975..2f1f6ee6f 100644 --- a/lib/sigs/bls/bls_bench_test.go +++ b/lib/sigs/bls/bls_bench_test.go @@ -1,3 +1,5 @@ +//stm: ignore +// Ignored because implementation relies on external (ffi) lib package bls import ( diff --git a/lib/sigs/bls/bls_test.go b/lib/sigs/bls/bls_test.go index 4508d0eb9..d6956a383 100644 --- a/lib/sigs/bls/bls_test.go +++ b/lib/sigs/bls/bls_test.go @@ -1,3 +1,5 @@ +//stm: ignore +// Ignored because implementation relies on external (ffi) lib package bls_test import ( diff --git a/lib/stati/stats_test.go b/lib/stati/stats_test.go index fa92913b6..b6aa7a0a5 100644 --- a/lib/stati/stats_test.go +++ b/lib/stati/stats_test.go @@ -1,3 +1,4 @@ +//stm: ignore package stati import ( diff --git a/lib/tablewriter/tablewiter_test.go b/lib/tablewriter/tablewiter_test.go index 9c5f9d37d..ce676abdf 100644 --- a/lib/tablewriter/tablewiter_test.go +++ b/lib/tablewriter/tablewiter_test.go @@ -1,3 +1,4 @@ +//stm: #unit package tablewriter import ( @@ -8,6 +9,7 @@ import ( ) func TestTableWriter(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_TABLE_WRITE_001, @OTHER_IMPLEMENTATION_TABLE_FLUSH_001 tw := New(Col("C1"), Col("X"), Col("C333"), NewLineCol("Thing")) tw.Write(map[string]interface{}{ "C1": "234", diff --git a/lib/ulimit/ulimit_test.go b/lib/ulimit/ulimit_test.go index ecd777e3d..071c6013c 100644 --- a/lib/ulimit/ulimit_test.go +++ b/lib/ulimit/ulimit_test.go @@ -1,6 +1,9 @@ +//stm: ignore //go:build !windows // +build !windows +// This file tests file descriptor limits; since this is an OS feature, it should not be annotated + package ulimit import ( diff --git a/markets/dagstore/miner_api_test.go b/markets/dagstore/miner_api_test.go index ee2f0cdce..637600bfc 100644 --- a/markets/dagstore/miner_api_test.go +++ b/markets/dagstore/miner_api_test.go @@ -1,3 +1,4 @@ +//stm: #unit package 
dagstore import ( @@ -88,6 +89,7 @@ func TestLotusAccessorFetchUnsealedPiece(t *testing.T) { } // Fetch the piece + //stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001 r, err := api.FetchUnsealedPiece(ctx, cid1) if tc.expectErr { require.Error(t, err) @@ -101,6 +103,7 @@ func TestLotusAccessorFetchUnsealedPiece(t *testing.T) { require.Equal(t, tc.fetchedData, string(bz)) + //stm: @MARKET_DAGSTORE_IS_PIECE_UNSEALED_001 uns, err := api.IsUnsealed(ctx, cid1) require.NoError(t, err) require.Equal(t, tc.isUnsealed, uns) @@ -126,6 +129,7 @@ func TestLotusAccessorGetUnpaddedCARSize(t *testing.T) { require.NoError(t, err) // Check that the data length is correct + //stm: @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001 len, err := api.GetUnpaddedCARSize(ctx, cid1) require.NoError(t, err) require.EqualValues(t, 10, len) @@ -160,6 +164,7 @@ func TestThrottle(t *testing.T) { errgrp, ctx := errgroup.WithContext(context.Background()) for i := 0; i < 10; i++ { errgrp.Go(func() error { + //stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001 r, err := api.FetchUnsealedPiece(ctx, cid1) if err == nil { _ = r.Close() diff --git a/markets/dagstore/mount_test.go b/markets/dagstore/mount_test.go index d6ea54964..82cfa7cb8 100644 --- a/markets/dagstore/mount_test.go +++ b/markets/dagstore/mount_test.go @@ -1,3 +1,4 @@ +//stm: @unit package dagstore import ( @@ -17,6 +18,8 @@ import ( ) func TestLotusMount(t *testing.T) { + //stm: @MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001, @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001 + //stm: @MARKET_DAGSTORE_IS_PIECE_UNSEALED_001 ctx := context.Background() bgen := blocksutil.NewBlockGenerator() cid := bgen.Next().Cid() @@ -88,6 +91,7 @@ func TestLotusMount(t *testing.T) { } func TestLotusMountDeserialize(t *testing.T) { + //stm: @MARKET_DAGSTORE_DESERIALIZE_CID_001 api := &minerAPI{} bgen := blocksutil.NewBlockGenerator() @@ -115,6 +119,8 @@ func TestLotusMountDeserialize(t *testing.T) { } func TestLotusMountRegistration(t *testing.T) { + //stm: 
@MARKET_DAGSTORE_FETCH_UNSEALED_PIECE_001, @MARKET_DAGSTORE_GET_UNPADDED_CAR_SIZE_001 + //stm: @MARKET_DAGSTORE_IS_PIECE_UNSEALED_001 ctx := context.Background() bgen := blocksutil.NewBlockGenerator() cid := bgen.Next().Cid() diff --git a/markets/dagstore/wrapper_migration_test.go b/markets/dagstore/wrapper_migration_test.go index a08e46e6c..6c04f6929 100644 --- a/markets/dagstore/wrapper_migration_test.go +++ b/markets/dagstore/wrapper_migration_test.go @@ -1,3 +1,4 @@ +//stm: #integration package dagstore import ( @@ -59,6 +60,7 @@ func TestShardRegistration(t *testing.T) { deals := []storagemarket.MinerDeal{{ // Should be registered + //stm: @MARKET_DAGSTORE_MIGRATE_DEALS_001 State: storagemarket.StorageDealSealing, SectorNumber: unsealedSector1, ClientDealProposal: market.ClientDealProposal{ @@ -77,6 +79,7 @@ func TestShardRegistration(t *testing.T) { }, }, { // Should be ignored because deal is no longer active + //stm: @MARKET_DAGSTORE_MIGRATE_DEALS_003 State: storagemarket.StorageDealError, SectorNumber: unsealedSector2, ClientDealProposal: market.ClientDealProposal{ @@ -114,6 +117,7 @@ func TestShardRegistration(t *testing.T) { require.True(t, migrated) require.NoError(t, err) + //stm: @MARKET_DAGSTORE_GET_ALL_SHARDS_001 info := dagst.AllShardsInfo() require.Len(t, info, 2) for _, i := range info { @@ -121,6 +125,7 @@ func TestShardRegistration(t *testing.T) { } // Run register shard migration again + //stm: @MARKET_DAGSTORE_MIGRATE_DEALS_002 migrated, err = w.MigrateDeals(ctx, deals) require.False(t, migrated) require.NoError(t, err) diff --git a/markets/dagstore/wrapper_test.go b/markets/dagstore/wrapper_test.go index 3debee061..ae99756cf 100644 --- a/markets/dagstore/wrapper_test.go +++ b/markets/dagstore/wrapper_test.go @@ -1,3 +1,4 @@ +//stm: #unit package dagstore import ( @@ -56,6 +57,7 @@ func TestWrapperAcquireRecovery(t *testing.T) { } w.dagst = mock + //stm: @MARKET_DAGSTORE_ACQUIRE_SHARD_002 mybs, err := w.LoadShard(ctx, pieceCid) 
require.NoError(t, err) @@ -104,10 +106,12 @@ func TestWrapperBackground(t *testing.T) { w.dagst = mock // Start up the wrapper + //stm: @MARKET_DAGSTORE_START_001 err = w.Start(ctx) require.NoError(t, err) // Expect GC to be called automatically + //stm: @MARKET_DAGSTORE_START_002 tctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() select { @@ -118,6 +122,7 @@ func TestWrapperBackground(t *testing.T) { // Expect that when the wrapper is closed it will call close on the // DAG store + //stm: @MARKET_DAGSTORE_CLOSE_001 err = w.Close() require.NoError(t, err) diff --git a/markets/loggers/loggers.go b/markets/loggers/loggers.go index 0d542a45d..2d13a64a1 100644 --- a/markets/loggers/loggers.go +++ b/markets/loggers/loggers.go @@ -40,6 +40,7 @@ func DataTransferLogger(event datatransfer.Event, state datatransfer.ChannelStat "sent", state.Sent(), "received", state.Received(), "queued", state.Queued(), + "received count", state.ReceivedCidsTotal(), "total size", state.TotalSize(), "remote peer", state.OtherPeer(), "event message", event.Message, diff --git a/markets/storageadapter/dealpublisher_test.go b/markets/storageadapter/dealpublisher_test.go index 351a00171..a0ee0ae06 100644 --- a/markets/storageadapter/dealpublisher_test.go +++ b/markets/storageadapter/dealpublisher_test.go @@ -1,3 +1,4 @@ +//stm: #unit package storageadapter import ( @@ -28,6 +29,7 @@ import ( ) func TestDealPublisher(t *testing.T) { + //stm: @MARKET_DEAL_PUBLISHER_PUBLISH_001, @MARKET_DEAL_PUBLISHER_GET_PENDING_DEALS_001 oldClock := build.Clock t.Cleanup(func() { build.Clock = oldClock }) mc := clock.NewMock() @@ -188,6 +190,8 @@ func TestDealPublisher(t *testing.T) { } func TestForcePublish(t *testing.T) { + //stm: @MARKET_DEAL_PUBLISHER_PUBLISH_001, @MARKET_DEAL_PUBLISHER_GET_PENDING_DEALS_001 + //stm: @MARKET_DEAL_PUBLISHER_FORCE_PUBLISH_ALL_001 dpapi := newDPAPI(t) // Create a deal publisher diff --git a/markets/storageadapter/ondealsectorcommitted_test.go 
b/markets/storageadapter/ondealsectorcommitted_test.go index 86c01799a..b11bf03a5 100644 --- a/markets/storageadapter/ondealsectorcommitted_test.go +++ b/markets/storageadapter/ondealsectorcommitted_test.go @@ -1,3 +1,4 @@ +//stm: #unit package storageadapter import ( @@ -228,6 +229,7 @@ func TestOnDealSectorPreCommitted(t *testing.T) { Err2: data.currentDealInfoErr2, } scm := newSectorCommittedManager(eventsAPI, mockDIAPI, mockPCAPI) + //stm: @MARKET_ADAPTER_ON_SECTOR_PRE_COMMIT_001 err = scm.OnDealSectorPreCommitted(ctx, provider, proposal, publishCid, cb) if data.expectedError == nil { require.NoError(t, err) @@ -439,6 +441,7 @@ func TestOnDealSectorCommitted(t *testing.T) { Err2: data.currentDealInfoErr2, } scm := newSectorCommittedManager(eventsAPI, mockDIAPI, mockPCAPI) + //stm: @MARKET_ADAPTER_ON_SECTOR_COMMIT_001 err = scm.OnDealSectorCommitted(ctx, provider, sectorNumber, proposal, publishCid, cb) if data.expectedError == nil { require.NoError(t, err) diff --git a/miner/warmup.go b/miner/warmup.go index be5ac3ea7..3b73afbc9 100644 --- a/miner/warmup.go +++ b/miner/warmup.go @@ -74,6 +74,7 @@ out: SealProof: si.SealProof, SectorNumber: sector, SealedCID: si.SealedCID, + SectorKey: si.SectorKeyCID, }, }, r, ts.Height(), nv) if err != nil { diff --git a/node/builder.go b/node/builder.go index c4f858ed1..f0106ad97 100644 --- a/node/builder.go +++ b/node/builder.go @@ -33,6 +33,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/journal/alerting" + "github.com/filecoin-project/lotus/lib/lotuslog" "github.com/filecoin-project/lotus/lib/peermgr" _ "github.com/filecoin-project/lotus/lib/sigs/bls" _ "github.com/filecoin-project/lotus/lib/sigs/secp" @@ -221,7 +222,7 @@ var LibP2P = Options( Override(ConnGaterKey, lp2p.ConnGaterOption), // Services (resource management) - Override(new(network.ResourceManager), lp2p.ResourceManager), + 
Override(new(network.ResourceManager), lp2p.ResourceManager(200)), Override(ResourceManagerKey, lp2p.ResourceManagerOption), ) @@ -253,6 +254,9 @@ func Base() Option { // Config sets up constructors based on the provided Config func ConfigCommon(cfg *config.Common, enableLibp2pNode bool) Option { + // setup logging early + lotuslog.SetLevelsFromConfig(cfg.Logging.SubsystemLevels) + return Options( func(s *Settings) error { s.Config = true; return nil }, Override(new(dtypes.APIEndpoint), func() (dtypes.APIEndpoint, error) { @@ -282,6 +286,7 @@ func ConfigCommon(cfg *config.Common, enableLibp2pNode bool) Option { cfg.Libp2p.ConnMgrHigh, time.Duration(cfg.Libp2p.ConnMgrGrace), cfg.Libp2p.ProtectedPeers)), + Override(new(network.ResourceManager), lp2p.ResourceManager(cfg.Libp2p.ConnMgrHigh)), Override(new(*pubsub.PubSub), lp2p.GossipSub), Override(new(*config.Pubsub), &cfg.Pubsub), diff --git a/node/builder_chain.go b/node/builder_chain.go index afee868fd..226ecac68 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -65,7 +65,7 @@ var ChainNode = Options( Override(new(ffiwrapper.Verifier), ffiwrapper.ProofVerifier), Override(new(ffiwrapper.Prover), ffiwrapper.ProofProver), - // Consensus: VM + // Consensus: LegacyVM Override(new(vm.SyscallBuilder), vm.Syscalls), // Consensus: Chain storage/access diff --git a/node/config/def.go b/node/config/def.go index 233eccdd5..edc7ffef5 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -47,6 +47,11 @@ func defCommon() Common { ListenAddress: "/ip4/127.0.0.1/tcp/1234/http", Timeout: Duration(30 * time.Second), }, + Logging: Logging{ + SubsystemLevels: map[string]string{ + "example-subsystem": "INFO", + }, + }, Libp2p: Libp2p{ ListenAddresses: []string{ "/ip4/0.0.0.0/tcp/0", diff --git a/node/config/def_test.go b/node/config/def_test.go index a7a0e77ca..cc37a782e 100644 --- a/node/config/def_test.go +++ b/node/config/def_test.go @@ -1,3 +1,4 @@ +//stm: #unit package config import ( @@ -12,6 +13,7 @@ import ( 
) func TestDefaultFullNodeRoundtrip(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_001 c := DefaultFullNode() var s string @@ -32,7 +34,26 @@ func TestDefaultFullNodeRoundtrip(t *testing.T) { require.True(t, reflect.DeepEqual(c, c2)) } +func TestDefaultFullNodeCommentRoundtrip(t *testing.T) { + c := DefaultFullNode() + + var s string + { + c, err := ConfigComment(DefaultFullNode()) + require.NoError(t, err) + s = string(c) + } + + c2, err := FromReader(strings.NewReader(s), DefaultFullNode()) + require.NoError(t, err) + + fmt.Println(s) + + require.True(t, reflect.DeepEqual(c, c2)) +} + func TestDefaultMinerRoundtrip(t *testing.T) { + //stm: @OTHER_IMPLEMENTATION_001 c := DefaultStorageMiner() var s string diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index ccee363b4..972c196f7 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -127,6 +127,12 @@ of automatically performing on-chain operations.`, Comment: ``, }, + { + Name: "Logging", + Type: "Logging", + + Comment: ``, + }, { Name: "Libp2p", Type: "Libp2p", @@ -484,6 +490,14 @@ count towards this limit.`, closed by the connection manager.`, }, }, + "Logging": []DocField{ + { + Name: "SubsystemLevels", + Type: "map[string]string", + + Comment: `SubsystemLevels specify per-subsystem log levels`, + }, + }, "MinerAddressConfig": []DocField{ { Name: "PreCommitControl", @@ -736,6 +750,12 @@ avoid the relatively high cost of unsealing the data later, at the cost of more Comment: `Run sector finalization before submitting sector proof to the chain`, }, + { + Name: "MakeCCSectorsAvailable", + Type: "bool", + + Comment: `After sealing CC sectors, make them available for upgrading with deals`, + }, { Name: "CollateralFromMinerBalance", Type: "bool", diff --git a/node/config/doc_util.go b/node/config/doc_util.go index ee70a9cfd..b88333238 100644 --- a/node/config/doc_util.go +++ b/node/config/doc_util.go @@ -16,7 +16,7 @@ func findDoc(root interface{}, section, name string) *DocField { return 
findDocSect("Common", section, name) } -func findDocSect(root string, section, name string) *DocField { +func findDocSect(root, section, name string) *DocField { path := strings.Split(section, ".") docSection := Doc[root] diff --git a/node/config/load.go b/node/config/load.go index db3914b6b..a76db7caf 100644 --- a/node/config/load.go +++ b/node/config/load.go @@ -69,7 +69,7 @@ func ConfigUpdate(cfgCur, cfgDef interface{}, comment bool) ([]byte, error) { } if comment { - // create a map of default lines so we can comment those out later + // create a map of default lines, so we can comment those out later defLines := strings.Split(defStr, "\n") defaults := map[string]struct{}{} for i := range defLines { diff --git a/node/config/load_test.go b/node/config/load_test.go index 9267b44ad..ccc227eb8 100644 --- a/node/config/load_test.go +++ b/node/config/load_test.go @@ -12,6 +12,7 @@ import ( ) func TestDecodeNothing(t *testing.T) { + //stm: @NODE_CONFIG_LOAD_FILE_002 assert := assert.New(t) { @@ -30,6 +31,7 @@ func TestDecodeNothing(t *testing.T) { } func TestParitalConfig(t *testing.T) { + //stm: @NODE_CONFIG_LOAD_FILE_003 assert := assert.New(t) cfgString := ` [API] diff --git a/node/config/types.go b/node/config/types.go index 55f924f2a..2e9357993 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -13,10 +13,11 @@ import ( // Common is common config between full node and miner type Common struct { - API API - Backup Backup - Libp2p Libp2p - Pubsub Pubsub + API API + Backup Backup + Logging Logging + Libp2p Libp2p + Pubsub Pubsub } // FullNode is a full node config @@ -39,6 +40,12 @@ type Backup struct { DisableMetadataLog bool } +// Logging is the logging system config +type Logging struct { + // SubsystemLevels specify per-subsystem log levels + SubsystemLevels map[string]string +} + // StorageMiner is a miner config type StorageMiner struct { Common @@ -243,6 +250,9 @@ type SealingConfig struct { // Run sector finalization before submitting sector 
proof to the chain FinalizeEarly bool + // After sealing CC sectors, make them available for upgrading with deals + MakeCCSectorsAvailable bool + // Whether to use available miner balance for sector collateral instead of sending it with each message CollateralFromMinerBalance bool // Minimum available balance to keep in the miner actor before sending it with messages diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 1730b7a7a..f105d3152 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -47,7 +47,6 @@ import ( "github.com/filecoin-project/go-address" cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/go-commp-utils/ffiwrapper" "github.com/filecoin-project/go-commp-utils/writer" datatransfer "github.com/filecoin-project/go-data-transfer" @@ -1263,28 +1262,12 @@ func (a *API) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Addre } func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) { - - // Hard-code the sector type to 32GiBV1_1, because: - // - ffiwrapper.GeneratePieceCIDFromFile requires a RegisteredSealProof - // - commP itself is sector-size independent, with rather low probability of that changing - // ( note how the final rust call is identical for every RegSP type ) - // https://github.com/filecoin-project/rust-filecoin-proofs-api/blob/v5.0.0/src/seal.rs#L1040-L1050 - // - // IF/WHEN this changes in the future we will have to be able to calculate - // "old style" commP, and thus will need to introduce a version switch or similar - arbitraryProofType := abi.RegisteredSealProof_StackedDrg64GiBV1_1 - rdr, err := os.Open(inpath) if err != nil { return nil, err } defer rdr.Close() //nolint:errcheck - stat, err := rdr.Stat() - if err != nil { - return nil, err - } - // check that the data is a car file; if it's not, retrieval won't work _, err = car.ReadHeader(bufio.NewReader(rdr)) if err != nil { @@ -1295,16 +1278,20 @@ func (a *API) 
ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet return nil, xerrors.Errorf("seek to start: %w", err) } - pieceReader, pieceSize := padreader.New(rdr, uint64(stat.Size())) - commP, err := ffiwrapper.GeneratePieceCIDFromFile(arbitraryProofType, pieceReader, pieceSize) + w := &writer.Writer{} + _, err = io.CopyBuffer(w, rdr, make([]byte, writer.CommPBuf)) + if err != nil { + return nil, xerrors.Errorf("copy into commp writer: %w", err) + } + commp, err := w.Sum() if err != nil { return nil, xerrors.Errorf("computing commP failed: %w", err) } return &api.CommPRet{ - Root: commP, - Size: pieceSize, + Root: commp.PieceCID, + Size: commp.PieceSize.Unpadded(), }, nil } diff --git a/node/impl/client/import_test.go b/node/impl/client/import_test.go index 1d7af86cb..06295efc7 100644 --- a/node/impl/client/import_test.go +++ b/node/impl/client/import_test.go @@ -27,6 +27,7 @@ import ( // This test uses a full "dense" CARv2, and not a filestore (positional mapping). func TestRoundtripUnixFS_Dense(t *testing.T) { + //stm: @CLIENT_DATA_IMPORT_002 ctx := context.Background() inputPath, inputContents := genInputFile(t) @@ -75,6 +76,7 @@ func TestRoundtripUnixFS_Dense(t *testing.T) { } func TestRoundtripUnixFS_Filestore(t *testing.T) { + //stm: @CLIENT_DATA_IMPORT_001 ctx := context.Background() a := &API{ Imports: &imports.Manager{}, diff --git a/node/impl/full/gas_test.go b/node/impl/full/gas_test.go index ac2835790..3b4084d43 100644 --- a/node/impl/full/gas_test.go +++ b/node/impl/full/gas_test.go @@ -13,6 +13,7 @@ import ( ) func TestMedian(t *testing.T) { + //stm: @MARKET_GAS_GET_MEDIAN_PREMIUM_001 require.Equal(t, types.NewInt(5), medianGasPremium([]GasMeta{ {big.NewInt(5), build.BlockGasTarget}, }, 1)) diff --git a/node/impl/storminer.go b/node/impl/storminer.go index c3577d9f5..4c0f889a6 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -676,18 +676,18 @@ func (tc *transferConverter) convertTransfer(channelID datatransfer.ChannelID, h } 
var channelIDPtr *datatransfer.ChannelID if !hasChannelID { - diagnostics = append(diagnostics, fmt.Sprintf("No data transfer channel id for GraphSync request ID %d", requestID)) + diagnostics = append(diagnostics, fmt.Sprintf("No data transfer channel id for GraphSync request ID %s", requestID)) } else { channelIDPtr = &channelID if isCurrentChannelRequest && !hasState { diagnostics = append(diagnostics, fmt.Sprintf("No current request state for data transfer channel id %s", channelID)) } else if !isCurrentChannelRequest && hasState { - diagnostics = append(diagnostics, fmt.Sprintf("Graphsync request %d is a previous request on data transfer channel id %s that was restarted, but it is still running", requestID, channelID)) + diagnostics = append(diagnostics, fmt.Sprintf("Graphsync request %s is a previous request on data transfer channel id %s that was restarted, but it is still running", requestID, channelID)) } } diagnostics = append(diagnostics, tc.gsDiagnostics[requestID]...) transfer := &api.GraphSyncDataTransfer{ - RequestID: requestID, + RequestID: &requestID, RequestState: stateString, IsCurrentChannelRequest: isCurrentChannelRequest, ChannelID: channelIDPtr, @@ -717,7 +717,7 @@ func (tc *transferConverter) collectRemainingTransfers() { channelID := channelID cs := api.NewDataTransferChannel(channelState.SelfPeer(), channelState) transfer := &api.GraphSyncDataTransfer{ - RequestID: graphsync.RequestID(-1), + RequestID: nil, RequestState: "graphsync state unknown", IsCurrentChannelRequest: false, ChannelID: &channelID, diff --git a/node/modules/lp2p/libp2p.go b/node/modules/lp2p/libp2p.go index 997792d48..5d8ece732 100644 --- a/node/modules/lp2p/libp2p.go +++ b/node/modules/lp2p/libp2p.go @@ -10,10 +10,10 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p" + connmgr "github.com/libp2p/go-libp2p-connmgr" "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" - 
"github.com/libp2p/go-libp2p/p2p/net/connmgr" "go.uber.org/fx" ) diff --git a/node/modules/lp2p/rcmgr.go b/node/modules/lp2p/rcmgr.go index 0bc4dd6b2..d0906fd8f 100644 --- a/node/modules/lp2p/rcmgr.go +++ b/node/modules/lp2p/rcmgr.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "math/bits" "os" "path/filepath" @@ -15,6 +16,8 @@ import ( "github.com/libp2p/go-libp2p-core/protocol" rcmgr "github.com/libp2p/go-libp2p-resource-manager" + logging "github.com/ipfs/go-log/v2" + "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node/repo" @@ -22,55 +25,104 @@ import ( "go.opencensus.io/tag" ) -func ResourceManager(lc fx.Lifecycle, repo repo.LockedRepo) (network.ResourceManager, error) { - var limiter *rcmgr.BasicLimiter - var opts []rcmgr.Option +func ResourceManager(connMgrHi uint) func(lc fx.Lifecycle, repo repo.LockedRepo) (network.ResourceManager, error) { + return func(lc fx.Lifecycle, repo repo.LockedRepo) (network.ResourceManager, error) { + envvar := os.Getenv("LOTUS_RCMGR") + if envvar == "" || envvar == "0" { + // TODO opt-in for now -- flip this to enabled by default once we are comfortable with testing + log.Info("libp2p resource manager is disabled") + return network.NullResourceManager, nil + } - repoPath := repo.Path() + log.Info("libp2p resource manager is enabled") + // enable debug logs for rcmgr + logging.SetLogLevel("rcmgr", "debug") - // create limiter -- parse $repo/limits.json if exists - limitsFile := filepath.Join(repoPath, "limits.json") - limitsIn, err := os.Open(limitsFile) - switch { - case err == nil: - defer limitsIn.Close() //nolint:errcheck - limiter, err = rcmgr.NewDefaultLimiterFromJSON(limitsIn) + // Adjust default limits + // - give it more memory, up to 4G, min of 1G + // - if maxconns are too high, adjust Conn/FD/Stream limits + defaultLimits := rcmgr.DefaultLimits.WithSystemMemory(.125, 1<<30, 4<<30) + maxconns := int(connMgrHi) + if 2*maxconns > defaultLimits.SystemBaseLimit.ConnsInbound { + // 
adjust conns to 2x to allow for two conns per peer (TCP+QUIC) + defaultLimits.SystemBaseLimit.ConnsInbound = logScale(2 * maxconns) + defaultLimits.SystemBaseLimit.ConnsOutbound = logScale(2 * maxconns) + defaultLimits.SystemBaseLimit.Conns = logScale(4 * maxconns) + + defaultLimits.SystemBaseLimit.StreamsInbound = logScale(16 * maxconns) + defaultLimits.SystemBaseLimit.StreamsOutbound = logScale(64 * maxconns) + defaultLimits.SystemBaseLimit.Streams = logScale(64 * maxconns) + + if 2*maxconns > defaultLimits.SystemBaseLimit.FD { + defaultLimits.SystemBaseLimit.FD = logScale(2 * maxconns) + } + + defaultLimits.ServiceBaseLimit.StreamsInbound = logScale(8 * maxconns) + defaultLimits.ServiceBaseLimit.StreamsOutbound = logScale(32 * maxconns) + defaultLimits.ServiceBaseLimit.Streams = logScale(32 * maxconns) + + defaultLimits.ProtocolBaseLimit.StreamsInbound = logScale(8 * maxconns) + defaultLimits.ProtocolBaseLimit.StreamsOutbound = logScale(32 * maxconns) + defaultLimits.ProtocolBaseLimit.Streams = logScale(32 * maxconns) + + log.Info("adjusted default resource manager limits") + } + + // initialize + var limiter *rcmgr.BasicLimiter + var opts []rcmgr.Option + + repoPath := repo.Path() + + // create limiter -- parse $repo/limits.json if exists + limitsFile := filepath.Join(repoPath, "limits.json") + limitsIn, err := os.Open(limitsFile) + switch { + case err == nil: + defer limitsIn.Close() //nolint:errcheck + limiter, err = rcmgr.NewLimiterFromJSON(limitsIn, defaultLimits) + if err != nil { + return nil, fmt.Errorf("error parsing limit file: %w", err) + } + + case errors.Is(err, os.ErrNotExist): + limiter = rcmgr.NewStaticLimiter(defaultLimits) + + default: + return nil, err + } + + // TODO: also set appropriate default limits for lotus protocols + libp2p.SetDefaultServiceLimits(limiter) + + opts = append(opts, rcmgr.WithMetrics(rcmgrMetrics{})) + + if os.Getenv("LOTUS_DEBUG_RCMGR") != "" { + debugPath := filepath.Join(repoPath, "debug") + if err := 
os.MkdirAll(debugPath, 0755); err != nil { + return nil, fmt.Errorf("error creating debug directory: %w", err) + } + traceFile := filepath.Join(debugPath, "rcmgr.json.gz") + opts = append(opts, rcmgr.WithTrace(traceFile)) + } + + mgr, err := rcmgr.NewResourceManager(limiter, opts...) if err != nil { - return nil, fmt.Errorf("error parsing limit file: %w", err) + return nil, fmt.Errorf("error creating resource manager: %w", err) } - case errors.Is(err, os.ErrNotExist): - limiter = rcmgr.NewDefaultLimiter() + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return mgr.Close() + }}) - default: - return nil, err + return mgr, nil } +} - // TODO: also set appropriate default limits for lotus protocols - libp2p.SetDefaultServiceLimits(limiter) - - opts = append(opts, rcmgr.WithMetrics(rcmgrMetrics{})) - - if os.Getenv("LOTUS_DEBUG_RCMGR") != "" { - debugPath := filepath.Join(repoPath, "debug") - if err := os.MkdirAll(debugPath, 0755); err != nil { - return nil, fmt.Errorf("error creating debug directory: %w", err) - } - traceFile := filepath.Join(debugPath, "rcmgr.json.gz") - opts = append(opts, rcmgr.WithTrace(traceFile)) - } - - mgr, err := rcmgr.NewResourceManager(limiter, opts...) 
- if err != nil { - return nil, fmt.Errorf("error creating resource manager: %w", err) - } - - lc.Append(fx.Hook{ - OnStop: func(_ context.Context) error { - return mgr.Close() - }}) - - return mgr, nil +func logScale(val int) int { + bitlen := bits.Len(uint(val)) + return 1 << bitlen } func ResourceManagerOption(mgr network.ResourceManager) Libp2pOpts { diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index 414f38917..179363e29 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -918,6 +918,7 @@ func NewSetSealConfigFunc(r repo.LockedRepo) (dtypes.SetSealingConfigFunc, error MaxSealingSectorsForDeals: cfg.MaxSealingSectorsForDeals, CommittedCapacitySectorLifetime: config.Duration(cfg.CommittedCapacitySectorLifetime), WaitDealsDelay: config.Duration(cfg.WaitDealsDelay), + MakeCCSectorsAvailable: cfg.MakeCCSectorsAvailable, AlwaysKeepUnsealedCopy: cfg.AlwaysKeepUnsealedCopy, FinalizeEarly: cfg.FinalizeEarly, @@ -957,6 +958,7 @@ func ToSealingConfig(dealmakingCfg config.DealmakingConfig, sealingCfg config.Se MakeNewSectorForDeals: dealmakingCfg.MakeNewSectorForDeals, CommittedCapacitySectorLifetime: time.Duration(sealingCfg.CommittedCapacitySectorLifetime), WaitDealsDelay: time.Duration(sealingCfg.WaitDealsDelay), + MakeCCSectorsAvailable: sealingCfg.MakeCCSectorsAvailable, AlwaysKeepUnsealedCopy: sealingCfg.AlwaysKeepUnsealedCopy, FinalizeEarly: sealingCfg.FinalizeEarly, diff --git a/node/repo/fsrepo_test.go b/node/repo/fsrepo_test.go index 381ebdcbe..9342258c3 100644 --- a/node/repo/fsrepo_test.go +++ b/node/repo/fsrepo_test.go @@ -2,16 +2,11 @@ package repo import ( - "io/ioutil" - "os" "testing" ) -func genFsRepo(t *testing.T) (*FsRepo, func()) { - path, err := ioutil.TempDir("", "lotus-repo-") - if err != nil { - t.Fatal(err) - } +func genFsRepo(t *testing.T) *FsRepo { + path := t.TempDir() repo, err := NewFS(path) if err != nil { @@ -22,13 +17,15 @@ func genFsRepo(t *testing.T) (*FsRepo, func()) { if err != 
ErrRepoExists && err != nil { t.Fatal(err) } - return repo, func() { - _ = os.RemoveAll(path) - } + return repo } func TestFsBasic(t *testing.T) { - repo, closer := genFsRepo(t) - defer closer() + //stm: @NODE_FS_REPO_LOCK_001,@NODE_FS_REPO_LOCK_002,@NODE_FS_REPO_UNLOCK_001 + //stm: @NODE_FS_REPO_SET_API_ENDPOINT_001, @NODE_FS_REPO_GET_API_ENDPOINT_001 + //stm: @NODE_FS_REPO_GET_CONFIG_001, @NODE_FS_REPO_SET_CONFIG_001 + //stm: @NODE_FS_REPO_LIST_KEYS_001, @NODE_FS_REPO_PUT_KEY_001 + //stm: @NODE_FS_REPO_GET_KEY_001, NODE_FS_REPO_DELETE_KEY_001 + repo := genFsRepo(t) basicTest(t, repo) } diff --git a/node/repo/memrepo_test.go b/node/repo/memrepo_test.go index fdf609bac..6fc0669da 100644 --- a/node/repo/memrepo_test.go +++ b/node/repo/memrepo_test.go @@ -6,6 +6,7 @@ import ( ) func TestMemBasic(t *testing.T) { + //stm: @REPO_MEM_001 repo := NewMemory(nil) basicTest(t, repo) } diff --git a/node/shutdown_test.go b/node/shutdown_test.go index 15e2af93e..b831c6180 100644 --- a/node/shutdown_test.go +++ b/node/shutdown_test.go @@ -1,3 +1,4 @@ +//stm: #unit package node import ( @@ -10,6 +11,7 @@ import ( ) func TestMonitorShutdown(t *testing.T) { + //stm: @NODE_COMMON_SHUTDOWN_001 signalCh := make(chan struct{}) // Three shutdown handlers. 
diff --git a/paychmgr/msglistener_test.go b/paychmgr/msglistener_test.go index 4b8ae6f30..38f2351ff 100644 --- a/paychmgr/msglistener_test.go +++ b/paychmgr/msglistener_test.go @@ -15,6 +15,7 @@ func testCids() []cid.Cid { } func TestMsgListener(t *testing.T) { + //stm: @TOKEN_PAYCH_REG_ON_MSG_COMPLETE_001, @TOKEN_PAYCH_FIRE_ON_MSG_COMPLETE_001 ml := newMsgListeners() done := false @@ -33,6 +34,7 @@ func TestMsgListener(t *testing.T) { } func TestMsgListenerNilErr(t *testing.T) { + //stm: @TOKEN_PAYCH_REG_ON_MSG_COMPLETE_001, @TOKEN_PAYCH_FIRE_ON_MSG_COMPLETE_001 ml := newMsgListeners() done := false @@ -50,6 +52,7 @@ func TestMsgListenerNilErr(t *testing.T) { } func TestMsgListenerUnsub(t *testing.T) { + //stm: @TOKEN_PAYCH_REG_ON_MSG_COMPLETE_001, @TOKEN_PAYCH_FIRE_ON_MSG_COMPLETE_001 ml := newMsgListeners() done := false @@ -72,6 +75,7 @@ func TestMsgListenerUnsub(t *testing.T) { } func TestMsgListenerMulti(t *testing.T) { + //stm: @TOKEN_PAYCH_REG_ON_MSG_COMPLETE_001, @TOKEN_PAYCH_FIRE_ON_MSG_COMPLETE_001 ml := newMsgListeners() count := 0 diff --git a/paychmgr/paych_test.go b/paychmgr/paych_test.go index e485c4e83..7f767510b 100644 --- a/paychmgr/paych_test.go +++ b/paychmgr/paych_test.go @@ -502,6 +502,7 @@ func TestAddVoucherInboundWalletKey(t *testing.T) { sv := createTestVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate) _, err = mgr.AddVoucherInbound(ctx, ch, sv, nil, minDelta) + //stm: @TOKEN_PAYCH_VOUCHER_CREATE_006 // Should fail because there is no wallet key matching the channel To // address (ie, the channel is not "owned" by this node) require.Error(t, err) @@ -513,6 +514,7 @@ func TestAddVoucherInboundWalletKey(t *testing.T) { sv = createTestVoucher(t, ch, voucherLane, nonce, voucherAmount, fromKeyPrivate) _, err = mgr.AddVoucherInbound(ctx, ch, sv, nil, minDelta) + //stm: @TOKEN_PAYCH_VOUCHER_CREATE_001 // Should now pass because there is a wallet key matching the channel To // address require.NoError(t, err) @@ -626,6 +628,7 @@ 
func TestCheckSpendable(t *testing.T) { } s.mock.setCallResponse(successResponse) + //stm: @TOKEN_PAYCH_CHECK_SPENDABLE_001 // Check that spendable is true secret := []byte("secret") spendable, err := s.mgr.CheckVoucherSpendable(ctx, s.ch, voucher, secret, nil) @@ -655,6 +658,7 @@ func TestCheckSpendable(t *testing.T) { require.NoError(t, err) require.True(t, spendable) + //stm: @TOKEN_PAYCH_CHECK_SPENDABLE_002 // Check that voucher is no longer spendable once it has been submitted _, err = s.mgr.SubmitVoucher(ctx, s.ch, voucher, nil, nil) require.NoError(t, err) diff --git a/paychmgr/paychget_test.go b/paychmgr/paychget_test.go index 0688301e8..9c5f3b47b 100644 --- a/paychmgr/paychget_test.go +++ b/paychmgr/paychget_test.go @@ -1,3 +1,4 @@ +//stm: #unit package paychmgr import ( @@ -59,6 +60,7 @@ func testChannelResponse(t *testing.T, ch address.Address) types.MessageReceipt // TestPaychGetCreateChannelMsg tests that GetPaych sends a message to create // a new channel with the correct funds func TestPaychGetCreateChannelMsg(t *testing.T) { + //stm: @TOKEN_PAYCH_CREATE_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) diff --git a/paychmgr/settle_test.go b/paychmgr/settle_test.go index ffbffc660..5512cd342 100644 --- a/paychmgr/settle_test.go +++ b/paychmgr/settle_test.go @@ -1,3 +1,4 @@ +//stm: #unit package paychmgr import ( diff --git a/paychmgr/store_test.go b/paychmgr/store_test.go index 563b82978..0c071b1e5 100644 --- a/paychmgr/store_test.go +++ b/paychmgr/store_test.go @@ -1,3 +1,4 @@ +//stm: #unit package paychmgr import ( @@ -13,6 +14,8 @@ import ( ) func TestStore(t *testing.T) { + //stm: @TOKEN_PAYCH_ALLOCATE_LANE_001, @TOKEN_PAYCH_LIST_CHANNELS_001 + //stm: @TOKEN_PAYCH_TRACK_CHANNEL_002, @TOKEN_PAYCH_TRACK_CHANNEL_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index 01ff9d8d3..d976d9aa2 
100644 --- a/storage/adapter_storage_miner.go +++ b/storage/adapter_storage_miner.go @@ -9,6 +9,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" @@ -112,13 +113,25 @@ func (s SealingAPIAdapter) StateMinerSectorAllocated(ctx context.Context, maddr return s.delegate.StateMinerSectorAllocated(ctx, maddr, sid, tsk) } -func (s SealingAPIAdapter) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) ([]*miner.SectorOnChainInfo, error) { +func (s SealingAPIAdapter) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) (bitfield.BitField, error) { tsk, err := types.TipSetKeyFromBytes(tok) if err != nil { - return nil, xerrors.Errorf("faile dto unmarshal TipSetToken to TipSetKey: %w", err) + return bitfield.BitField{}, xerrors.Errorf("failed to unmarshal TipSetToken to TipSetKey: %w", err) } - return s.delegate.StateMinerActiveSectors(ctx, maddr, tsk) + act, err := s.delegate.StateGetActor(ctx, maddr, tsk) + if err != nil { + return bitfield.BitField{}, xerrors.Errorf("getting miner actor: temp error: %+v", err) + } + + stor := store.ActorStore(ctx, blockstore.NewAPIBlockstore(s.delegate)) + + state, err := miner.Load(stor, act) + if err != nil { + return bitfield.BitField{}, xerrors.Errorf("loading miner state: %+v", err) + } + + return miner.AllPartSectors(state, miner.Partition.ActiveSectors) } func (s SealingAPIAdapter) StateWaitMsg(ctx context.Context, mcid cid.Cid) (sealing.MsgLookup, error) { diff --git a/storage/miner_sealing.go b/storage/miner_sealing.go index a22c32a40..8421c7148 100644 --- a/storage/miner_sealing.go +++ b/storage/miner_sealing.go @@ -79,11 +79,7 @@ func (m *Miner) MarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bo if snap { return 
m.sealing.MarkForSnapUpgrade(ctx, id) } - return m.sealing.MarkForUpgrade(ctx, id) -} - -func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool { - return m.sealing.IsMarkedForUpgrade(id) + return xerrors.Errorf("Old CC upgrade deprecated, use snap deals CC upgrade") } func (m *Miner) SectorAbortUpgrade(sectorNum abi.SectorNumber) error { @@ -147,7 +143,7 @@ func (m *Miner) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnC PreCommitMsg: info.PreCommitMessage, CommitMsg: info.CommitMessage, Retries: info.InvalidProofs, - ToUpgrade: m.IsMarkedForUpgrade(sid), + ToUpgrade: false, LastErr: info.LastErr, Log: log, diff --git a/storage/wdpost_changehandler_test.go b/storage/wdpost_changehandler_test.go index 2fcbe770e..00b1e94cc 100644 --- a/storage/wdpost_changehandler_test.go +++ b/storage/wdpost_changehandler_test.go @@ -1,3 +1,4 @@ +//stm: #unit package storage import ( @@ -200,6 +201,10 @@ func (m *mockAPI) setChangeHandler(ch *changeHandler) { // TestChangeHandlerBasic verifies we can generate a proof and submit it func TestChangeHandlerBasic(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_PROCESS_RESULTS_001 s := makeScaffolding(t) mock := s.mock @@ -248,6 +253,10 @@ func TestChangeHandlerBasic(t *testing.T) { // chain is already advanced past the confidence interval, we should move from // proving to submitting without a head change in between. 
func TestChangeHandlerFromProvingToSubmittingNoHeadChange(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_005 + //stm: @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 s := makeScaffolding(t) mock := s.mock @@ -299,6 +308,10 @@ func TestChangeHandlerFromProvingToSubmittingNoHeadChange(t *testing.T) { // proofs generated we should not submit anything to chain but submit state // should move to completed func TestChangeHandlerFromProvingEmptyProofsToComplete(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_005, @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_006 + //stm: @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 s := makeScaffolding(t) mock := s.mock @@ -349,6 +362,9 @@ func TestChangeHandlerFromProvingEmptyProofsToComplete(t *testing.T) { // TestChangeHandlerDontStartUntilProvingPeriod tests that the handler // ignores updates until the proving period has been reached. 
func TestChangeHandlerDontStartUntilProvingPeriod(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004 s := makeScaffolding(t) mock := s.mock @@ -387,6 +403,9 @@ func TestChangeHandlerDontStartUntilProvingPeriod(t *testing.T) { // TestChangeHandlerStartProvingNextDeadline verifies that the proof handler // starts proving the next deadline after the current one func TestChangeHandlerStartProvingNextDeadline(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 s := makeScaffolding(t) mock := s.mock @@ -436,6 +455,10 @@ func TestChangeHandlerStartProvingNextDeadline(t *testing.T) { // TestChangeHandlerProvingRounds verifies we can generate several rounds of // proofs as the chain head advances func TestChangeHandlerProvingRounds(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_002, @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_003, @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_005 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 s := makeScaffolding(t) mock := s.mock @@ -506,6 +529,9 @@ func TestChangeHandlerProvingRounds(t *testing.T) { // TestChangeHandlerProvingErrorRecovery verifies that the proof handler // recovers correctly from an error func TestChangeHandlerProvingErrorRecovery(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: 
@WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 s := makeScaffolding(t) mock := s.mock @@ -547,6 +573,10 @@ func TestChangeHandlerProvingErrorRecovery(t *testing.T) { // TestChangeHandlerSubmitErrorRecovery verifies that the submit handler // recovers correctly from an error func TestChangeHandlerSubmitErrorRecovery(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_PROCESS_RESULTS_001 s := makeScaffolding(t) mock := s.mock @@ -616,6 +646,9 @@ func TestChangeHandlerSubmitErrorRecovery(t *testing.T) { // TestChangeHandlerProveExpiry verifies that the prove handler // behaves correctly on expiry func TestChangeHandlerProveExpiry(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 s := makeScaffolding(t) mock := s.mock @@ -654,6 +687,9 @@ func TestChangeHandlerProveExpiry(t *testing.T) { // TestChangeHandlerSubmitExpiry verifies that the submit handler // behaves correctly on expiry func TestChangeHandlerSubmitExpiry(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_002, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 s := makeScaffolding(t) mock := s.mock @@ 
-717,6 +753,9 @@ func TestChangeHandlerSubmitExpiry(t *testing.T) { // TestChangeHandlerProveRevert verifies that the prove handler // behaves correctly on revert func TestChangeHandlerProveRevert(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 s := makeScaffolding(t) mock := s.mock @@ -753,6 +792,10 @@ func TestChangeHandlerProveRevert(t *testing.T) { // TestChangeHandlerSubmittingRevert verifies that the submit handler // behaves correctly when there's a revert from the submitting state func TestChangeHandlerSubmittingRevert(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_PROCESS_RESULTS_001 s := makeScaffolding(t) mock := s.mock @@ -824,6 +867,10 @@ func TestChangeHandlerSubmittingRevert(t *testing.T) { // TestChangeHandlerSubmitCompleteRevert verifies that the submit handler // behaves correctly when there's a revert from the submit complete state func TestChangeHandlerSubmitCompleteRevert(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_PROCESS_RESULTS_001 s := makeScaffolding(t) mock := s.mock @@ -885,6 +932,10 @@ func TestChangeHandlerSubmitCompleteRevert(t *testing.T) { // TestChangeHandlerSubmitRevertTwoEpochs verifies that the submit 
handler // behaves correctly when the revert is two epochs deep func TestChangeHandlerSubmitRevertTwoEpochs(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_002, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_PROCESS_RESULTS_001 s := makeScaffolding(t) mock := s.mock @@ -986,6 +1037,10 @@ func TestChangeHandlerSubmitRevertTwoEpochs(t *testing.T) { // behaves correctly when the revert is two epochs deep and the advance is // to a lower height than before func TestChangeHandlerSubmitRevertAdvanceLess(t *testing.T) { + //stm: @WDPOST_CHANGE_HANDLER_START_001, @WDPOST_CHANGE_HANDLER_UPDATE_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_001, @WDPOST_SUBMIT_HANDLER_PROCESS_HEAD_CHANGE_PW_001 + //stm: @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_004, @WDPOST_SUBMIT_HANDLER_SUBMIT_IF_READY_002, @WDPOST_PROVE_HANDLER_PROCESS_POST_RESULT_001 + //stm: @WDPOST_SUBMIT_HANDLER_PROCESS_PROCESS_RESULTS_001 s := makeScaffolding(t) mock := s.mock diff --git a/storage/wdpost_nextdl_test.go b/storage/wdpost_nextdl_test.go index 4a23bad65..31e8b7d6d 100644 --- a/storage/wdpost_nextdl_test.go +++ b/storage/wdpost_nextdl_test.go @@ -1,3 +1,4 @@ +//stm: #unit package storage import ( @@ -22,6 +23,7 @@ func TestNextDeadline(t *testing.T) { require.EqualValues(t, 60, di.Close) for i := 1; i < 1+int(miner.WPoStPeriodDeadlines)*2; i++ { + //stm: @WDPOST_NEXT_DEADLINE_001 di = nextDeadline(di) deadlineIdx = i % int(miner.WPoStPeriodDeadlines) expPeriodStart := int(miner.WPoStProvingPeriod) * (i / int(miner.WPoStPeriodDeadlines)) diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index 41ce5a2e9..97baa7e07 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -1,3 +1,4 @@ 
+//stm: #unit package storage import ( @@ -176,6 +177,10 @@ func (m mockFaultTracker) CheckProvable(ctx context.Context, pp abi.RegisteredPo // TestWDPostDoPost verifies that doPost will send the correct number of window // PoST messages for a given number of partitions func TestWDPostDoPost(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 ctx := context.Background() expectedMsgCount := 5 @@ -190,6 +195,7 @@ func TestWDPostDoPost(t *testing.T) { // Work out the number of partitions that can be included in a message // without exceeding the message sector limit + //stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001 partitionsPerMsg, err := policy.GetMaxPoStPartitions(network.Version13, proofType) require.NoError(t, err) if partitionsPerMsg > miner5.AddressedPartitionsMax { diff --git a/tools/stats/headbuffer/head_buffer_test.go b/tools/stats/headbuffer/head_buffer_test.go index 8a748c714..d39cbfdc5 100644 --- a/tools/stats/headbuffer/head_buffer_test.go +++ b/tools/stats/headbuffer/head_buffer_test.go @@ -1,3 +1,4 @@ +//stm: #unit package headbuffer import ( @@ -8,6 +9,7 @@ import ( ) func TestHeadBuffer(t *testing.T) { + //stm: @TOOLS_HEAD_BUFFER_PUSH_001, @TOOLS_HEAD_BUFFER_POP_001 t.Run("Straight Push through", func(t *testing.T) { hb := NewHeadChangeStackBuffer(5) require.Nil(t, hb.Push(&api.HeadChange{Type: "1"}))