Merge branch 'master' into feat/docker-compose

Author: Cory Schwartz, 2021-07-22 12:42:58 -07:00
Commit: ab02ca7e38
91 changed files with 1349 additions and 906 deletions


@ -1,224 +1,37 @@
# Lotus changelog
# 1.11.0-rc1 / 2021-06-28
# 1.10.1 / 2021-07-05
This is the first release candidate for the optional Lotus v1.11.0 release that introduces several months of bugfixes and feature development.
This is an optional but **highly recommended** release of Lotus for lotus miners, containing many bug fixes and improvements based on the feedback we have received from the community since HyperDrive.
- github.com/filecoin-project/lotus:
- Lotus version 1.11.0
- gateway: Add support for Version method ([filecoin-project/lotus#6618](https://github.com/filecoin-project/lotus/pull/6618))
- Miner SimultaneousTransfers config ([filecoin-project/lotus#6612](https://github.com/filecoin-project/lotus/pull/6612))
- revamped integration test kit (aka. Operation Sparks Joy) ([filecoin-project/lotus#6329](https://github.com/filecoin-project/lotus/pull/6329))
- downgrade libp2p/go-libp2p-yamux to v0.5.1. ([filecoin-project/lotus#6605](https://github.com/filecoin-project/lotus/pull/6605))
- Fix wallet error messages ([filecoin-project/lotus#6594](https://github.com/filecoin-project/lotus/pull/6594))
- Fix CircleCI gen ([filecoin-project/lotus#6589](https://github.com/filecoin-project/lotus/pull/6589))
- Make query-ask CLI more graceful ([filecoin-project/lotus#6590](https://github.com/filecoin-project/lotus/pull/6590))
- ([filecoin-project/lotus#6406](https://github.com/filecoin-project/lotus/pull/6406))
- move with changed name ([filecoin-project/lotus#6587](https://github.com/filecoin-project/lotus/pull/6587))
- scale up sector expiration to avoid sectors expiring while waiting in batch pre-commit ([filecoin-project/lotus#6566](https://github.com/filecoin-project/lotus/pull/6566))
- Merge release branch into master ([filecoin-project/lotus#6583](https://github.com/filecoin-project/lotus/pull/6583))
- ([filecoin-project/lotus#6582](https://github.com/filecoin-project/lotus/pull/6582))
- fix circleci being out of sync. ([filecoin-project/lotus#6573](https://github.com/filecoin-project/lotus/pull/6573))
- dynamic circleci config for streamlining test execution ([filecoin-project/lotus#6561](https://github.com/filecoin-project/lotus/pull/6561))
- Merge 1.10 branch into master ([filecoin-project/lotus#6571](https://github.com/filecoin-project/lotus/pull/6571))
- Fix helptext ([filecoin-project/lotus#6560](https://github.com/filecoin-project/lotus/pull/6560))
- extern/storage: add ability to ignore worker resources when scheduling. ([filecoin-project/lotus#6542](https://github.com/filecoin-project/lotus/pull/6542))
- Merge 1.10 branch into master ([filecoin-project/lotus#6540](https://github.com/filecoin-project/lotus/pull/6540))
- Initial draft: basic build instructions on Readme ([filecoin-project/lotus#6498](https://github.com/filecoin-project/lotus/pull/6498))
- fix commit finalize failed ([filecoin-project/lotus#6521](https://github.com/filecoin-project/lotus/pull/6521))
- Dynamic Retrieval pricing ([filecoin-project/lotus#6175](https://github.com/filecoin-project/lotus/pull/6175))
- Fix soup ([filecoin-project/lotus#6501](https://github.com/filecoin-project/lotus/pull/6501))
- fix: pick the correct partitions-per-post limit ([filecoin-project/lotus#6502](https://github.com/filecoin-project/lotus/pull/6502))
- Fix the build
- Adjust various CLI display ratios to arbitrary precision ([filecoin-project/lotus#6309](https://github.com/filecoin-project/lotus/pull/6309))
- Add utils to use multisigs as miner owners ([filecoin-project/lotus#6490](https://github.com/filecoin-project/lotus/pull/6490))
- Test multicore SDR support ([filecoin-project/lotus#6479](https://github.com/filecoin-project/lotus/pull/6479))
- sealing: Fix restartSectors race ([filecoin-project/lotus#6495](https://github.com/filecoin-project/lotus/pull/6495))
- Merge 1.10 into master ([filecoin-project/lotus#6487](https://github.com/filecoin-project/lotus/pull/6487))
- Unit tests for sector batchers ([filecoin-project/lotus#6432](https://github.com/filecoin-project/lotus/pull/6432))
- Merge 1.10 changes into master ([filecoin-project/lotus#6466](https://github.com/filecoin-project/lotus/pull/6466))
- Update chain list with correct help instructions ([filecoin-project/lotus#6465](https://github.com/filecoin-project/lotus/pull/6465))
- clean failed sectors in batch commit ([filecoin-project/lotus#6451](https://github.com/filecoin-project/lotus/pull/6451))
- itests/kit: add guard to ensure imports from tests only. ([filecoin-project/lotus#6445](https://github.com/filecoin-project/lotus/pull/6445))
- consolidate integration tests into `itests` package; create test kit; cleanup ([filecoin-project/lotus#6311](https://github.com/filecoin-project/lotus/pull/6311))
- Remove rc changelog, compile the new changelog for final release only ([filecoin-project/lotus#6444](https://github.com/filecoin-project/lotus/pull/6444))
- updated configuration comments for docs ([filecoin-project/lotus#6440](https://github.com/filecoin-project/lotus/pull/6440))
- Set ntwk v13 HyperDrive Calibration upgrade epoch ([filecoin-project/lotus#6441](https://github.com/filecoin-project/lotus/pull/6441))
- Merge release/v1.10.10 into master ([filecoin-project/lotus#6439](https://github.com/filecoin-project/lotus/pull/6439))
- implement a command to export a car ([filecoin-project/lotus#6405](https://github.com/filecoin-project/lotus/pull/6405))
- Merge v1.10 release branch into master ([filecoin-project/lotus#6435](https://github.com/filecoin-project/lotus/pull/6435))
- Fee config for sector batching ([filecoin-project/lotus#6420](https://github.com/filecoin-project/lotus/pull/6420))
- Fix: correct the change of message size limit ([filecoin-project/lotus#6430](https://github.com/filecoin-project/lotus/pull/6430))
- UX: lotus state power CLI should fail if called with a non-miner address ([filecoin-project/lotus#6425](https://github.com/filecoin-project/lotus/pull/6425))
- network reset friday
- Increase message size limit ([filecoin-project/lotus#6419](https://github.com/filecoin-project/lotus/pull/6419))
- polish(stmgr): define ExecMonitor for message application callback ([filecoin-project/lotus#6389](https://github.com/filecoin-project/lotus/pull/6389))
- upgrade testground action version ([filecoin-project/lotus#6403](https://github.com/filecoin-project/lotus/pull/6403))
- Fix logging of stringified CIDs double-encoded in hex ([filecoin-project/lotus#6413](https://github.com/filecoin-project/lotus/pull/6413))
- Update libp2p to 0.14.2 ([filecoin-project/lotus#6404](https://github.com/filecoin-project/lotus/pull/6404))
- Bypass task scheduler for reading unsealed pieces ([filecoin-project/lotus#6280](https://github.com/filecoin-project/lotus/pull/6280))
- testplans: lotus-soup: use default WPoStChallengeWindow ([filecoin-project/lotus#6400](https://github.com/filecoin-project/lotus/pull/6400))
- build snapcraft ([filecoin-project/lotus#6388](https://github.com/filecoin-project/lotus/pull/6388))
- Fix the doc errors of the sealing config funcs ([filecoin-project/lotus#6399](https://github.com/filecoin-project/lotus/pull/6399))
- Integration tests for offline deals ([filecoin-project/lotus#6081](https://github.com/filecoin-project/lotus/pull/6081))
- Fix success handling in Retrieval ([filecoin-project/lotus#5921](https://github.com/filecoin-project/lotus/pull/5921))
- Fix some flaky tests ([filecoin-project/lotus#6397](https://github.com/filecoin-project/lotus/pull/6397))
- build appimage in CI ([filecoin-project/lotus#6384](https://github.com/filecoin-project/lotus/pull/6384))
- Add doc on gas balancing ([filecoin-project/lotus#6392](https://github.com/filecoin-project/lotus/pull/6392))
- Add a command to list retrievals ([filecoin-project/lotus#6337](https://github.com/filecoin-project/lotus/pull/6337))
- Add interop network ([filecoin-project/lotus#6387](https://github.com/filecoin-project/lotus/pull/6387))
- Network version 13 (v1.11) ([filecoin-project/lotus#6342](https://github.com/filecoin-project/lotus/pull/6342))
- Generate AppImage ([filecoin-project/lotus#6208](https://github.com/filecoin-project/lotus/pull/6208))
- lotus-gateway: add check command ([filecoin-project/lotus#6373](https://github.com/filecoin-project/lotus/pull/6373))
- Add a warning to the release issue template ([filecoin-project/lotus#6374](https://github.com/filecoin-project/lotus/pull/6374))
- update to markets-v1.4.0 ([filecoin-project/lotus#6369](https://github.com/filecoin-project/lotus/pull/6369))
- Add test for AddVerifiedClient ([filecoin-project/lotus#6317](https://github.com/filecoin-project/lotus/pull/6317))
- Typo fix in error message: "pubusb" -> "pubsub" ([filecoin-project/lotus#6365](https://github.com/filecoin-project/lotus/pull/6365))
- Improve the cli state call command ([filecoin-project/lotus#6226](https://github.com/filecoin-project/lotus/pull/6226))
- Upscale mineOne message to a WARN on unexpected ineligibility ([filecoin-project/lotus#6358](https://github.com/filecoin-project/lotus/pull/6358))
- storagefsm: Fix batch deal packing behavior ([filecoin-project/lotus#6041](https://github.com/filecoin-project/lotus/pull/6041))
- Remove few useless variable assignments ([filecoin-project/lotus#6359](https://github.com/filecoin-project/lotus/pull/6359))
- lotus-wallet: JWT Support ([filecoin-project/lotus#6360](https://github.com/filecoin-project/lotus/pull/6360))
- Reduce noise from 'peer has different genesis' messages ([filecoin-project/lotus#6357](https://github.com/filecoin-project/lotus/pull/6357))
- events: Fix handling of multiple matched events per epoch ([filecoin-project/lotus#6355](https://github.com/filecoin-project/lotus/pull/6355))
- Update RELEASE_ISSUE_TEMPLATE.md ([filecoin-project/lotus#6236](https://github.com/filecoin-project/lotus/pull/6236))
- Get current seal proof when necessary ([filecoin-project/lotus#6339](https://github.com/filecoin-project/lotus/pull/6339))
- Allow starting networks from arbitrary actor versions ([filecoin-project/lotus#6333](https://github.com/filecoin-project/lotus/pull/6333))
- Remove log line when tracing is not configured ([filecoin-project/lotus#6334](https://github.com/filecoin-project/lotus/pull/6334))
- Revert "Allow starting networks from arbitrary actor versions" ([filecoin-project/lotus#6330](https://github.com/filecoin-project/lotus/pull/6330))
- separate tracing environment variables ([filecoin-project/lotus#6323](https://github.com/filecoin-project/lotus/pull/6323))
- Allow starting networks from arbitrary actor versions ([filecoin-project/lotus#6305](https://github.com/filecoin-project/lotus/pull/6305))
- feat: log dispute rate ([filecoin-project/lotus#6322](https://github.com/filecoin-project/lotus/pull/6322))
- Use new actor tags ([filecoin-project/lotus#6291](https://github.com/filecoin-project/lotus/pull/6291))
- Fix logging around mineOne ([filecoin-project/lotus#6310](https://github.com/filecoin-project/lotus/pull/6310))
- Fix shell completions ([filecoin-project/lotus#6316](https://github.com/filecoin-project/lotus/pull/6316))
- Allow 8MB sectors in devnet ([filecoin-project/lotus#6312](https://github.com/filecoin-project/lotus/pull/6312))
- fix ticket expired ([filecoin-project/lotus#6304](https://github.com/filecoin-project/lotus/pull/6304))
- oh, snap! ([filecoin-project/lotus#6202](https://github.com/filecoin-project/lotus/pull/6202))
- Move verifreg shed utils to CLI ([filecoin-project/lotus#6135](https://github.com/filecoin-project/lotus/pull/6135))
- consider storiface.PathStorage when calculating storage requirements ([filecoin-project/lotus#6233](https://github.com/filecoin-project/lotus/pull/6233))
- `storage` module: add go docs and minor code quality refactors ([filecoin-project/lotus#6259](https://github.com/filecoin-project/lotus/pull/6259))
- Revert "chore: update go-libp2p" ([filecoin-project/lotus#6306](https://github.com/filecoin-project/lotus/pull/6306))
- Increase data transfer timeouts ([filecoin-project/lotus#6300](https://github.com/filecoin-project/lotus/pull/6300))
- gateway: spin off from cmd to package ([filecoin-project/lotus#6294](https://github.com/filecoin-project/lotus/pull/6294))
- Update to markets 1.3 ([filecoin-project/lotus#6149](https://github.com/filecoin-project/lotus/pull/6149))
- Add a shed util to count 64 GiB miner stats ([filecoin-project/lotus#6290](https://github.com/filecoin-project/lotus/pull/6290))
- Delete CODEOWNERS ([filecoin-project/lotus#6289](https://github.com/filecoin-project/lotus/pull/6289))
- Merge v1.9.0 to master ([filecoin-project/lotus#6275](https://github.com/filecoin-project/lotus/pull/6275))
- Backport 6200 to master ([filecoin-project/lotus#6272](https://github.com/filecoin-project/lotus/pull/6272))
- Introduce stateless offline dealflow, bypassing the FSM/deallists ([filecoin-project/lotus#5961](https://github.com/filecoin-project/lotus/pull/5961))
- chore: update go-libp2p ([filecoin-project/lotus#6231](https://github.com/filecoin-project/lotus/pull/6231))
- fix: wait-api should use GetAPI to acquire binary specific API ([filecoin-project/lotus#6246](https://github.com/filecoin-project/lotus/pull/6246))
- Update RELEASE_ISSUE_TEMPLATE.md
- fix(ci): Updates to lotus CI build process ([filecoin-project/lotus#6256](https://github.com/filecoin-project/lotus/pull/6256))
- add flags to control gateway lookback parameters ([filecoin-project/lotus#6247](https://github.com/filecoin-project/lotus/pull/6247))
- Feat/nerpa v4 ([filecoin-project/lotus#6248](https://github.com/filecoin-project/lotus/pull/6248))
- chore(ci): Enable build on RC tags ([filecoin-project/lotus#6238](https://github.com/filecoin-project/lotus/pull/6238))
- Transplant some useful commands to lotus-shed actor ([filecoin-project/lotus#5913](https://github.com/filecoin-project/lotus/pull/5913))
- wip actor wrapper codegen ([filecoin-project/lotus#6108](https://github.com/filecoin-project/lotus/pull/6108))
- Robust message management ([filecoin-project/lotus#5822](https://github.com/filecoin-project/lotus/pull/5822))
- Add a shed util to count miners by post type ([filecoin-project/lotus#6169](https://github.com/filecoin-project/lotus/pull/6169))
- Introduce a release issue template ([filecoin-project/lotus#5826](https://github.com/filecoin-project/lotus/pull/5826))
- cron-wc ([filecoin-project/lotus#6178](https://github.com/filecoin-project/lotus/pull/6178))
- This is a 1:1 forward-port of PR#6183 from 1.9.x to master ([filecoin-project/lotus#6196](https://github.com/filecoin-project/lotus/pull/6196))
- Allow creation of state tree v3s ([filecoin-project/lotus#6167](https://github.com/filecoin-project/lotus/pull/6167))
- drand: fix beacon cache ([filecoin-project/lotus#6164](https://github.com/filecoin-project/lotus/pull/6164))
- Update cli gen ([filecoin-project/lotus#6155](https://github.com/filecoin-project/lotus/pull/6155))
- mpool: Cleanup pre-nv12 selection logic ([filecoin-project/lotus#6148](https://github.com/filecoin-project/lotus/pull/6148))
- Update ffi to proofs v7 ([filecoin-project/lotus#6150](https://github.com/filecoin-project/lotus/pull/6150))
- Generate CLI docs ([filecoin-project/lotus#6145](https://github.com/filecoin-project/lotus/pull/6145))
- feat: allow checkpointing to forks ([filecoin-project/lotus#6107](https://github.com/filecoin-project/lotus/pull/6107))
- attempt to do better padding on pieces being written into sectors ([filecoin-project/lotus#5988](https://github.com/filecoin-project/lotus/pull/5988))
- remove duplicate ask and calculate ping before lock ([filecoin-project/lotus#5968](https://github.com/filecoin-project/lotus/pull/5968))
- Add a command to get the fees of a deal ([filecoin-project/lotus#5307](https://github.com/filecoin-project/lotus/pull/5307))
- flaky tests improvement: separate TestBatchDealInput from TestAPIDealFlow ([filecoin-project/lotus#6141](https://github.com/filecoin-project/lotus/pull/6141))
- Testground checks on push ([filecoin-project/lotus#5887](https://github.com/filecoin-project/lotus/pull/5887))
- Add a CLI tool for miner proving deadline ([filecoin-project/lotus#6132](https://github.com/filecoin-project/lotus/pull/6132))
- Use EmptyTSK where appropriate ([filecoin-project/lotus#6134](https://github.com/filecoin-project/lotus/pull/6134))
- fix: use a consistent tipset in commands ([filecoin-project/lotus#6142](https://github.com/filecoin-project/lotus/pull/6142))
- go mod tidy for lotus-soup testplans ([filecoin-project/lotus#6124](https://github.com/filecoin-project/lotus/pull/6124))
- fix testground payment channel tests: use 1 miner ([filecoin-project/lotus#6126](https://github.com/filecoin-project/lotus/pull/6126))
- fix: use the parent state when listing actors ([filecoin-project/lotus#6143](https://github.com/filecoin-project/lotus/pull/6143))
- Speed up StateListMessages in some cases ([filecoin-project/lotus#6007](https://github.com/filecoin-project/lotus/pull/6007))
- Return total power when GetPowerRaw doesn't find miner claim ([filecoin-project/lotus#4938](https://github.com/filecoin-project/lotus/pull/4938))
- fix(splitstore): fix a panic on revert-only head changes ([filecoin-project/lotus#6133](https://github.com/filecoin-project/lotus/pull/6133))
- shed: command to list duplicate messages in tipsets (steb) ([filecoin-project/lotus#5847](https://github.com/filecoin-project/lotus/pull/5847))
- upgrade `lotus-soup` testplans and reduce deals concurrency to a single miner ([filecoin-project/lotus#6122](https://github.com/filecoin-project/lotus/pull/6122))
- Merge releases (1.8.0) into master ([filecoin-project/lotus#6118](https://github.com/filecoin-project/lotus/pull/6118))
- github.com/filecoin-project/go-commp-utils (v0.1.0 -> v0.1.1-0.20210427191551-70bf140d31c7):
- add a padding helper function ([filecoin-project/go-commp-utils#3](https://github.com/filecoin-project/go-commp-utils/pull/3))
- github.com/filecoin-project/go-data-transfer (v1.4.3 -> v1.6.0):
- release: v1.6.0
- fix: option to disable accept and complete timeouts
- fix: disable restart ack timeout
- release: v1.5.0
- Add isRestart param to validators (#197) ([filecoin-project/go-data-transfer#197](https://github.com/filecoin-project/go-data-transfer/pull/197))
- fix: flaky TestChannelMonitorAutoRestart (#198) ([filecoin-project/go-data-transfer#198](https://github.com/filecoin-project/go-data-transfer/pull/198))
- Channel monitor watches for errors instead of measuring data rate (#190) ([filecoin-project/go-data-transfer#190](https://github.com/filecoin-project/go-data-transfer/pull/190))
- fix: prevent concurrent restarts for same channel (#195) ([filecoin-project/go-data-transfer#195](https://github.com/filecoin-project/go-data-transfer/pull/195))
- fix: channel state machine event handling (#194) ([filecoin-project/go-data-transfer#194](https://github.com/filecoin-project/go-data-transfer/pull/194))
- Dont double count data sent (#185) ([filecoin-project/go-data-transfer#185](https://github.com/filecoin-project/go-data-transfer/pull/185))
- release: v1.4.3 (#189) ([filecoin-project/go-data-transfer#189](https://github.com/filecoin-project/go-data-transfer/pull/189))
- github.com/filecoin-project/go-fil-markets (v1.2.5 -> v1.5.0):
- release: v1.5.0
- Dynamic Retrieval Pricing (#542) ([filecoin-project/go-fil-markets#542](https://github.com/filecoin-project/go-fil-markets/pull/542))
- release: v1.4.0 (#551) ([filecoin-project/go-fil-markets#551](https://github.com/filecoin-project/go-fil-markets/pull/551))
- Update to go data transfer v1.6.0 (#550) ([filecoin-project/go-fil-markets#550](https://github.com/filecoin-project/go-fil-markets/pull/550))
- fix first make error (#548) ([filecoin-project/go-fil-markets#548](https://github.com/filecoin-project/go-fil-markets/pull/548))
- release: v1.3.0 (#544) ([filecoin-project/go-fil-markets#544](https://github.com/filecoin-project/go-fil-markets/pull/544))
- fix restarts during data transfer for a retrieval deal (#540) ([filecoin-project/go-fil-markets#540](https://github.com/filecoin-project/go-fil-markets/pull/540))
- Test Retrieval for offline deals (#541) ([filecoin-project/go-fil-markets#541](https://github.com/filecoin-project/go-fil-markets/pull/541))
- Allow anonymous submodule checkout (#535) ([filecoin-project/go-fil-markets#535](https://github.com/filecoin-project/go-fil-markets/pull/535))
- github.com/filecoin-project/specs-actors (v0.9.13 -> v0.9.14):
- Set ConsensusMinerMinPower to 10 TiB (#1427) ([filecoin-project/specs-actors#1427](https://github.com/filecoin-project/specs-actors/pull/1427))
- github.com/filecoin-project/specs-actors/v2 (v2.3.5-0.20210114162132-5b58b773f4fb -> v2.3.5):
- Set ConsensusMinerMinPower to 10 TiB (#1428) ([filecoin-project/specs-actors#1428](https://github.com/filecoin-project/specs-actors/pull/1428))
- v2 VM satisfies SimVM interface (#1355) ([filecoin-project/specs-actors#1355](https://github.com/filecoin-project/specs-actors/pull/1355))
- github.com/filecoin-project/specs-actors/v3 (v3.1.0 -> v3.1.1):
- Set ConsensusMinerMinPower to 10 TiB for all PoStProofPolicies (#1429) ([filecoin-project/specs-actors#1429](https://github.com/filecoin-project/specs-actors/pull/1429))
- github.com/filecoin-project/specs-actors/v4 (v4.0.0 -> v4.0.1):
- Set ConsensusMinerMinPower to 10 TiB for all PoStProofPolicies (#1430) ([filecoin-project/specs-actors#1430](https://github.com/filecoin-project/specs-actors/pull/1430))
## New Features
- commit batch: AggregateAboveBaseFee config #6650
- `AggregateAboveBaseFee` is added to the miner sealing configuration to set the network base fee at which to start aggregating proofs. When the network base fee is lower than this value, prove commits are submitted individually via `ProveCommitSector`. Per the [Batch Incentive Alignment](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md#batch-incentive-alignment) introduced in FIP-0013, we recommend that miners set this value to 0.15 nanoFIL (the default) to avoid unexpected aggregation fees being burned and to get the most benefit from aggregation (see the decision sketch below).
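For illustration only, here is a minimal sketch of the decision this threshold drives. It is not the actual Lotus batcher code; the helper name and the fee values are assumptions for the example.

```go
package main

import (
	"fmt"
	"math/big"
)

// shouldAggregate reports whether prove-commits would be submitted as an
// aggregate (ProveCommitAggregate) rather than individually
// (ProveCommitSector), given the current network base fee and the
// configured AggregateAboveBaseFee threshold (both in attoFIL).
func shouldAggregate(baseFee, aggregateAboveBaseFee *big.Int) bool {
	return baseFee.Cmp(aggregateAboveBaseFee) >= 0
}

func main() {
	// 0.15 nanoFIL expressed in attoFIL (the recommended/default threshold).
	threshold := big.NewInt(150_000_000)

	for _, fee := range []*big.Int{
		big.NewInt(100_000_000),   // 0.1 nanoFIL: below threshold -> submit individually
		big.NewInt(2_000_000_000), // 2 nanoFIL: above threshold -> aggregate
	} {
		fmt.Printf("baseFee=%s attoFIL aggregate=%v\n", fee, shouldAggregate(fee, threshold))
	}
}
```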
## Bug Fixes
- storage: Fix FinalizeSector with sectors in storage paths #6652
- Fix tiny error in check-client-datacap #6664
- Fix: precommit_batch method used the wrong cfg.PreCommitBatchWait #6658
- to optimize the batchwait #6636
- fix getTicket: sector precommitted but expired case #6635
- handleSubmitCommitAggregate() exception handling #6595
- remove precommit check in handleCommitFailed #6634
- ensure agg fee is adequate
- fix: ProveCommitAggregate msg execution failing when the miner balance is not enough #6623
- commit batch: Initialize the FailedSectors map #6647
## Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Raúl Kripalani | 118 | +11972/-10860 | 472 |
| Łukasz Magiera | 65 | +10824/-4158 | 353 |
| aarshkshah1992 | 59 | +8057/-3355 | 224 |
| Aayush Rajasekaran | 41 | +8786/-1691 | 331 |
| Steven Allen | 106 | +7653/-2718 | 273 |
| dirkmc | 11 | +2580/-1371 | 77 |
| Dirk McCormick | 39 | +1865/-1194 | 79 |
| Jakub Sztandera | 19 | +1973/-485 | 81 |
| vyzo | 4 | +1748/-330 | 50 |
| Aarsh Shah | 5 | +1462/-213 | 27 |
| Cory Schwartz | 35 | +568/-206 | 59 |
| chadwick2143 | 3 | +739/-1 | 4 |
| Peter Rabbitson | 21 | +487/-164 | 36 |
| hannahhoward | 5 | +544/-5 | 19 |
| Jennifer Wang | 8 | +206/-172 | 17 |
| frrist | 1 | +137/-88 | 7 |
| Travis Person | 3 | +175/-6 | 7 |
| Alex Wade | 1 | +48/-129 | 1 |
| whyrusleeping | 8 | +161/-13 | 11 |
| lotus | 1 | +114/-46 | 1 |
| Anton Evangelatov | 8 | +107/-53 | 20 |
| Rjan | 4 | +115/-33 | 4 |
| ZenGround0 | 3 | +114/-1 | 4 |
| Aloxaf | 1 | +43/-61 | 7 |
| yaohcn | 4 | +89/-9 | 5 |
| mitchellsoo | 1 | +51/-0 | 1 |
| Mike Greenberg | 3 | +28/-18 | 4 |
| Jennifer | 6 | +9/-14 | 6 |
| Frank | 2 | +11/-10 | 2 |
| wangchao | 3 | +5/-4 | 4 |
| Steve Loeppky | 1 | +7/-1 | 1 |
| Lion | 1 | +4/-2 | 1 |
| Mimir | 1 | +2/-2 | 1 |
| raulk | 1 | +1/-1 | 1 |
| Jack Yao | 1 | +1/-1 | 1 |
| IPFSUnion | 1 | +1/-1 | 1 |
| @magik6k | 7 | +151/-56 | 21 |
| @llifezou | 4 | +59/-20 | 4 |
| @johnli-helloworld | 2 | +45/-14 | 4 |
| @wangchao | 1 | +1/-27 | 1 |
| Jerry | 2 | +9/-4 | 2 |
| @zhoutian527 | 1 | +2/-2 | 1 |
| @ribasushi | 1 | +1/-1 | 1 |
# 1.10.0 / 2021-06-23


@ -131,6 +131,9 @@ install-miner:
install-worker:
install -C ./lotus-worker /usr/local/bin/lotus-worker
install-app:
install -C ./$(APP) /usr/local/bin/$(APP)
# TOOLS
lotus-seed: $(BUILD_DEPS)


@ -4,15 +4,11 @@ import (
"context"
"fmt"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/google/uuid"
"github.com/filecoin-project/go-jsonrpc/auth"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
apitypes "github.com/filecoin-project/lotus/api/types"
)
// MODIFYING THE API INTERFACE
@ -27,55 +23,23 @@ import (
// * Generate openrpc blobs
type Common interface {
// MethodGroup: Auth
AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read
AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) //perm:admin
// MethodGroup: Net
// MethodGroup: Log
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read
NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read
NetConnect(context.Context, peer.AddrInfo) error //perm:write
NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read
NetDisconnect(context.Context, peer.ID) error //perm:write
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read
NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read
NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read
NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read
NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read
// NetBandwidthStats returns statistics about the nodes total bandwidth
// usage and current rate across all peers and protocols.
NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read
// NetBandwidthStatsByPeer returns statistics about the nodes bandwidth
// usage and current rate per peer
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read
// NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth
// usage and current rate per protocol
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read
// ConnectionGater API
NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read
LogList(context.Context) ([]string, error) //perm:write
LogSetLevel(context.Context, string, string) error //perm:write
// MethodGroup: Common
// Discover returns an OpenRPC document describing an RPC API.
Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read
// ID returns peerID of libp2p node backing this API
ID(context.Context) (peer.ID, error) //perm:read
// Version provides information about API provider
Version(context.Context) (APIVersion, error) //perm:read
LogList(context.Context) ([]string, error) //perm:write
LogSetLevel(context.Context, string, string) error //perm:write
// Discover returns an OpenRPC document describing an RPC API.
Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read
// trigger graceful shutdown
Shutdown(context.Context) error //perm:admin
@ -105,8 +69,3 @@ type APIVersion struct {
func (v APIVersion) String() string {
return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String())
}
type NatInfo struct {
Reachability network.Reachability
PublicAddr string
}


@ -58,6 +58,7 @@ const LookbackNoLimit = abi.ChainEpoch(-1)
// FullNode API is a low-level interface to the Filecoin network full node
type FullNode interface {
Common
Net
// MethodGroup: Chain
// The Chain method group contains methods for interacting with the


@ -45,6 +45,7 @@ type Gateway interface {
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)

api/api_net.go (new file, 66 lines)

@ -0,0 +1,66 @@
package api
import (
"context"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
)
// MODIFYING THE API INTERFACE
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
type Net interface {
// MethodGroup: Net
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read
NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read
NetConnect(context.Context, peer.AddrInfo) error //perm:write
NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read
NetDisconnect(context.Context, peer.ID) error //perm:write
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read
NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read
NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read
NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read
NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read
// NetBandwidthStats returns statistics about the nodes total bandwidth
// usage and current rate across all peers and protocols.
NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read
// NetBandwidthStatsByPeer returns statistics about the nodes bandwidth
// usage and current rate per peer
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read
// NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth
// usage and current rate per protocol
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read
// ConnectionGater API
NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read
// ID returns peerID of libp2p node backing this API
ID(context.Context) (peer.ID, error) //perm:read
}
type CommonNet interface {
Common
Net
}
type NatInfo struct {
Reachability network.Reachability
PublicAddr string
}
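For context, here is a minimal usage sketch of calling methods from the new `Net` group through `CommonNet`, using the updated `NewCommonRPCV0` constructor shown in the API client changes later in this diff. The node address and token are assumptions, and error handling is abbreviated.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()
	headers := http.Header{"Authorization": []string{"Bearer <token>"}}

	// NewCommonRPCV0 now returns api.CommonNet, which embeds both Common and Net.
	capi, closer, err := client.NewCommonRPCV0(ctx, "ws://127.0.0.1:1234/rpc/v0", headers)
	if err != nil {
		panic(err)
	}
	defer closer()

	peers, err := capi.NetPeers(ctx) // Net method group
	if err != nil {
		panic(err)
	}
	fmt.Println("connected peers:", len(peers))

	v, err := capi.Version(ctx) // Common method group
	if err != nil {
		panic(err)
	}
	fmt.Println("node version:", v.Version)
}
```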


@ -41,6 +41,7 @@ import (
// StorageMiner is a low-level interface to the Filecoin network storage miner node
type StorageMiner interface {
Common
Net
ActorAddress(context.Context) (address.Address, error) //perm:read


@ -16,14 +16,10 @@ import (
)
// NewCommonRPCV0 creates a new http jsonrpc client.
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
var res v0api.CommonStruct
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
var res v0api.CommonNetStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
requestHeader,
)
api.GetInternalStructs(&res), requestHeader)
return &res, closer, err
}
@ -31,11 +27,9 @@ func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header)
// NewFullNodeRPCV0 creates a new http jsonrpc client.
func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.FullNode, jsonrpc.ClientCloser, error) {
var res v0api.FullNodeStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.CommonStruct.Internal,
&res.Internal,
}, requestHeader)
api.GetInternalStructs(&res), requestHeader)
return &res, closer, err
}
@ -44,10 +38,7 @@ func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Heade
func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
var res v1api.FullNodeStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.CommonStruct.Internal,
&res.Internal,
}, requestHeader)
api.GetInternalStructs(&res), requestHeader)
return &res, closer, err
}
@ -78,15 +69,10 @@ func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.H
var res v0api.StorageMinerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.CommonStruct.Internal,
&res.Internal,
},
requestHeader,
api.GetInternalStructs(&res), requestHeader,
append([]jsonrpc.Option{
rpcenc.ReaderParamEncoder(pushUrl),
}, opts...)...,
)
}, opts...)...)
return &res, closer, err
}
@ -99,9 +85,7 @@ func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header)
var res api.WorkerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
api.GetInternalStructs(&res),
requestHeader,
rpcenc.ReaderParamEncoder(pushUrl),
jsonrpc.WithNoReconnect(),
@ -115,9 +99,7 @@ func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header)
func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.Gateway, jsonrpc.ClientCloser, error) {
var res api.GatewayStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
api.GetInternalStructs(&res),
requestHeader,
opts...,
)
@ -129,9 +111,7 @@ func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header
func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.Gateway, jsonrpc.ClientCloser, error) {
var res v0api.GatewayStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
api.GetInternalStructs(&res),
requestHeader,
opts...,
)
@ -142,9 +122,7 @@ func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header
func NewWalletRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Wallet, jsonrpc.ClientCloser, error) {
var res api.WalletStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
[]interface{}{
&res.Internal,
},
api.GetInternalStructs(&res),
requestHeader,
)


@ -34,7 +34,7 @@ func main() {
doc := docgen_openrpc.NewLotusOpenRPCDocument(Comments, GroupDocs)
i, _, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3])
i, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3])
doc.RegisterReceiverName("Filecoin", i)
out, err := doc.Discover()


@ -4,6 +4,7 @@ import (
"encoding/json"
"fmt"
"os"
"reflect"
"sort"
"strings"
@ -15,7 +16,7 @@ func main() {
groups := make(map[string]*docgen.MethodGroup)
_, t, permStruct, commonPermStruct := docgen.GetAPIType(os.Args[2], os.Args[3])
_, t, permStruct := docgen.GetAPIType(os.Args[2], os.Args[3])
for i := 0; i < t.NumMethod(); i++ {
m := t.Method(i)
@ -88,13 +89,17 @@ func main() {
fmt.Printf("### %s\n", m.Name)
fmt.Printf("%s\n\n", m.Comment)
meth, ok := permStruct.FieldByName(m.Name)
if !ok {
meth, ok = commonPermStruct.FieldByName(m.Name)
var meth reflect.StructField
var ok bool
for _, ps := range permStruct {
meth, ok = ps.FieldByName(m.Name)
if ok {
break
}
}
if !ok {
panic("no perms for method: " + m.Name)
}
}
perms := meth.Tag.Get("perm")


@ -266,25 +266,27 @@ func init() {
addExample(map[string]interface{}{"abc": 123})
}
func GetAPIType(name, pkg string) (i interface{}, t, permStruct, commonPermStruct reflect.Type) {
func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
switch pkg {
case "api": // latest
switch name {
case "FullNode":
i = &api.FullNodeStruct{}
t = reflect.TypeOf(new(struct{ api.FullNode })).Elem()
permStruct = reflect.TypeOf(api.FullNodeStruct{}.Internal)
commonPermStruct = reflect.TypeOf(api.CommonStruct{}.Internal)
permStruct = append(permStruct, reflect.TypeOf(api.FullNodeStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal))
case "StorageMiner":
i = &api.StorageMinerStruct{}
t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem()
permStruct = reflect.TypeOf(api.StorageMinerStruct{}.Internal)
commonPermStruct = reflect.TypeOf(api.CommonStruct{}.Internal)
permStruct = append(permStruct, reflect.TypeOf(api.StorageMinerStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal))
case "Worker":
i = &api.WorkerStruct{}
t = reflect.TypeOf(new(struct{ api.Worker })).Elem()
permStruct = reflect.TypeOf(api.WorkerStruct{}.Internal)
commonPermStruct = reflect.TypeOf(api.WorkerStruct{}.Internal)
permStruct = append(permStruct, reflect.TypeOf(api.WorkerStruct{}.Internal))
default:
panic("unknown type")
}
@ -293,8 +295,9 @@ func GetAPIType(name, pkg string) (i interface{}, t, permStruct, commonPermStruc
case "FullNode":
i = v0api.FullNodeStruct{}
t = reflect.TypeOf(new(struct{ v0api.FullNode })).Elem()
permStruct = reflect.TypeOf(v0api.FullNodeStruct{}.Internal)
commonPermStruct = reflect.TypeOf(v0api.CommonStruct{}.Internal)
permStruct = append(permStruct, reflect.TypeOf(v0api.FullNodeStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(v0api.CommonStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(v0api.NetStruct{}.Internal))
default:
panic("unknown type")
}


@ -16,28 +16,33 @@ const (
var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin}
var DefaultPerms = []auth.Permission{PermRead}
func permissionedProxies(in, out interface{}) {
outs := GetInternalStructs(out)
for _, o := range outs {
auth.PermissionedProxy(AllPermissions, DefaultPerms, in, o)
}
}
func PermissionedStorMinerAPI(a StorageMiner) StorageMiner {
var out StorageMinerStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
permissionedProxies(a, &out)
return &out
}
func PermissionedFullAPI(a FullNode) FullNode {
var out FullNodeStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
permissionedProxies(a, &out)
return &out
}
func PermissionedWorkerAPI(a Worker) Worker {
var out WorkerStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
permissionedProxies(a, &out)
return &out
}
func PermissionedWalletAPI(a Wallet) Wallet {
var out WalletStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
permissionedProxies(a, &out)
return &out
}
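To show where these proxies fit, here is a minimal sketch (assumptions: a concrete `FullNode` implementation `impl`, default server options, and a local listen address) of exposing a permission-gated API over JSON-RPC.

```go
package rpcexample

import (
	"net/http"

	"github.com/filecoin-project/go-jsonrpc"
	lapi "github.com/filecoin-project/lotus/api"
)

func serve(impl lapi.FullNode) error {
	// Wrap every embedded Internal struct (Common, Net, FullNode) with
	// permission checks derived from the perm tags.
	gated := lapi.PermissionedFullAPI(impl)

	rpcServer := jsonrpc.NewServer()
	rpcServer.Register("Filecoin", gated)

	return http.ListenAndServe("127.0.0.1:1234", rpcServer)
}
```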


@ -35,7 +35,7 @@ import (
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/libp2p/go-libp2p-core/protocol"
xerrors "golang.org/x/xerrors"
)
@ -60,44 +60,10 @@ type CommonStruct struct {
Discover func(p0 context.Context) (apitypes.OpenRPCDocument, error) `perm:"read"`
ID func(p0 context.Context) (peer.ID, error) `perm:"read"`
LogList func(p0 context.Context) ([]string, error) `perm:"write"`
LogSetLevel func(p0 context.Context, p1 string, p2 string) error `perm:"write"`
NetAddrsListen func(p0 context.Context) (peer.AddrInfo, error) `perm:"read"`
NetAgentVersion func(p0 context.Context, p1 peer.ID) (string, error) `perm:"read"`
NetAutoNatStatus func(p0 context.Context) (NatInfo, error) `perm:"read"`
NetBandwidthStats func(p0 context.Context) (metrics.Stats, error) `perm:"read"`
NetBandwidthStatsByPeer func(p0 context.Context) (map[string]metrics.Stats, error) `perm:"read"`
NetBandwidthStatsByProtocol func(p0 context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"`
NetBlockAdd func(p0 context.Context, p1 NetBlockList) error `perm:"admin"`
NetBlockList func(p0 context.Context) (NetBlockList, error) `perm:"read"`
NetBlockRemove func(p0 context.Context, p1 NetBlockList) error `perm:"admin"`
NetConnect func(p0 context.Context, p1 peer.AddrInfo) error `perm:"write"`
NetConnectedness func(p0 context.Context, p1 peer.ID) (network.Connectedness, error) `perm:"read"`
NetDisconnect func(p0 context.Context, p1 peer.ID) error `perm:"write"`
NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"`
NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"`
NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"`
NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"`
Session func(p0 context.Context) (uuid.UUID, error) `perm:"read"`
Shutdown func(p0 context.Context) error `perm:"admin"`
@ -109,9 +75,26 @@ type CommonStruct struct {
type CommonStub struct {
}
type CommonNetStruct struct {
CommonStruct
NetStruct
Internal struct {
}
}
type CommonNetStub struct {
CommonStub
NetStub
}
type FullNodeStruct struct {
CommonStruct
NetStruct
Internal struct {
BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
@ -473,6 +456,8 @@ type FullNodeStruct struct {
type FullNodeStub struct {
CommonStub
NetStub
}
type GatewayStruct struct {
@ -525,6 +510,8 @@ type GatewayStruct struct {
StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (apitypes.NetworkVersion, error) ``
StateReadState func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) `perm:"read"`
StateSearchMsg func(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
StateSectorGetInfo func(p0 context.Context, p1 address.Address, p2 abi.SectorNumber, p3 types.TipSetKey) (*miner.SectorOnChainInfo, error) ``
@ -542,6 +529,47 @@ type GatewayStruct struct {
type GatewayStub struct {
}
type NetStruct struct {
Internal struct {
ID func(p0 context.Context) (peer.ID, error) `perm:"read"`
NetAddrsListen func(p0 context.Context) (peer.AddrInfo, error) `perm:"read"`
NetAgentVersion func(p0 context.Context, p1 peer.ID) (string, error) `perm:"read"`
NetAutoNatStatus func(p0 context.Context) (NatInfo, error) `perm:"read"`
NetBandwidthStats func(p0 context.Context) (metrics.Stats, error) `perm:"read"`
NetBandwidthStatsByPeer func(p0 context.Context) (map[string]metrics.Stats, error) `perm:"read"`
NetBandwidthStatsByProtocol func(p0 context.Context) (map[protocol.ID]metrics.Stats, error) `perm:"read"`
NetBlockAdd func(p0 context.Context, p1 NetBlockList) error `perm:"admin"`
NetBlockList func(p0 context.Context) (NetBlockList, error) `perm:"read"`
NetBlockRemove func(p0 context.Context, p1 NetBlockList) error `perm:"admin"`
NetConnect func(p0 context.Context, p1 peer.AddrInfo) error `perm:"write"`
NetConnectedness func(p0 context.Context, p1 peer.ID) (network.Connectedness, error) `perm:"read"`
NetDisconnect func(p0 context.Context, p1 peer.ID) error `perm:"write"`
NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"`
NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"`
NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"`
NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"`
}
}
type NetStub struct {
}
type SignableStruct struct {
Internal struct {
Sign func(p0 context.Context, p1 SignFunc) error ``
@ -554,6 +582,8 @@ type SignableStub struct {
type StorageMinerStruct struct {
CommonStruct
NetStruct
Internal struct {
ActorAddress func(p0 context.Context) (address.Address, error) `perm:"read"`
@ -747,6 +777,8 @@ type StorageMinerStruct struct {
type StorageMinerStub struct {
CommonStub
NetStub
}
type WalletStruct struct {
@ -871,14 +903,6 @@ func (s *CommonStub) Discover(p0 context.Context) (apitypes.OpenRPCDocument, err
return *new(apitypes.OpenRPCDocument), xerrors.New("method not supported")
}
func (s *CommonStruct) ID(p0 context.Context) (peer.ID, error) {
return s.Internal.ID(p0)
}
func (s *CommonStub) ID(p0 context.Context) (peer.ID, error) {
return *new(peer.ID), xerrors.New("method not supported")
}
func (s *CommonStruct) LogList(p0 context.Context) ([]string, error) {
return s.Internal.LogList(p0)
}
@ -895,134 +919,6 @@ func (s *CommonStub) LogSetLevel(p0 context.Context, p1 string, p2 string) error
return xerrors.New("method not supported")
}
func (s *CommonStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) {
return s.Internal.NetAddrsListen(p0)
}
func (s *CommonStub) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) {
return *new(peer.AddrInfo), xerrors.New("method not supported")
}
func (s *CommonStruct) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) {
return s.Internal.NetAgentVersion(p0, p1)
}
func (s *CommonStub) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) {
return "", xerrors.New("method not supported")
}
func (s *CommonStruct) NetAutoNatStatus(p0 context.Context) (NatInfo, error) {
return s.Internal.NetAutoNatStatus(p0)
}
func (s *CommonStub) NetAutoNatStatus(p0 context.Context) (NatInfo, error) {
return *new(NatInfo), xerrors.New("method not supported")
}
func (s *CommonStruct) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) {
return s.Internal.NetBandwidthStats(p0)
}
func (s *CommonStub) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) {
return *new(metrics.Stats), xerrors.New("method not supported")
}
func (s *CommonStruct) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) {
return s.Internal.NetBandwidthStatsByPeer(p0)
}
func (s *CommonStub) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) {
return *new(map[string]metrics.Stats), xerrors.New("method not supported")
}
func (s *CommonStruct) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) {
return s.Internal.NetBandwidthStatsByProtocol(p0)
}
func (s *CommonStub) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) {
return *new(map[protocol.ID]metrics.Stats), xerrors.New("method not supported")
}
func (s *CommonStruct) NetBlockAdd(p0 context.Context, p1 NetBlockList) error {
return s.Internal.NetBlockAdd(p0, p1)
}
func (s *CommonStub) NetBlockAdd(p0 context.Context, p1 NetBlockList) error {
return xerrors.New("method not supported")
}
func (s *CommonStruct) NetBlockList(p0 context.Context) (NetBlockList, error) {
return s.Internal.NetBlockList(p0)
}
func (s *CommonStub) NetBlockList(p0 context.Context) (NetBlockList, error) {
return *new(NetBlockList), xerrors.New("method not supported")
}
func (s *CommonStruct) NetBlockRemove(p0 context.Context, p1 NetBlockList) error {
return s.Internal.NetBlockRemove(p0, p1)
}
func (s *CommonStub) NetBlockRemove(p0 context.Context, p1 NetBlockList) error {
return xerrors.New("method not supported")
}
func (s *CommonStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error {
return s.Internal.NetConnect(p0, p1)
}
func (s *CommonStub) NetConnect(p0 context.Context, p1 peer.AddrInfo) error {
return xerrors.New("method not supported")
}
func (s *CommonStruct) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) {
return s.Internal.NetConnectedness(p0, p1)
}
func (s *CommonStub) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) {
return *new(network.Connectedness), xerrors.New("method not supported")
}
func (s *CommonStruct) NetDisconnect(p0 context.Context, p1 peer.ID) error {
return s.Internal.NetDisconnect(p0, p1)
}
func (s *CommonStub) NetDisconnect(p0 context.Context, p1 peer.ID) error {
return xerrors.New("method not supported")
}
func (s *CommonStruct) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
return s.Internal.NetFindPeer(p0, p1)
}
func (s *CommonStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
return *new(peer.AddrInfo), xerrors.New("method not supported")
}
func (s *CommonStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
return s.Internal.NetPeerInfo(p0, p1)
}
func (s *CommonStub) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
return nil, xerrors.New("method not supported")
}
func (s *CommonStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
return s.Internal.NetPeers(p0)
}
func (s *CommonStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
return *new([]peer.AddrInfo), xerrors.New("method not supported")
}
func (s *CommonStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
return s.Internal.NetPubsubScores(p0)
}
func (s *CommonStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
return *new([]PubsubScore), xerrors.New("method not supported")
}
func (s *CommonStruct) Session(p0 context.Context) (uuid.UUID, error) {
return s.Internal.Session(p0)
}
@ -2663,6 +2559,14 @@ func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey
return *new(apitypes.NetworkVersion), xerrors.New("method not supported")
}
func (s *GatewayStruct) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) {
return s.Internal.StateReadState(p0, p1, p2)
}
func (s *GatewayStub) StateReadState(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*ActorState, error) {
return nil, xerrors.New("method not supported")
}
func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 types.TipSetKey, p2 cid.Cid, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) {
return s.Internal.StateSearchMsg(p0, p1, p2, p3, p4)
}
@ -2711,6 +2615,142 @@ func (s *GatewayStub) WalletBalance(p0 context.Context, p1 address.Address) (typ
return *new(types.BigInt), xerrors.New("method not supported")
}
func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) {
return s.Internal.ID(p0)
}
func (s *NetStub) ID(p0 context.Context) (peer.ID, error) {
return *new(peer.ID), xerrors.New("method not supported")
}
func (s *NetStruct) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) {
return s.Internal.NetAddrsListen(p0)
}
func (s *NetStub) NetAddrsListen(p0 context.Context) (peer.AddrInfo, error) {
return *new(peer.AddrInfo), xerrors.New("method not supported")
}
func (s *NetStruct) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) {
return s.Internal.NetAgentVersion(p0, p1)
}
func (s *NetStub) NetAgentVersion(p0 context.Context, p1 peer.ID) (string, error) {
return "", xerrors.New("method not supported")
}
func (s *NetStruct) NetAutoNatStatus(p0 context.Context) (NatInfo, error) {
return s.Internal.NetAutoNatStatus(p0)
}
func (s *NetStub) NetAutoNatStatus(p0 context.Context) (NatInfo, error) {
return *new(NatInfo), xerrors.New("method not supported")
}
func (s *NetStruct) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) {
return s.Internal.NetBandwidthStats(p0)
}
func (s *NetStub) NetBandwidthStats(p0 context.Context) (metrics.Stats, error) {
return *new(metrics.Stats), xerrors.New("method not supported")
}
func (s *NetStruct) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) {
return s.Internal.NetBandwidthStatsByPeer(p0)
}
func (s *NetStub) NetBandwidthStatsByPeer(p0 context.Context) (map[string]metrics.Stats, error) {
return *new(map[string]metrics.Stats), xerrors.New("method not supported")
}
func (s *NetStruct) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) {
return s.Internal.NetBandwidthStatsByProtocol(p0)
}
func (s *NetStub) NetBandwidthStatsByProtocol(p0 context.Context) (map[protocol.ID]metrics.Stats, error) {
return *new(map[protocol.ID]metrics.Stats), xerrors.New("method not supported")
}
func (s *NetStruct) NetBlockAdd(p0 context.Context, p1 NetBlockList) error {
return s.Internal.NetBlockAdd(p0, p1)
}
func (s *NetStub) NetBlockAdd(p0 context.Context, p1 NetBlockList) error {
return xerrors.New("method not supported")
}
func (s *NetStruct) NetBlockList(p0 context.Context) (NetBlockList, error) {
return s.Internal.NetBlockList(p0)
}
func (s *NetStub) NetBlockList(p0 context.Context) (NetBlockList, error) {
return *new(NetBlockList), xerrors.New("method not supported")
}
func (s *NetStruct) NetBlockRemove(p0 context.Context, p1 NetBlockList) error {
return s.Internal.NetBlockRemove(p0, p1)
}
func (s *NetStub) NetBlockRemove(p0 context.Context, p1 NetBlockList) error {
return xerrors.New("method not supported")
}
func (s *NetStruct) NetConnect(p0 context.Context, p1 peer.AddrInfo) error {
return s.Internal.NetConnect(p0, p1)
}
func (s *NetStub) NetConnect(p0 context.Context, p1 peer.AddrInfo) error {
return xerrors.New("method not supported")
}
func (s *NetStruct) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) {
return s.Internal.NetConnectedness(p0, p1)
}
func (s *NetStub) NetConnectedness(p0 context.Context, p1 peer.ID) (network.Connectedness, error) {
return *new(network.Connectedness), xerrors.New("method not supported")
}
func (s *NetStruct) NetDisconnect(p0 context.Context, p1 peer.ID) error {
return s.Internal.NetDisconnect(p0, p1)
}
func (s *NetStub) NetDisconnect(p0 context.Context, p1 peer.ID) error {
return xerrors.New("method not supported")
}
func (s *NetStruct) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
return s.Internal.NetFindPeer(p0, p1)
}
func (s *NetStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) {
return *new(peer.AddrInfo), xerrors.New("method not supported")
}
func (s *NetStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
return s.Internal.NetPeerInfo(p0, p1)
}
func (s *NetStub) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) {
return nil, xerrors.New("method not supported")
}
func (s *NetStruct) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
return s.Internal.NetPeers(p0)
}
func (s *NetStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
return *new([]peer.AddrInfo), xerrors.New("method not supported")
}
func (s *NetStruct) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
return s.Internal.NetPubsubScores(p0)
}
func (s *NetStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) {
return *new([]PubsubScore), xerrors.New("method not supported")
}
func (s *SignableStruct) Sign(p0 context.Context, p1 SignFunc) error {
return s.Internal.Sign(p0, p1)
}
@ -3713,8 +3753,10 @@ func (s *WorkerStub) WaitQuiet(p0 context.Context) error {
var _ ChainIO = new(ChainIOStruct)
var _ Common = new(CommonStruct)
var _ CommonNet = new(CommonNetStruct)
var _ FullNode = new(FullNodeStruct)
var _ Gateway = new(GatewayStruct)
var _ Net = new(NetStruct)
var _ Signable = new(SignableStruct)
var _ StorageMiner = new(StorageMinerStruct)
var _ Wallet = new(WalletStruct)

api/proxy_util.go (new file, 30 lines)

@ -0,0 +1,30 @@
package api
import "reflect"
var _internalField = "Internal"
// GetInternalStructs extracts all pointers to 'Internal' sub-structs from the provided pointer to a proxy struct
func GetInternalStructs(in interface{}) []interface{} {
return getInternalStructs(reflect.ValueOf(in).Elem())
}
func getInternalStructs(rv reflect.Value) []interface{} {
var out []interface{}
// collect a pointer to this struct's own 'Internal' field
internal := rv.FieldByName(_internalField)
ii := internal.Addr().Interface()
out = append(out, ii)
// recurse into the remaining (embedded) struct fields, skipping 'Internal' itself
for i := 0; i < rv.NumField(); i++ {
if rv.Type().Field(i).Name == _internalField {
continue
}
sub := getInternalStructs(rv.Field(i))
out = append(out, sub...)
}
return out
}

api/proxy_util_test.go (new file, 62 lines)

@ -0,0 +1,62 @@
package api
import (
"testing"
"github.com/stretchr/testify/require"
)
type StrA struct {
StrB
Internal struct {
A int
}
}
type StrB struct {
Internal struct {
B int
}
}
type StrC struct {
Internal struct {
Internal struct {
C int
}
}
}
func TestGetInternalStructs(t *testing.T) {
var proxy StrA
sts := GetInternalStructs(&proxy)
require.Len(t, sts, 2)
sa := sts[0].(*struct{ A int })
sa.A = 3
sb := sts[1].(*struct{ B int })
sb.B = 4
require.Equal(t, 3, proxy.Internal.A)
require.Equal(t, 4, proxy.StrB.Internal.B)
}
func TestNestedInternalStructs(t *testing.T) {
var proxy StrC
// check that only the top-level internal struct gets picked up
sts := GetInternalStructs(&proxy)
require.Len(t, sts, 1)
sa := sts[0].(*struct {
Internal struct {
C int
}
})
sa.Internal.C = 5
require.Equal(t, 5, proxy.Internal.Internal.C)
}

View File

@ -46,6 +46,7 @@ import (
// FullNode API is a low-level interface to the Filecoin network full node
type FullNode interface {
Common
Net
// MethodGroup: Chain
// The Chain method group contains methods for interacting with the

View File

@ -5,8 +5,15 @@ import (
)
type Common = api.Common
type Net = api.Net
type CommonNet = api.CommonNet
type CommonStruct = api.CommonStruct
type CommonStub = api.CommonStub
type NetStruct = api.NetStruct
type NetStub = api.NetStub
type CommonNetStruct = api.CommonNetStruct
type CommonNetStub = api.CommonNetStub
type StorageMiner = api.StorageMiner
type StorageMinerStruct = api.StorageMinerStruct

View File

@ -30,6 +30,8 @@ import (
type FullNodeStruct struct {
CommonStruct
NetStruct
Internal struct {
BeaconGetEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
@ -389,6 +391,8 @@ type FullNodeStruct struct {
type FullNodeStub struct {
CommonStub
NetStub
}
type GatewayStruct struct {

View File

@ -54,8 +54,8 @@ func VersionForType(nodeType NodeType) (Version, error) {
// semver versions of the rpc api exposed
var (
FullAPIVersion0 = newVer(1, 3, 0)
FullAPIVersion1 = newVer(2, 1, 0)
FullAPIVersion0 = newVer(1, 3, 1)
FullAPIVersion1 = newVer(2, 1, 1)
MinerAPIVersion0 = newVer(1, 2, 0)
WorkerAPIVersion0 = newVer(1, 1, 0)

View File

@ -71,6 +71,13 @@ type Config struct {
// which skips moving (as it is a noop, but still takes time to read all the cold objects)
// and directly purges cold blocks.
DiscardColdBlocks bool
// HotStoreMessageRetention indicates the hotstore retention policy for messages.
// It has the following semantics:
// - a value of 0 will only retain messages within the compaction boundary (4 finalities)
// - a positive integer indicates the number of finalities, outside the compaction boundary,
// for which messages will be retained in the hotstore.
HotStoreMessageRetention uint64
}
// ChainAccessor allows the Splitstore to access the chain. It will most likely
@ -128,6 +135,9 @@ type SplitStore struct {
txnRefsMx sync.Mutex
txnRefs map[cid.Cid]struct{}
txnMissing map[cid.Cid]struct{}
// registered protectors
protectors []func(func(cid.Cid) error) error
}
var _ bstore.Blockstore = (*SplitStore)(nil)
@ -520,6 +530,13 @@ func (s *SplitStore) Start(chain ChainAccessor) error {
return nil
}
func (s *SplitStore) AddProtector(protector func(func(cid.Cid) error) error) {
s.mx.Lock()
defer s.mx.Unlock()
s.protectors = append(s.protectors, protector)
}
func (s *SplitStore) Close() error {
if !atomic.CompareAndSwapInt32(&s.closing, 0, 1) {
// already closing
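A small sketch of what the new setting means in epochs (the helper name is illustrative; the real computation lives in doCompact in splitstore_compact.go below, and it assumes the same build.Finality constant):

```go
package splitstore

import (
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/build"
)

// messageRetentionEpoch is an illustrative helper (not part of this changeset)
// mirroring the computation in doCompact: a retention of 0 keeps messages only
// down to the compaction boundary, while a value of N keeps an extra N
// finalities of messages below it; if that reaches past genesis, messages are
// kept all the way down (epoch 0).
func messageRetentionEpoch(boundaryEpoch abi.ChainEpoch, retention uint64) abi.ChainEpoch {
	inclMsgsRange := abi.ChainEpoch(retention) * build.Finality
	if inclMsgsRange < boundaryEpoch {
		return boundaryEpoch - inclMsgsRange
	}
	return 0
}
```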

View File

@ -345,6 +345,30 @@ func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error {
})
}
func (s *SplitStore) applyProtectors() error {
s.mx.Lock()
defer s.mx.Unlock()
count := 0
for _, protect := range s.protectors {
err := protect(func(c cid.Cid) error {
s.trackTxnRef(c)
count++
return nil
})
if err != nil {
return xerrors.Errorf("error applynig protector: %w", err)
}
}
if count > 0 {
log.Infof("protected %d references through %d protectors", count, len(s.protectors))
}
return nil
}
// --- Compaction ---
// Compaction works transactionally with the following algorithm:
// - We prepare a transaction, whereby all objects referenced through API i/o are tracked.
@ -376,7 +400,13 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
currentEpoch := curTs.Height()
boundaryEpoch := currentEpoch - CompactionBoundary
log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "compactionIndex", s.compactionIndex)
var inclMsgsEpoch abi.ChainEpoch
inclMsgsRange := abi.ChainEpoch(s.cfg.HotStoreMessageRetention) * build.Finality
if inclMsgsRange < boundaryEpoch {
inclMsgsEpoch = boundaryEpoch - inclMsgsRange
}
log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex)
markSet, err := s.markSetEnv.Create("live", s.markSetSize)
if err != nil {
@ -392,13 +422,21 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
// we are ready for concurrent marking
s.beginTxnMarking(markSet)
// 0. track all protected references at beginning of compaction; anything added later should
// be transactionally protected by the write
log.Info("protecting references with registered protectors")
err = s.applyProtectors()
if err != nil {
return err
}
// 1. mark reachable objects by walking the chain from the current epoch; we keep state roots
// and messages until the boundary epoch.
log.Info("marking reachable objects")
startMark := time.Now()
var count int64
err = s.walkChain(curTs, boundaryEpoch, true,
err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch,
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk
@ -593,7 +631,7 @@ func (s *SplitStore) endTxnProtect() {
s.txnMissing = nil
}
func (s *SplitStore) walkChain(ts *types.TipSet, boundary abi.ChainEpoch, inclMsgs bool,
func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch,
f func(cid.Cid) error) error {
visited := cid.NewSet()
walked := cid.NewSet()
@ -601,6 +639,8 @@ func (s *SplitStore) walkChain(ts *types.TipSet, boundary abi.ChainEpoch, inclMs
walkCnt := 0
scanCnt := 0
stopWalk := func(_ cid.Cid) error { return errStopWalk }
walkBlock := func(c cid.Cid) error {
if !visited.Visit(c) {
return nil
@ -621,10 +661,19 @@ func (s *SplitStore) walkChain(ts *types.TipSet, boundary abi.ChainEpoch, inclMs
return xerrors.Errorf("error unmarshaling block header (cid: %s): %w", c, err)
}
// we only scan the block if it is at or above the boundary
if hdr.Height >= boundary || hdr.Height == 0 {
scanCnt++
if inclMsgs && hdr.Height > 0 {
// messages are retained if within the inclMsgs boundary
if hdr.Height >= inclMsgs && hdr.Height > 0 {
if inclMsgs < inclState {
// we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we
// synced from snapshot and have a long HotStoreMessageRetention policy.
if err := s.walkObjectIncomplete(hdr.Messages, walked, f, stopWalk); err != nil {
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
}
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, walked, f, stopWalk); err != nil {
return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}
} else {
if err := s.walkObject(hdr.Messages, walked, f); err != nil {
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
}
@ -633,10 +682,14 @@ func (s *SplitStore) walkChain(ts *types.TipSet, boundary abi.ChainEpoch, inclMs
return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}
}
}
// state is only retained if within the inclState boundary, with the exception of genesis
if hdr.Height >= inclState || hdr.Height == 0 {
if err := s.walkObject(hdr.ParentStateRoot, walked, f); err != nil {
return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
}
scanCnt++
}
if hdr.Height > 0 {

View File

@ -63,6 +63,20 @@ func testSplitStore(t *testing.T, cfg *Config) {
t.Fatal(err)
}
// create a garbage block that is protected with a registered protector
protected := blocks.NewBlock([]byte("protected!"))
err = hot.Put(protected)
if err != nil {
t.Fatal(err)
}
// and another one that is not protected
unprotected := blocks.NewBlock([]byte("unprotected!"))
err = hot.Put(unprotected)
if err != nil {
t.Fatal(err)
}
// open the splitstore
ss, err := Open("", ds, hot, cold, cfg)
if err != nil {
@ -70,6 +84,11 @@ func testSplitStore(t *testing.T, cfg *Config) {
}
defer ss.Close() //nolint
// register our protector
ss.AddProtector(func(protect func(cid.Cid) error) error {
return protect(protected.Cid())
})
err = ss.Start(chain)
if err != nil {
t.Fatal(err)
@ -132,8 +151,8 @@ func testSplitStore(t *testing.T, cfg *Config) {
t.Errorf("expected %d blocks, but got %d", 2, coldCnt)
}
if hotCnt != 10 {
t.Errorf("expected %d blocks, but got %d", 10, hotCnt)
if hotCnt != 12 {
t.Errorf("expected %d blocks, but got %d", 12, hotCnt)
}
// trigger a compaction
@ -146,12 +165,41 @@ func testSplitStore(t *testing.T, cfg *Config) {
coldCnt = countBlocks(cold)
hotCnt = countBlocks(hot)
if coldCnt != 5 {
t.Errorf("expected %d cold blocks, but got %d", 5, coldCnt)
if coldCnt != 6 {
t.Errorf("expected %d cold blocks, but got %d", 6, coldCnt)
}
if hotCnt != 17 {
t.Errorf("expected %d hot blocks, but got %d", 17, hotCnt)
if hotCnt != 18 {
t.Errorf("expected %d hot blocks, but got %d", 18, hotCnt)
}
// ensure our protected block is still there
has, err := hot.Has(protected.Cid())
if err != nil {
t.Fatal(err)
}
if !has {
t.Fatal("protected block is missing from hotstore")
}
// ensure our unprotected block is in the coldstore now
has, err = hot.Has(unprotected.Cid())
if err != nil {
t.Fatal(err)
}
if has {
t.Fatal("unprotected block is still in hotstore")
}
has, err = cold.Has(unprotected.Cid())
if err != nil {
t.Fatal(err)
}
if !has {
t.Fatal("unprotected block is missing from coldstore")
}
// Make sure we can revert without panicking.

View File

@ -48,7 +48,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
count := int64(0)
xcount := int64(0)
missing := int64(0)
err := s.walkChain(curTs, epoch, false,
err := s.walkChain(curTs, epoch, epoch+1, // we don't load messages/receipts in warmup
func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -28,18 +28,19 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
var UpgradeKumquatHeight = abi.ChainEpoch(-7)
var UpgradeCalicoHeight = abi.ChainEpoch(-8)
var UpgradePersianHeight = abi.ChainEpoch(-9)
var UpgradeOrangeHeight = abi.ChainEpoch(-10)
var UpgradeClausHeight = abi.ChainEpoch(-11)
var UpgradePricelistOopsHeight = abi.ChainEpoch(-8)
var UpgradeCalicoHeight = abi.ChainEpoch(-9)
var UpgradePersianHeight = abi.ChainEpoch(-10)
var UpgradeOrangeHeight = abi.ChainEpoch(-11)
var UpgradeClausHeight = abi.ChainEpoch(-12)
var UpgradeTrustHeight = abi.ChainEpoch(-12)
var UpgradeTrustHeight = abi.ChainEpoch(-13)
var UpgradeNorwegianHeight = abi.ChainEpoch(-13)
var UpgradeNorwegianHeight = abi.ChainEpoch(-14)
var UpgradeTurboHeight = abi.ChainEpoch(-14)
var UpgradeTurboHeight = abi.ChainEpoch(-15)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-15)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,

View File

@ -28,6 +28,7 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(30)
const UpgradeTapeHeight = 60
const UpgradeLiftoffHeight = -5
const UpgradeKumquatHeight = 90
const UpgradePricelistOopsHeight = 119
const UpgradeCalicoHeight = 120
const UpgradePersianHeight = 150
const UpgradeClausHeight = 180

View File

@ -33,6 +33,8 @@ const UpgradeLiftoffHeight = -5
const UpgradeKumquatHeight = 90
const UpgradePricelistOopsHeight = 119
const UpgradeCalicoHeight = 120
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1)

View File

@ -31,18 +31,19 @@ var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
var UpgradeKumquatHeight = abi.ChainEpoch(-7)
var UpgradeCalicoHeight = abi.ChainEpoch(-8)
var UpgradePersianHeight = abi.ChainEpoch(-9)
var UpgradeOrangeHeight = abi.ChainEpoch(-10)
var UpgradeClausHeight = abi.ChainEpoch(-11)
var UpgradePricelistOopsHeight = abi.ChainEpoch(-8)
var UpgradeCalicoHeight = abi.ChainEpoch(-9)
var UpgradePersianHeight = abi.ChainEpoch(-10)
var UpgradeOrangeHeight = abi.ChainEpoch(-11)
var UpgradeClausHeight = abi.ChainEpoch(-12)
var UpgradeTrustHeight = abi.ChainEpoch(-12)
var UpgradeTrustHeight = abi.ChainEpoch(-13)
var UpgradeNorwegianHeight = abi.ChainEpoch(-13)
var UpgradeNorwegianHeight = abi.ChainEpoch(-14)
var UpgradeTurboHeight = abi.ChainEpoch(-14)
var UpgradeTurboHeight = abi.ChainEpoch(-15)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-15)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,

View File

@ -45,6 +45,7 @@ const UpgradeLiftoffHeight = 148888
const UpgradeKumquatHeight = 170000
const UpgradePricelistOopsHeight = 265199
const UpgradeCalicoHeight = 265200
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)

View File

@ -32,6 +32,7 @@ const UpgradeTapeHeight = 60
const UpgradeKumquatHeight = 90
const UpgradePricelistOopsHeight = 99
const UpgradeCalicoHeight = 100
const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1)

View File

@ -89,14 +89,15 @@ var (
UpgradeAssemblyHeight abi.ChainEpoch = 10
UpgradeLiftoffHeight abi.ChainEpoch = -5
UpgradeKumquatHeight abi.ChainEpoch = -6
UpgradeCalicoHeight abi.ChainEpoch = -7
UpgradePersianHeight abi.ChainEpoch = -8
UpgradeOrangeHeight abi.ChainEpoch = -9
UpgradeClausHeight abi.ChainEpoch = -10
UpgradeTrustHeight abi.ChainEpoch = -11
UpgradeNorwegianHeight abi.ChainEpoch = -12
UpgradeTurboHeight abi.ChainEpoch = -13
UpgradeHyperdriveHeight abi.ChainEpoch = -13
UpgradePricelistOopsHeight abi.ChainEpoch = -7
UpgradeCalicoHeight abi.ChainEpoch = -8
UpgradePersianHeight abi.ChainEpoch = -9
UpgradeOrangeHeight abi.ChainEpoch = -10
UpgradeClausHeight abi.ChainEpoch = -11
UpgradeTrustHeight abi.ChainEpoch = -12
UpgradeNorwegianHeight abi.ChainEpoch = -13
UpgradeTurboHeight abi.ChainEpoch = -14
UpgradeHyperdriveHeight abi.ChainEpoch = -15
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,

View File

@ -105,6 +105,7 @@ type State interface {
// UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than
// count if there aren't enough).
UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
GetAllocatedSectors() (*bitfield.BitField, error)
// Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors
GetProvingPeriodStart() (abi.ChainEpoch, error)

View File

@ -164,6 +164,7 @@ type State interface {
// UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than
// count if there aren't enough).
UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
GetAllocatedSectors() (*bitfield.BitField, error)
// Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors
GetProvingPeriodStart() (abi.ChainEpoch, error)

View File

@ -318,6 +318,15 @@ func (s *state{{.v}}) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, e
return sectors, nil
}
func (s *state{{.v}}) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state{{.v}}) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -311,6 +311,15 @@ func (s *state0) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
return sectors, nil
}
func (s *state0) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state0) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -309,6 +309,15 @@ func (s *state2) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
return sectors, nil
}
func (s *state2) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state2) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -311,6 +311,15 @@ func (s *state3) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
return sectors, nil
}
func (s *state3) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state3) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -311,6 +311,15 @@ func (s *state4) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
return sectors, nil
}
func (s *state4) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
func (s *state4) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -311,6 +311,15 @@ func (s *state5) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
return sectors, nil
}
func (s *state5) GetAllocatedSectors() (*bitfield.BitField, error) {
var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
return nil, err
}
return &allocatedSectors, nil
}
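For context, a hedged sketch of the read path the new accessor enables, mirroring the lotus-miner compact-allocated command later in this diff (the function name, package, and use of the v0 full-node API are illustrative):

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-address"
	cbor "github.com/ipfs/go-ipld-cbor"

	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
)

// allocatedSectorCount loads a miner actor's state over the API blockstore
// and counts the sector numbers marked in its allocated-sectors bitfield.
func allocatedSectorCount(ctx context.Context, full v0api.FullNode, maddr address.Address) (uint64, error) {
	mact, err := full.StateGetActor(ctx, maddr, types.EmptyTSK)
	if err != nil {
		return 0, err
	}

	store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(full)))

	mst, err := miner.Load(store, mact)
	if err != nil {
		return 0, err
	}

	allocs, err := mst.GetAllocatedSectors()
	if err != nil {
		return 0, err
	}

	return allocs.Count()
}
```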
func (s *state5) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {

View File

@ -25,7 +25,7 @@ func VersionForNetwork(version network.Version) Version {
switch version {
case network.Version0, network.Version1, network.Version2, network.Version3:
return Version0
case network.Version4, network.Version5, network.Version6, network.Version7, network.Version8, network.Version9:
case network.Version4, network.Version5, network.Version6, network.Version6AndAHalf, network.Version7, network.Version8, network.Version9:
return Version2
case network.Version10, network.Version11:
return Version3

View File

@ -5,6 +5,9 @@ import (
"math"
"sync"
"github.com/filecoin-project/lotus/api"
lru "github.com/hashicorp/golang-lru"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/go-state-types/abi"
@ -464,14 +467,20 @@ type messageEvents struct {
lk sync.RWMutex
matchers map[triggerID]MsgMatchFunc
blockMsgLk sync.Mutex
blockMsgCache *lru.ARCCache
}
func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs EventAPI) messageEvents {
blsMsgCache, _ := lru.NewARC(500)
return messageEvents{
ctx: ctx,
cs: cs,
hcAPI: hcAPI,
matchers: make(map[triggerID]MsgMatchFunc),
blockMsgLk: sync.Mutex{},
blockMsgCache: blsMsgCache,
}
}
@ -515,14 +524,21 @@ func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Mes
seen := map[cid.Cid]struct{}{}
for _, tsb := range ts.Blocks() {
msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid())
me.blockMsgLk.Lock()
msgsI, ok := me.blockMsgCache.Get(tsb.Cid())
var err error
if !ok {
msgsI, err = me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid())
if err != nil {
log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err)
// this is quite bad, but probably better than missing all the other updates
me.blockMsgLk.Unlock()
continue
}
me.blockMsgCache.Add(tsb.Cid(), msgsI)
}
me.blockMsgLk.Unlock()
msgs := msgsI.(*api.BlockMessages)
for _, m := range msgs.BlsMessages {
_, ok := seen[m.Cid()]
if ok {

View File

@ -6,6 +6,8 @@ import (
"sync"
"testing"
"gotest.tools/assert"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
"github.com/stretchr/testify/require"
@ -44,25 +46,43 @@ type fakeCS struct {
tipsets map[types.TipSetKey]*types.TipSet
sub func(rev, app []*types.TipSet)
callNumberLk sync.Mutex
callNumber map[string]int
}
func (fcs *fakeCS) ChainHead(ctx context.Context) (*types.TipSet, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["ChainHead"] = fcs.callNumber["ChainHead"] + 1
panic("implement me")
}
func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["ChainGetTipSet"] = fcs.callNumber["ChainGetTipSet"] + 1
return fcs.tipsets[key], nil
}
func (fcs *fakeCS) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["StateSearchMsg"] = fcs.callNumber["StateSearchMsg"] + 1
return nil, nil
}
func (fcs *fakeCS) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["StateGetActor"] = fcs.callNumber["StateGetActor"] + 1
panic("Not Implemented")
}
func (fcs *fakeCS) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["ChainGetTipSetByHeight"] = fcs.callNumber["ChainGetTipSetByHeight"] + 1
panic("Not Implemented")
}
@ -113,6 +133,10 @@ func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msg
}
func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["ChainNotify"] = fcs.callNumber["ChainNotify"] + 1
out := make(chan []*api.HeadChange, 1)
best, err := fcs.tsc.best()
if err != nil {
@ -143,6 +167,9 @@ func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error
}
func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*api.BlockMessages, error) {
fcs.callNumberLk.Lock()
defer fcs.callNumberLk.Unlock()
fcs.callNumber["ChainGetBlockMessages"] = fcs.callNumber["ChainGetBlockMessages"] + 1
messages, ok := fcs.blkMsgs[blk]
if !ok {
return &api.BlockMessages{}, nil
@ -152,8 +179,8 @@ func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*api
if !ok {
return &api.BlockMessages{}, nil
}
return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil
return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil
}
func (fcs *fakeCS) fakeMsgs(m fakeMsg) cid.Cid {
@ -236,6 +263,7 @@ func TestAt(t *testing.T) {
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -301,6 +329,7 @@ func TestAtDoubleTrigger(t *testing.T) {
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -343,6 +372,7 @@ func TestAtNullTrigger(t *testing.T) {
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -377,6 +407,7 @@ func TestAtNullConf(t *testing.T) {
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -416,6 +447,7 @@ func TestAtStart(t *testing.T) {
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -450,6 +482,7 @@ func TestAtStartConfidence(t *testing.T) {
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -480,6 +513,7 @@ func TestAtChained(t *testing.T) {
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -514,6 +548,7 @@ func TestAtChainedConfidence(t *testing.T) {
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -548,6 +583,7 @@ func TestAtChainedConfidenceNull(t *testing.T) {
t: t,
h: 1,
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -586,6 +622,7 @@ func TestCalled(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -798,6 +835,7 @@ func TestCalledTimeout(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -837,6 +875,7 @@ func TestCalledTimeout(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
callNumber: map[string]int{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -872,6 +911,7 @@ func TestCalledOrder(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -935,6 +975,7 @@ func TestCalledNull(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1000,6 +1041,7 @@ func TestRemoveTriggersOnMessage(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1090,6 +1132,7 @@ func TestStateChanged(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1178,6 +1221,7 @@ func TestStateChangedRevert(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1256,6 +1300,7 @@ func TestStateChangedTimeout(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
callNumber: map[string]int{},
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1295,6 +1340,7 @@ func TestStateChangedTimeout(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
callNumber: map[string]int{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1331,6 +1377,7 @@ func TestCalledMultiplePerEpoch(t *testing.T) {
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
callNumber: map[string]int{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
@ -1382,3 +1429,24 @@ func TestCalledMultiplePerEpoch(t *testing.T) {
fcs.advance(9, 1, nil)
}
func TestCachedSameBlock(t *testing.T) {
fcs := &fakeCS{
t: t,
h: 1,
msgs: map[cid.Cid]fakeMsg{},
blkMsgs: map[cid.Cid]cid.Cid{},
callNumber: map[string]int{},
tsc: newTSCache(2*build.ForkLengthThreshold, nil),
}
require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid)))
_ = NewEvents(context.Background(), fcs)
fcs.advance(0, 10, map[int]cid.Cid{})
assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 20, "expect call ChainGetBlockMessages %d but got %d", 20, fcs.callNumber["ChainGetBlockMessages"])
fcs.advance(5, 10, map[int]cid.Cid{})
assert.Assert(t, fcs.callNumber["ChainGetBlockMessages"] == 30, "expect call ChainGetBlockMessages %d but got %d", 30, fcs.callNumber["ChainGetBlockMessages"])
}

View File

@ -11,7 +11,6 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
)
@ -259,8 +258,14 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
Code: api.CheckStatusMessageValidity,
},
}
if err := m.ValidForBlockInclusion(0, build.NewestNetworkVersion); err != nil {
nv, err := mp.getNtwkVersion(epoch)
if err != nil {
check.OK = false
check.Err = fmt.Sprintf("error retrieving network version: %s", err.Error())
} else {
check.OK = true
}
if err := m.ValidForBlockInclusion(0, nv); err != nil {
check.OK = false
check.Err = fmt.Sprintf("syntactically invalid message: %s", err.Error())
} else {
@ -276,7 +281,7 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
// gas checks
// 4. Min Gas
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
minGas := vm.PricelistByVersion(nv).OnChainMessage(m.ChainLength())
check = api.MessageCheckStatus{
Cid: m.Cid(),

View File

@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
"github.com/hashicorp/go-multierror"
lru "github.com/hashicorp/golang-lru"
"github.com/ipfs/go-cid"
@ -29,6 +30,7 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/vm"
@ -147,6 +149,8 @@ type MessagePool struct {
minGasPrice types.BigInt
getNtwkVersion func(abi.ChainEpoch) (network.Version, error)
currentSize int
// pruneTrigger is a channel used to trigger a mempool pruning
@ -362,6 +366,7 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
if j == nil {
j = journal.NilJournal()
}
us := stmgr.DefaultUpgradeSchedule()
mp := &MessagePool{
ds: ds,
@ -373,6 +378,7 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
pending: make(map[address.Address]*msgSet),
keyCache: make(map[address.Address]address.Address),
minGasPrice: types.NewInt(0),
getNtwkVersion: us.GetNtwkVersion,
pruneTrigger: make(chan struct{}, 1),
pruneCooldown: make(chan struct{}, 1),
blsSigCache: cache,
@ -426,6 +432,27 @@ func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName, j journ
return mp, nil
}
func (mp *MessagePool) ForEachPendingMessage(f func(cid.Cid) error) error {
mp.lk.Lock()
defer mp.lk.Unlock()
for _, mset := range mp.pending {
for _, m := range mset.msgs {
err := f(m.Cid())
if err != nil {
return err
}
err = f(m.Message.Cid())
if err != nil {
return err
}
}
}
return nil
}
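ForEachPendingMessage has exactly the func(func(cid.Cid) error) error shape that SplitStore.AddProtector expects; a hedged sketch of that wiring follows (the actual module glue is not shown in this diff, and the function name is made up):

```go
package example

import (
	"github.com/filecoin-project/lotus/blockstore/splitstore"
	"github.com/filecoin-project/lotus/chain/messagepool"
)

// protectPendingMessages registers the message pool as a splitstore
// protector, so CIDs of pending messages are treated as live references and
// stay in the hotstore across compactions. Illustrative wiring only.
func protectPendingMessages(ss *splitstore.SplitStore, mp *messagepool.MessagePool) {
	ss.AddProtector(mp.ForEachPendingMessage)
}
```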
func (mp *MessagePool) resolveToKey(ctx context.Context, addr address.Address) (address.Address, error) {
// check the cache
a, f := mp.keyCache[addr]
@ -589,8 +616,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
// For non local messages, if the message cannot be included in the next 20 blocks it returns
// a (soft) validation error.
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
epoch := curTs.Height()
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
minGas := vm.PricelistByVersion(build.NewestNetworkVersion).OnChainMessage(m.ChainLength())
if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
return false, xerrors.Errorf("message will not be included in a block: %w", err)

View File

@ -105,6 +105,7 @@ func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet)
func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) {
return cid.Undef, nil
}
func (tma *testMpoolAPI) IsLite() bool {
return false
}
@ -286,7 +287,7 @@ func TestCheckMessageBig(t *testing.T) {
From: from,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasLimit: 60000000,
GasFeeCap: types.NewInt(100),
GasPremium: types.NewInt(1),
Params: make([]byte, 41<<10), // 41KiB payload

View File

@ -749,7 +749,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
}
curNonce++
minGas := vm.PricelistByEpoch(ts.Height()).OnChainMessage(m.ChainLength()).Total()
minGas := vm.PricelistByVersion(build.NewestNetworkVersion).OnChainMessage(m.ChainLength()).Total()
if m.Message.GasLimit < minGas {
break
}

View File

@ -161,6 +161,10 @@ func DefaultUpgradeSchedule() UpgradeSchedule {
Height: build.UpgradeKumquatHeight,
Network: network.Version6,
Migration: nil,
}, {
Height: build.UpgradePricelistOopsHeight,
Network: network.Version6AndAHalf,
Migration: nil,
}, {
Height: build.UpgradeCalicoHeight,
Network: network.Version7,
@ -292,6 +296,18 @@ func (us UpgradeSchedule) Validate() error {
return nil
}
func (us UpgradeSchedule) GetNtwkVersion(e abi.ChainEpoch) (network.Version, error) {
// Traverse from newest to oldest, returning the network version active during epoch e
for i := len(us) - 1; i >= 0; i-- {
u := us[i]
// u.Height is the last epoch before u.Network becomes the active version
if u.Height < e {
return u.Network, nil
}
}
return network.Version0, xerrors.Errorf("Epoch %d has no defined network version", e)
}
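A worked example of the lookup (heights are illustrative and differ per network build): with UpgradePricelistOopsHeight = 119 and UpgradeCalicoHeight = 120, epoch 120 still resolves to Version6AndAHalf and only epoch 121 resolves to Version7, because u.Height is the last epoch before u.Network activates.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/stmgr"
)

// Prints the network version the default schedule resolves for a few epochs.
// The exact output depends on the network the binary was built for.
func main() {
	us := stmgr.DefaultUpgradeSchedule()
	for _, e := range []abi.ChainEpoch{119, 120, 121} {
		nv, err := us.GetNtwkVersion(e)
		fmt.Printf("epoch %d -> network version %d (err: %v)\n", e, nv, err)
	}
}
```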
func (sm *StateManager) handleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {
retCid := root
var err error

View File

@ -17,6 +17,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/network"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
init2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/init"
@ -121,7 +122,7 @@ func TestForkHeightTriggers(t *testing.T) {
sm, err := NewStateManagerWithUpgradeSchedule(
cg.ChainStore(), UpgradeSchedule{{
Network: 1,
Network: network.Version1,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {
@ -250,7 +251,7 @@ func TestForkRefuseCall(t *testing.T) {
sm, err := NewStateManagerWithUpgradeSchedule(
cg.ChainStore(), UpgradeSchedule{{
Network: 1,
Network: network.Version1,
Expensive: true,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
@ -365,7 +366,7 @@ func TestForkPreMigration(t *testing.T) {
sm, err := NewStateManagerWithUpgradeSchedule(
cg.ChainStore(), UpgradeSchedule{{
Network: 1,
Network: network.Version1,
Height: testForkHeight,
Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor,
root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) {

View File

@ -1054,14 +1054,15 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
return xerrors.Errorf("failed to load base state tree: %w", err)
}
pl := vm.PricelistByEpoch(baseTs.Height())
nv := syncer.sm.GetNtwkVersion(ctx, b.Header.Height)
pl := vm.PricelistByVersion(nv)
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()
// Phase 1: syntactic validation, as defined in the spec
minGas := pl.OnChainMessage(msg.ChainLength())
if err := m.ValidForBlockInclusion(minGas.Total(), syncer.sm.GetNtwkVersion(ctx, b.Header.Height)); err != nil {
if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil {
return err
}
@ -1075,7 +1076,7 @@ func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock
// Phase 2: (Partial) semantic validation:
// the sender exists and is an account actor, and the nonces make sense
var sender address.Address
if syncer.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 {
if nv >= network.Version13 {
sender, err = st.LookupID(m.From)
if err != nil {
return err

View File

@ -3,12 +3,11 @@ package vm
import (
"fmt"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/go-address"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/network"
vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime"
proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
"github.com/ipfs/go-cid"
@ -80,8 +79,8 @@ type Pricelist interface {
OnVerifyConsensusFault() GasCharge
}
var prices = map[abi.ChainEpoch]Pricelist{
abi.ChainEpoch(0): &pricelistV0{
var prices = map[network.Version]Pricelist{
network.Version0: &pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1000,
@ -130,7 +129,7 @@ var prices = map[abi.ChainEpoch]Pricelist{
verifyPostDiscount: true,
verifyConsensusFault: 495422,
},
abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{
network.Version6AndAHalf: &pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1300,
@ -208,21 +207,19 @@ var prices = map[abi.ChainEpoch]Pricelist{
},
}
// PricelistByEpoch finds the latest prices for the given epoch
func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist {
// since we are storing the prices as map or epoch to price
// we need to get the price with the highest epoch that is lower or equal to the `epoch` arg
bestEpoch := abi.ChainEpoch(0)
bestPrice := prices[bestEpoch]
for e, pl := range prices {
// if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch`
if e > bestEpoch && e <= epoch {
bestEpoch = e
// PricelistByVersion finds the latest prices for the given network version
func PricelistByVersion(version network.Version) Pricelist {
bestVersion := network.Version0
bestPrice := prices[bestVersion]
for nv, pl := range prices {
// if `nv > bestVersion` and `nv <= version`
if nv > bestVersion && nv <= version {
bestVersion = nv
bestPrice = pl
}
}
if bestPrice == nil {
panic(fmt.Sprintf("bad setup: no gas prices available for epoch %d", epoch))
panic(fmt.Sprintf("bad setup: no gas prices available for version %d", version))
}
return bestPrice
}
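A short sketch of the lookup semantics (the message length is a made-up value): the map is keyed by the version at which a pricelist becomes effective, so the newest entry at or below the requested version wins.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/vm"
)

func main() {
	const msgLen = 100 // hypothetical serialized message length in bytes

	// Version5 predates the Calico repricing, so it falls back to the
	// genesis (Version0) pricelist; Version13 picks up the newest entry,
	// keyed at Version6AndAHalf in this changeset.
	oldGas := vm.PricelistByVersion(network.Version5).OnChainMessage(msgLen).Total()
	newGas := vm.PricelistByVersion(network.Version13).OnChainMessage(msgLen).Total()

	fmt.Println("pre-Calico on-chain message gas: ", oldGas)
	fmt.Println("post-Calico on-chain message gas:", newGas)
}
```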

View File

@ -41,7 +41,7 @@ var EmptyObjectCid cid.Cid
// TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses.
func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, address.Address, aerrors.ActorError) {
if err := rt.chargeGasSafe(PricelistByEpoch(rt.height).OnCreateActor()); err != nil {
if err := rt.chargeGasSafe(PricelistByVersion(rt.NetworkVersion()).OnCreateActor()); err != nil {
return nil, address.Undef, err
}

View File

@ -135,7 +135,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
gasAvailable: msg.GasLimit,
depth: 0,
numActorsCreated: 0,
pricelist: PricelistByEpoch(vm.blockHeight),
pricelist: PricelistByVersion(vm.ntwkVersion(ctx, vm.blockHeight)),
allowInternal: true,
callerValidated: false,
executionTrace: types.ExecutionTrace{Msg: msg},
@ -424,7 +424,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
return nil, err
}
pl := PricelistByEpoch(vm.blockHeight)
pl := PricelistByVersion(vm.ntwkVersion(ctx, vm.blockHeight))
msgGas := pl.OnChainMessage(cmsg.ChainLength())
msgGasCost := msgGas.Total()

View File

@ -446,6 +446,9 @@ var StateExecTraceCmd = &cli.Command{
if err != nil {
return err
}
if lookup == nil {
return fmt.Errorf("failed to find message: %s", mcid)
}
ts, err := capi.ChainGetTipSet(ctx, lookup.TipSet)
if err != nil {
@ -1491,6 +1494,10 @@ var StateSearchMsgCmd = &cli.Command{
return err
}
if mw == nil {
return fmt.Errorf("failed to find message: %s", msg)
}
m, err := api.ChainGetMessage(ctx, msg)
if err != nil {
return err

View File

@ -149,7 +149,7 @@ func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.
return addr, ainfo.AuthHeader(), nil
}
func GetAPI(ctx *cli.Context) (api.Common, jsonrpc.ClientCloser, error) {
func GetAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error) {
ti, ok := ctx.App.Metadata["repoType"]
if !ok {
log.Errorf("unknown repo type, are you sure you want to use GetAPI?")

View File

@ -5,6 +5,7 @@ import (
"os"
"strings"
rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/fatih/color"
@ -14,6 +15,7 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
@ -41,6 +43,7 @@ var actorCmd = &cli.Command{
actorControl,
actorProposeChangeWorker,
actorConfirmChangeWorker,
actorCompactAllocatedCmd,
},
}
@ -987,3 +990,154 @@ var actorConfirmChangeWorker = &cli.Command{
return nil
},
}
var actorCompactAllocatedCmd = &cli.Command{
Name: "compact-allocated",
Usage: "compact allocated sectors bitfield",
Flags: []cli.Flag{
&cli.Uint64Flag{
Name: "mask-last-offset",
Usage: "Mask sector IDs from 0 to 'higest_allocated - offset'",
},
&cli.Uint64Flag{
Name: "mask-upto-n",
Usage: "Mask sector IDs from 0 to 'n'",
},
&cli.BoolFlag{
Name: "really-do-it",
Usage: "Actually send transaction performing the action",
Value: false,
},
},
Action: func(cctx *cli.Context) error {
if !cctx.Bool("really-do-it") {
fmt.Println("Pass --really-do-it to actually execute this action")
return nil
}
if !cctx.Args().Present() {
return fmt.Errorf("must pass address of new owner address")
}
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
if err != nil {
return err
}
defer closer()
api, acloser, err := lcli.GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer acloser()
ctx := lcli.ReqContext(cctx)
maddr, err := nodeApi.ActorAddress(ctx)
if err != nil {
return err
}
mact, err := api.StateGetActor(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewAPIBlockstore(api)))
mst, err := miner.Load(store, mact)
if err != nil {
return err
}
allocs, err := mst.GetAllocatedSectors()
if err != nil {
return err
}
var maskBf bitfield.BitField
{
exclusiveFlags := []string{"mask-last-offset", "mask-upto-n"}
hasFlag := false
for _, f := range exclusiveFlags {
if hasFlag && cctx.IsSet(f) {
return xerrors.Errorf("more than one 'mask` flag set")
}
hasFlag = hasFlag || cctx.IsSet(f)
}
}
switch {
case cctx.IsSet("mask-last-offset"):
last, err := allocs.Last()
if err != nil {
return err
}
m := cctx.Uint64("mask-last-offset")
if last <= m+1 {
return xerrors.Errorf("highest allocated sector lower than mask offset %d: %d", m+1, last)
}
// security check to avoid bricking a miner
if last > 1<<60 {
return xerrors.Errorf("very high last sector number, refusing to mask: %d", last)
}
maskBf, err = bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
Runs: []rlepluslazy.Run{{Val: true, Len: last - m}}})
if err != nil {
return xerrors.Errorf("forming bitfield: %w", err)
}
case cctx.IsSet("mask-upto-n"):
n := cctx.Uint64("mask-upto-n")
maskBf, err = bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
Runs: []rlepluslazy.Run{{Val: true, Len: n}}})
if err != nil {
return xerrors.Errorf("forming bitfield: %w", err)
}
default:
return xerrors.Errorf("no 'mask' flags set")
}
mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
if err != nil {
return err
}
params := &miner2.CompactSectorNumbersParams{
MaskSectorNumbers: maskBf,
}
sp, err := actors.SerializeParams(params)
if err != nil {
return xerrors.Errorf("serializing params: %w", err)
}
smsg, err := api.MpoolPushMessage(ctx, &types.Message{
From: mi.Worker,
To: maddr,
Method: miner.Methods.CompactSectorNumbers,
Value: big.Zero(),
Params: sp,
}, nil)
if err != nil {
return xerrors.Errorf("mpool push: %w", err)
}
fmt.Println("CompactSectorNumbers Message CID:", smsg.Cid())
// wait for it to get mined into a block
wait, err := api.StateWaitMsg(ctx, smsg.Cid(), build.MessageConfidence)
if err != nil {
return err
}
// check it executed successfully
if wait.Receipt.ExitCode != 0 {
fmt.Println("Propose owner change failed!")
return err
}
return nil
},
}
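A minimal sketch of the mask this command builds (the n value is made up): a single RLE run of true covering sector numbers [0, n), which the CompactSectorNumbers message then applies to the miner's allocated-sectors bitfield.

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-bitfield"
	rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
)

func main() {
	n := uint64(1000) // hypothetical --mask-upto-n value

	// One run of 'true' with length n marks sector numbers 0..n-1.
	maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
		Runs: []rlepluslazy.Run{{Val: true, Len: n}},
	})
	if err != nil {
		panic(err)
	}

	count, err := maskBf.Count()
	if err != nil {
		panic(err)
	}
	fmt.Println("sector numbers covered by the mask:", count)
}
```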

View File

@ -337,6 +337,9 @@ func resolveFromChain(ctx context.Context, api v0api.FullNode, mcid cid.Cid, blo
if err != nil {
return nil, nil, nil, fmt.Errorf("failed to locate message: %w", err)
}
if msgInfo == nil {
return nil, nil, nil, fmt.Errorf("failed to locate message: not found")
}
log.Printf("located message at tipset %s (height: %d) with exit code: %s", msgInfo.TipSet, msgInfo.Height, msgInfo.Receipt.ExitCode)

View File

@ -199,7 +199,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131329,
"BlockDelay": 42
}
```

View File

@ -144,7 +144,7 @@ Perms: admin
Inputs: `null`
Response: `131328`
Response: `131329`
## Add

View File

@ -280,7 +280,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131329,
"BlockDelay": 42
}
```
@ -4634,7 +4634,7 @@ Inputs:
]
```
Response: `13`
Response: `1300`
### StateReadState
StateReadState returns the indicated actor's state.

View File

@ -282,7 +282,7 @@ Response:
```json
{
"Version": "string value",
"APIVersion": 131328,
"APIVersion": 131329,
"BlockDelay": 42
}
```
@ -4855,7 +4855,7 @@ Inputs:
]
```
Response: `13`
Response: `1300`
### StateReadState
StateReadState returns the indicated actor's state.

View File

@ -207,6 +207,7 @@ COMMANDS:
control Manage control addresses
propose-change-worker Propose a worker address change
confirm-change-worker Confirm a worker address change
compact-allocated compact allocated sectors bitfield
help, h Shows a list of commands or help for one command
OPTIONS:
@ -361,6 +362,22 @@ OPTIONS:
```
### lotus-miner actor compact-allocated
```
NAME:
lotus-miner actor compact-allocated - compact allocated sectors bitfield
USAGE:
lotus-miner actor compact-allocated [command options] [arguments...]
OPTIONS:
--mask-last-offset value Mask sector IDs from 0 to 'highest_allocated - offset' (default: 0)
--mask-upto-n value Mask sector IDs from 0 to 'n' (default: 0)
--really-do-it Actually send transaction performing the action (default: false)
--help, -h show help (default: false)
```
## lotus-miner info
```
NAME:

View File

@ -69,6 +69,10 @@ func (mgr *CurrentDealInfoManager) dealIDFromPublishDealsMsg(ctx context.Context
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: search msg failed: %w", publishCid, err)
}
if lookup == nil {
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: not found", publishCid)
}
if lookup.Receipt.ExitCode != exitcode.Ok {
return dealID, nil, xerrors.Errorf("looking for publish deal message %s: non-ok exit code: %s", publishCid, lookup.Receipt.ExitCode)
}

View File

@ -25,7 +25,7 @@ import (
"github.com/stretchr/testify/require"
)
var errNotFound = errors.New("Could not find")
var errNotFound = errors.New("could not find")
func TestGetCurrentDealInfo(t *testing.T) {
ctx := context.Background()
@ -180,6 +180,12 @@ func TestGetCurrentDealInfo(t *testing.T) {
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("looking for publish deal message %s: search msg failed: something went wrong", dummyCid),
},
"search message not found": {
publishCid: dummyCid,
targetProposal: &proposal,
expectedDealID: zeroDealID,
expectedError: xerrors.Errorf("looking for publish deal message %s: not found", dummyCid),
},
"return code not ok": {
publishCid: dummyCid,
searchMessageLookup: &MsgLookup{

View File

@ -358,8 +358,11 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf
}
params, pcd, tok, err := m.preCommitParams(ctx, sector)
if params == nil || err != nil {
return err
if err != nil {
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)})
}
if params == nil {
return nil // event was sent in preCommitParams
}
deposit, err := collateralSendAmount(ctx.Context(), m.api, m.maddr, cfg, pcd)
@ -403,9 +406,12 @@ func (m *Sealing) handleSubmitPreCommitBatch(ctx statemachine.Context, sector Se
}
params, deposit, _, err := m.preCommitParams(ctx, sector)
if params == nil || err != nil {
if err != nil {
return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("preCommitParams: %w", err)})
}
if params == nil {
return nil // event was sent in preCommitParams
}
res, err := m.precommiter.AddPreCommit(ctx.Context(), sector, deposit, params)
if err != nil {

4
go.mod
View File

@ -40,7 +40,7 @@ require (
github.com/filecoin-project/go-multistore v0.0.3
github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20
github.com/filecoin-project/go-paramfetch v0.0.2-0.20210614165157-25a6c7769498
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48
github.com/filecoin-project/go-state-types v0.1.1-0.20210722133031-ad9bfe54c124
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe
github.com/filecoin-project/go-statestore v0.1.1
github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b
@ -48,7 +48,7 @@ require (
github.com/filecoin-project/specs-actors/v2 v2.3.5
github.com/filecoin-project/specs-actors/v3 v3.1.1
github.com/filecoin-project/specs-actors/v4 v4.0.1
github.com/filecoin-project/specs-actors/v5 v5.0.1
github.com/filecoin-project/specs-actors/v5 v5.0.2
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506
github.com/filecoin-project/test-vectors/schema v0.0.5
github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1

7
go.sum
View File

@ -305,8 +305,9 @@ github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go
github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48 h1:Jc4OprDp3bRDxbsrXNHPwJabZJM3iDy+ri8/1e0ZnX4=
github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.1-0.20210722133031-ad9bfe54c124 h1:veGrNABg/9I7prngrowkhwbvW5d5JN55MNKmbsr5FqA=
github.com/filecoin-project/go-state-types v0.1.1-0.20210722133031-ad9bfe54c124/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI=
@ -331,8 +332,8 @@ github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIP
github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg=
github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng=
github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI=
github.com/filecoin-project/specs-actors/v5 v5.0.1 h1:PrYm5AKdMlJ/55eRW5laWcnaX66gyyDYBWvH38kNAMo=
github.com/filecoin-project/specs-actors/v5 v5.0.1/go.mod h1:74euMDIXorusOBs/QL/LNkYsXZdDpLJwojWw6T03pdE=
github.com/filecoin-project/specs-actors/v5 v5.0.2 h1:pLNFUt9xtFuhrgZZ0tPnzGchAVu4koyCRIopzkx/OP0=
github.com/filecoin-project/specs-actors/v5 v5.0.2/go.mod h1:E0yeEl6Scl6eWeeWmxwQsAufvOAC72H6ELyh2Y62H90=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw=
github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g=
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=

View File

@ -121,6 +121,7 @@ func (ts *apiSuite) testSearchMsg(t *testing.T) {
searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
require.NoError(t, err)
require.NotNil(t, searchRes)
require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
}

View File

@ -635,7 +635,7 @@ func (n *Ensemble) InterconnectAll() *Ensemble {
}
// Connect connects one full node to the provided full nodes.
func (n *Ensemble) Connect(from api.Common, to ...api.Common) *Ensemble {
func (n *Ensemble) Connect(from api.Net, to ...api.Net) *Ensemble {
addr, err := from.NetAddrsListen(context.Background())
require.NoError(n.t, err)

View File

@ -213,12 +213,18 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sched := kit.DefaultTestUpgradeSchedule
lastUpgradeHeight := sched[len(sched)-1].Height
och := build.UpgradeClausHeight
build.UpgradeClausHeight = 10
build.UpgradeClausHeight = lastUpgradeHeight + 1
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
ens.InterconnectAll().BeginMining(blocktime)
// Wait till all upgrades are done and we've passed the clause epoch.
client.WaitTillChain(ctx, kit.HeightAtLeast(build.UpgradeClausHeight+1))
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)
@ -268,6 +274,12 @@ func TestWindowPostBaseFeeBurn(t *testing.T) {
client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), opts)
ens.InterconnectAll().BeginMining(blocktime)
// Ideally we'd be a bit more precise here, but getting the information we need from the
// test framework is more work than it's worth.
//
// We just need to wait till all upgrades are done.
client.WaitTillChain(ctx, kit.HeightAtLeast(20))
maddr, err := miner.ActorAddress(ctx)
require.NoError(t, err)

View File

@ -78,27 +78,38 @@ func ReaderParamEncoder(addr string) jsonrpc.Option {
})
}
type waitReadCloser struct {
// watchReadCloser watches the ReadCloser and closes the watch channel when
// either: (1) the ReadCloser fails on Read (including with a benign error
// like EOF), or (2) when Close is called.
//
// Use it to be notified of terminal states, in situations where a Read failure (or
// EOF) is considered a terminal state too (besides Close).
type watchReadCloser struct {
io.ReadCloser
wait chan struct{}
watch chan struct{}
closeOnce sync.Once
}
func (w *waitReadCloser) Read(p []byte) (int, error) {
func (w *watchReadCloser) Read(p []byte) (int, error) {
n, err := w.ReadCloser.Read(p)
if err != nil {
close(w.wait)
w.closeOnce.Do(func() {
close(w.watch)
})
}
return n, err
}
func (w *waitReadCloser) Close() error {
close(w.wait)
func (w *watchReadCloser) Close() error {
w.closeOnce.Do(func() {
close(w.watch)
})
return w.ReadCloser.Close()
}
func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) {
var readersLk sync.Mutex
readers := map[uuid.UUID]chan *waitReadCloser{}
readers := map[uuid.UUID]chan *watchReadCloser{}
hnd := func(resp http.ResponseWriter, req *http.Request) {
strId := path.Base(req.URL.Path)
@ -111,14 +122,14 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) {
readersLk.Lock()
ch, found := readers[u]
if !found {
ch = make(chan *waitReadCloser)
ch = make(chan *watchReadCloser)
readers[u] = ch
}
readersLk.Unlock()
wr := &waitReadCloser{
wr := &watchReadCloser{
ReadCloser: req.Body,
wait: make(chan struct{}),
watch: make(chan struct{}),
}
tctx, cancel := context.WithTimeout(req.Context(), Timeout)
@ -134,7 +145,9 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) {
}
select {
case <-wr.wait:
case <-wr.watch:
// TODO should we check if we failed the Read, and if so
// return an HTTP 500? i.e. turn watch into a chan error?
case <-req.Context().Done():
log.Errorf("context error in reader stream handler (2): %v", req.Context().Err())
resp.WriteHeader(500)
@ -167,7 +180,7 @@ func ReaderParamDecoder() (http.HandlerFunc, jsonrpc.ServerOption) {
readersLk.Lock()
ch, found := readers[u]
if !found {
ch = make(chan *waitReadCloser)
ch = make(chan *watchReadCloser)
readers[u] = ch
}
readersLk.Unlock()
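
The rename to watchReadCloser plus the sync.Once makes the terminal-state notification idempotent: the watch channel is closed exactly once, whether Read fails, hits EOF, or Close is called first. Below is a condensed, self-contained usage sketch of the same pattern; it mirrors the type in this diff rather than importing the lotus rpcenc package.

```go
package main

import (
	"context"
	"io"
	"log"
	"strings"
	"sync"
	"time"
)

// watchReadCloser mirrors the wrapper above: watch is closed exactly once,
// either when Read returns an error (including EOF) or when Close is called.
type watchReadCloser struct {
	io.ReadCloser
	watch     chan struct{}
	closeOnce sync.Once
}

func (w *watchReadCloser) Read(p []byte) (int, error) {
	n, err := w.ReadCloser.Read(p)
	if err != nil {
		w.closeOnce.Do(func() { close(w.watch) })
	}
	return n, err
}

func (w *watchReadCloser) Close() error {
	w.closeOnce.Do(func() { close(w.watch) })
	return w.ReadCloser.Close()
}

func main() {
	wr := &watchReadCloser{
		ReadCloser: io.NopCloser(strings.NewReader("payload")),
		watch:      make(chan struct{}),
	}

	// Drain the body in the background; hitting EOF closes the watch channel.
	go func() { _, _ = io.ReadAll(wr) }()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	select {
	case <-wr.watch:
		log.Println("reader reached a terminal state (EOF or Close)")
	case <-ctx.Done():
		log.Println("gave up waiting:", ctx.Err())
	}
}
```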

View File

@ -54,7 +54,7 @@ func (rpn *retrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, min
func (rpn *retrievalProviderNode) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) {
log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length)
si, err := rpn.sectorsStatus(ctx, sectorID, true)
si, err := rpn.sectorsStatus(ctx, sectorID, false)
if err != nil {
return nil, err
}

View File

@ -25,6 +25,7 @@ import (
)
func TestDealPublisher(t *testing.T) {
t.Skip("this test randomly fails in various subtests; see issue #6799")
testCases := []struct {
name string
publishPeriod time.Duration

View File

@ -11,37 +11,37 @@ import (
func MetricedStorMinerAPI(a api.StorageMiner) api.StorageMiner {
var out api.StorageMinerStruct
proxy(a, &out.Internal)
proxy(a, &out.CommonStruct.Internal)
proxy(a, &out)
return &out
}
func MetricedFullAPI(a api.FullNode) api.FullNode {
var out api.FullNodeStruct
proxy(a, &out.Internal)
proxy(a, &out.CommonStruct.Internal)
proxy(a, &out)
return &out
}
func MetricedWorkerAPI(a api.Worker) api.Worker {
var out api.WorkerStruct
proxy(a, &out.Internal)
proxy(a, &out)
return &out
}
func MetricedWalletAPI(a api.Wallet) api.Wallet {
var out api.WalletStruct
proxy(a, &out.Internal)
proxy(a, &out)
return &out
}
func MetricedGatewayAPI(a api.Gateway) api.Gateway {
var out api.GatewayStruct
proxy(a, &out.Internal)
proxy(a, &out)
return &out
}
func proxy(in interface{}, out interface{}) {
func proxy(in interface{}, outstr interface{}) {
outs := api.GetInternalStructs(outstr)
for _, out := range outs {
rint := reflect.ValueOf(out).Elem()
ra := reflect.ValueOf(in)
@ -59,6 +59,6 @@ func proxy(in interface{}, out interface{}) {
args[0] = reflect.ValueOf(ctx)
return fn.Call(args)
}))
}
}
}
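
The proxy helper no longer assumes a single Internal field: api.GetInternalStructs returns every internal struct embedded in the API struct (CommonStruct, NetStruct, and so on), and each function field gets wrapped. A self-contained sketch of the underlying reflection pattern follows, reduced to one struct and with a timing log standing in for the metrics hook; it is illustrative, not the lotus helper itself.

```go
package main

import (
	"context"
	"fmt"
	"reflect"
	"time"
)

// Internal mimics a lotus-style API struct: every method is exposed as a
// function field, so a proxy can be installed by rewriting the fields.
type Internal struct {
	Version func(context.Context) (string, error)
}

type impl struct{}

func (impl) Version(ctx context.Context) (string, error) { return "v1.11.0", nil }

// proxy fills every func field of out with a wrapper around the matching
// method of in; the timing log stands in for the real metrics hook.
func proxy(in interface{}, out interface{}) {
	rint := reflect.ValueOf(out).Elem()
	ra := reflect.ValueOf(in)

	for f := 0; f < rint.NumField(); f++ {
		field := rint.Type().Field(f)
		fn := ra.MethodByName(field.Name)

		rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) []reflect.Value {
			start := time.Now()
			defer func() { fmt.Println(field.Name, "took", time.Since(start)) }()
			return fn.Call(args)
		}))
	}
}

func main() {
	var out Internal
	proxy(impl{}, &out)

	v, _ := out.Version(context.Background())
	fmt.Println("version:", v)
}
```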

View File

@ -6,6 +6,7 @@ import (
"os"
"time"
"github.com/filecoin-project/lotus/node/impl/net"
metricsi "github.com/ipfs/go-metrics-interface"
"github.com/filecoin-project/lotus/api"
@ -36,7 +37,6 @@ import (
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node/config"
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/node/impl/common/mock"
"github.com/filecoin-project/lotus/node/modules"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/helpers"
@ -245,11 +245,11 @@ func ConfigCommon(cfg *config.Common, enableLibp2pNode bool) Option {
}),
ApplyIf(func(s *Settings) bool { return s.Base }), // apply only if Base has already been applied
If(!enableLibp2pNode,
Override(new(common.NetAPI), From(new(mock.MockNetAPI))),
Override(new(api.Net), new(api.NetStub)),
Override(new(api.Common), From(new(common.CommonAPI))),
),
If(enableLibp2pNode,
Override(new(common.NetAPI), From(new(common.Libp2pNetAPI))),
Override(new(api.Net), From(new(net.NetAPI))),
Override(new(api.Common), From(new(common.CommonAPI))),
Override(StartListeningKey, lp2p.StartListening(cfg.Libp2p.ListenAddresses)),
Override(ConnectionManagerKey, lp2p.ConnectionManager(
@ -312,12 +312,14 @@ func Repo(r repo.Repo) Option {
Override(new(dtypes.BasicStateBlockstore), modules.StateSplitBlockstore),
Override(new(dtypes.BaseBlockstore), From(new(dtypes.SplitBlockstore))),
Override(new(dtypes.ExposedBlockstore), modules.ExposedSplitBlockstore),
Override(new(dtypes.GCReferenceProtector), modules.SplitBlockstoreGCReferenceProtector),
),
If(!cfg.EnableSplitstore,
Override(new(dtypes.BasicChainBlockstore), modules.ChainFlatBlockstore),
Override(new(dtypes.BasicStateBlockstore), modules.StateFlatBlockstore),
Override(new(dtypes.BaseBlockstore), From(new(dtypes.UniversalBlockstore))),
Override(new(dtypes.ExposedBlockstore), From(new(dtypes.UniversalBlockstore))),
Override(new(dtypes.GCReferenceProtector), modules.NoopGCReferenceProtector),
),
Override(new(dtypes.ChainBlockstore), From(new(dtypes.BasicChainBlockstore))),

View File

@ -251,6 +251,8 @@ type Splitstore struct {
ColdStoreType string
HotStoreType string
MarkSetType string
HotStoreMessageRetention uint64
}
// // Full Node

View File

@ -436,7 +436,19 @@ func (a *API) ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid)
if piece != nil && !piece.Equals(*p.PieceCID) {
continue
}
out = append(out, a.makeRetrievalQuery(ctx, p, root, piece, rm.QueryParams{}))
// Do not rely on locally cached data for the peer ID;
// fetch an up-to-date miner peer ID from the chain.
mi, err := a.StateMinerInfo(ctx, p.Address, types.EmptyTSK)
if err != nil {
return nil, err
}
pp := rm.RetrievalPeer{
Address: p.Address,
ID: *mi.PeerId,
}
out = append(out, a.makeRetrievalQuery(ctx, pp, root, piece, rm.QueryParams{}))
}
return out, nil
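
The comment in this hunk carries the rationale: the retrieval query should not trust the peer ID stored in the local deal record, because the miner may have rotated it on chain, so StateMinerInfo is the authoritative source. A heavily simplified sketch of that lookup order, using hypothetical stand-in types rather than the lotus API:

```go
package main

import (
	"context"
	"fmt"
)

// Hypothetical, simplified stand-ins for the lotus types involved; the point
// is only the lookup order: the peer ID comes from current chain state, not
// from the locally cached deal record.
type MinerInfo struct{ PeerId string }

type StateAPI interface {
	StateMinerInfo(ctx context.Context, miner string) (MinerInfo, error)
}

type RetrievalPeer struct {
	Address string
	ID      string
}

// retrievalPeer builds the peer to query, trusting the chain for the peer ID.
func retrievalPeer(ctx context.Context, st StateAPI, minerAddr string) (RetrievalPeer, error) {
	mi, err := st.StateMinerInfo(ctx, minerAddr)
	if err != nil {
		return RetrievalPeer{}, err
	}
	return RetrievalPeer{Address: minerAddr, ID: mi.PeerId}, nil
}

type fakeState struct{}

func (fakeState) StateMinerInfo(ctx context.Context, miner string) (MinerInfo, error) {
	return MinerInfo{PeerId: "12D3KooWExamplePeer"}, nil
}

func main() {
	p, _ := retrievalPeer(context.Background(), fakeState{}, "f01234")
	fmt.Println(p)
}
```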

View File

@ -10,8 +10,8 @@ import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
@ -21,8 +21,6 @@ var session = uuid.New()
type CommonAPI struct {
fx.In
NetAPI
APISecret *dtypes.APIAlg
ShutdownChan dtypes.ShutdownChan
}
@ -48,6 +46,24 @@ func (a *CommonAPI) AuthNew(ctx context.Context, perms []auth.Permission) ([]byt
return jwt.Sign(&p, (*jwt.HMACSHA)(a.APISecret))
}
func (a *CommonAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
return build.OpenRPCDiscoverJSON_Full(), nil
}
func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) {
v, err := api.VersionForType(api.RunningNodeType)
if err != nil {
return api.APIVersion{}, err
}
return api.APIVersion{
Version: build.UserVersion(),
APIVersion: v,
BlockDelay: build.BlockDelaySecs,
}, nil
}
func (a *CommonAPI) LogList(context.Context) ([]string, error) {
return logging.GetSubsystems(), nil
}
@ -68,17 +84,3 @@ func (a *CommonAPI) Session(ctx context.Context) (uuid.UUID, error) {
func (a *CommonAPI) Closing(ctx context.Context) (<-chan struct{}, error) {
return make(chan struct{}), nil // relies on jsonrpc closing
}
func (a *CommonAPI) Version(context.Context) (api.APIVersion, error) {
v, err := api.VersionForType(api.RunningNodeType)
if err != nil {
return api.APIVersion{}, err
}
return api.APIVersion{
Version: build.UserVersion(),
APIVersion: v,
BlockDelay: build.BlockDelaySecs,
}, nil
}

View File

@ -1,101 +0,0 @@
package mock
import (
"context"
"errors"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
"go.uber.org/fx"
)
var (
errNotImplemented = errors.New("not implemented")
)
type MockNetAPI struct {
fx.In
}
func (a *MockNetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
return "", errNotImplemented
}
func (a *MockNetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (conn network.Connectedness, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
return errNotImplemented
}
func (a *MockNetAPI) NetAddrsListen(context.Context) (ai peer.AddrInfo, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetDisconnect(ctx context.Context, p peer.ID) error {
return errNotImplemented
}
func (a *MockNetAPI) NetFindPeer(ctx context.Context, p peer.ID) (ai peer.AddrInfo, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetBandwidthStats(ctx context.Context) (s metrics.Stats, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
return nil, errNotImplemented
}
func (a *MockNetAPI) ID(context.Context) (p peer.ID, err error) {
err = errNotImplemented
return
}
func (a *MockNetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
return errNotImplemented
}
func (a *MockNetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
return errNotImplemented
}
func (a *MockNetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) {
err = errNotImplemented
return
}

View File

@ -1,34 +0,0 @@
package common
import (
"context"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
)
type NetAPI interface {
NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error)
NetPubsubScores(context.Context) ([]api.PubsubScore, error)
NetPeers(context.Context) ([]peer.AddrInfo, error)
NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error)
NetConnect(ctx context.Context, p peer.AddrInfo) error
NetAddrsListen(context.Context) (peer.AddrInfo, error)
NetDisconnect(ctx context.Context, p peer.ID) error
NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error)
NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error)
NetAgentVersion(ctx context.Context, p peer.ID) (string, error)
NetBandwidthStats(ctx context.Context) (metrics.Stats, error)
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error)
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error)
Discover(ctx context.Context) (apitypes.OpenRPCDocument, error)
ID(context.Context) (peer.ID, error)
NetBlockAdd(ctx context.Context, acl api.NetBlockList) error
NetBlockRemove(ctx context.Context, acl api.NetBlockList) error
NetBlockList(ctx context.Context) (api.NetBlockList, error)
}

View File

@ -14,6 +14,7 @@ import (
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/node/impl/full"
"github.com/filecoin-project/lotus/node/impl/market"
"github.com/filecoin-project/lotus/node/impl/net"
"github.com/filecoin-project/lotus/node/impl/paych"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/lp2p"
@ -23,6 +24,7 @@ var log = logging.Logger("node")
type FullNodeAPI struct {
common.CommonAPI
net.NetAPI
full.ChainAPI
client.API
full.MpoolAPI

View File

@ -1,4 +1,4 @@
package common
package net
import (
"context"
@ -14,7 +14,7 @@ import (
var cLog = logging.Logger("conngater")
func (a *Libp2pNetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
func (a *NetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) error {
for _, p := range acl.Peers {
err := a.ConnGater.BlockPeer(p)
if err != nil {
@ -89,7 +89,7 @@ func (a *Libp2pNetAPI) NetBlockAdd(ctx context.Context, acl api.NetBlockList) er
return nil
}
func (a *Libp2pNetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
func (a *NetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList) error {
for _, p := range acl.Peers {
err := a.ConnGater.UnblockPeer(p)
if err != nil {
@ -124,7 +124,7 @@ func (a *Libp2pNetAPI) NetBlockRemove(ctx context.Context, acl api.NetBlockList)
return nil
}
func (a *Libp2pNetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) {
func (a *NetAPI) NetBlockList(ctx context.Context) (result api.NetBlockList, err error) {
result.Peers = a.ConnGater.ListBlockedPeers()
for _, ip := range a.ConnGater.ListBlockedAddrs() {
result.IPAddrs = append(result.IPAddrs, ip.String())

View File

@ -1,29 +1,28 @@
package common
package net
import (
"context"
"sort"
"strings"
"go.uber.org/fx"
"github.com/libp2p/go-libp2p-core/host"
metrics "github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/metrics"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/libp2p/go-libp2p-core/protocol"
swarm "github.com/libp2p/go-libp2p-swarm"
basichost "github.com/libp2p/go-libp2p/p2p/host/basic"
"github.com/libp2p/go-libp2p/p2p/net/conngater"
ma "github.com/multiformats/go-multiaddr"
"go.uber.org/fx"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/modules/lp2p"
)
type Libp2pNetAPI struct {
type NetAPI struct {
fx.In
RawHost lp2p.RawHost
@ -34,11 +33,15 @@ type Libp2pNetAPI struct {
Sk *dtypes.ScoreKeeper
}
func (a *Libp2pNetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) {
func (a *NetAPI) ID(context.Context) (peer.ID, error) {
return a.Host.ID(), nil
}
func (a *NetAPI) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) {
return a.Host.Network().Connectedness(pid), nil
}
func (a *Libp2pNetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) {
func (a *NetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, error) {
scores := a.Sk.Get()
out := make([]api.PubsubScore, len(scores))
i := 0
@ -54,7 +57,7 @@ func (a *Libp2pNetAPI) NetPubsubScores(context.Context) ([]api.PubsubScore, erro
return out, nil
}
func (a *Libp2pNetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
func (a *NetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
conns := a.Host.Network().Conns()
out := make([]peer.AddrInfo, len(conns))
@ -70,7 +73,7 @@ func (a *Libp2pNetAPI) NetPeers(context.Context) ([]peer.AddrInfo, error) {
return out, nil
}
func (a *Libp2pNetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) {
func (a *NetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) {
info := &api.ExtendedPeerInfo{ID: p}
agent, err := a.Host.Peerstore().Get(p, "AgentVersion")
@ -101,7 +104,7 @@ func (a *Libp2pNetAPI) NetPeerInfo(_ context.Context, p peer.ID) (*api.ExtendedP
return info, nil
}
func (a *Libp2pNetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
func (a *NetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
if swrm, ok := a.Host.Network().(*swarm.Swarm); ok {
swrm.Backoff().Clear(p.ID)
}
@ -109,22 +112,22 @@ func (a *Libp2pNetAPI) NetConnect(ctx context.Context, p peer.AddrInfo) error {
return a.Host.Connect(ctx, p)
}
func (a *Libp2pNetAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) {
func (a *NetAPI) NetAddrsListen(context.Context) (peer.AddrInfo, error) {
return peer.AddrInfo{
ID: a.Host.ID(),
Addrs: a.Host.Addrs(),
}, nil
}
func (a *Libp2pNetAPI) NetDisconnect(ctx context.Context, p peer.ID) error {
func (a *NetAPI) NetDisconnect(ctx context.Context, p peer.ID) error {
return a.Host.Network().ClosePeer(p)
}
func (a *Libp2pNetAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
func (a *NetAPI) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
return a.Router.FindPeer(ctx, p)
}
func (a *Libp2pNetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
func (a *NetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err error) {
autonat := a.RawHost.(*basichost.BasicHost).GetAutoNat()
if autonat == nil {
@ -148,7 +151,7 @@ func (a *Libp2pNetAPI) NetAutoNatStatus(ctx context.Context) (i api.NatInfo, err
}, nil
}
func (a *Libp2pNetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
func (a *NetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) {
ag, err := a.Host.Peerstore().Get(p, "AgentVersion")
if err != nil {
return "", err
@ -161,11 +164,11 @@ func (a *Libp2pNetAPI) NetAgentVersion(ctx context.Context, p peer.ID) (string,
return ag.(string), nil
}
func (a *Libp2pNetAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) {
func (a *NetAPI) NetBandwidthStats(ctx context.Context) (metrics.Stats, error) {
return a.Reporter.GetBandwidthTotals(), nil
}
func (a *Libp2pNetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) {
func (a *NetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) {
out := make(map[string]metrics.Stats)
for p, s := range a.Reporter.GetBandwidthByPeer() {
out[p.String()] = s
@ -173,14 +176,8 @@ func (a *Libp2pNetAPI) NetBandwidthStatsByPeer(ctx context.Context) (map[string]
return out, nil
}
func (a *Libp2pNetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) {
func (a *NetAPI) NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) {
return a.Reporter.GetBandwidthByProtocol(), nil
}
func (a *Libp2pNetAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) {
return build.OpenRPCDiscoverJSON_Full(), nil
}
func (a *Libp2pNetAPI) ID(context.Context) (peer.ID, error) {
return a.Host.ID(), nil
}
var _ api.Net = &NetAPI{}

View File

@ -17,6 +17,7 @@ import (
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/peer"
"go.uber.org/fx"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
@ -38,7 +39,6 @@ import (
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl/common"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/storage"
"github.com/filecoin-project/lotus/storage/sectorblocks"
@ -46,7 +46,10 @@ import (
)
type StorageMinerAPI struct {
common.CommonAPI
fx.In
api.Common
api.Net
Full api.FullNode
LocalStore *stores.Local

View File

@ -80,6 +80,7 @@ func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.Locked
cfg := &splitstore.Config{
MarkSetType: cfg.Splitstore.MarkSetType,
DiscardColdBlocks: cfg.Splitstore.ColdStoreType == "discard",
HotStoreMessageRetention: cfg.Splitstore.HotStoreMessageRetention,
}
ss, err := splitstore.Open(path, ds, hot, cold, cfg)
if err != nil {
@ -95,6 +96,14 @@ func SplitBlockstore(cfg *config.Chainstore) func(lc fx.Lifecycle, r repo.Locked
}
}
func SplitBlockstoreGCReferenceProtector(_ fx.Lifecycle, s dtypes.SplitBlockstore) dtypes.GCReferenceProtector {
return s.(dtypes.GCReferenceProtector)
}
func NoopGCReferenceProtector(_ fx.Lifecycle) dtypes.GCReferenceProtector {
return dtypes.NoopGCReferenceProtector{}
}
func ExposedSplitBlockstore(_ fx.Lifecycle, s dtypes.SplitBlockstore) dtypes.ExposedBlockstore {
return s.(*splitstore.SplitStore).Expose()
}

View File

@ -58,7 +58,7 @@ func ChainBlockService(bs dtypes.ExposedBlockstore, rem dtypes.ChainBitswap) dty
return blockservice.New(bs, rem)
}
func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal) (*messagepool.MessagePool, error) {
func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) {
mp, err := messagepool.New(mpp, ds, nn, j)
if err != nil {
return nil, xerrors.Errorf("constructing mpool: %w", err)
@ -68,6 +68,7 @@ func MessagePool(lc fx.Lifecycle, mpp messagepool.Provider, ds dtypes.MetadataDS
return mp.Close()
},
})
protector.AddProtector(mp.ForEachPendingMessage)
return mp, nil
}

View File

@ -0,0 +1,13 @@
package dtypes
import (
cid "github.com/ipfs/go-cid"
)
type GCReferenceProtector interface {
AddProtector(func(func(cid.Cid) error) error)
}
type NoopGCReferenceProtector struct{}
func (p NoopGCReferenceProtector) AddProtector(func(func(cid.Cid) error) error) {}
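
GCReferenceProtector lets components register callbacks that enumerate CIDs the splitstore GC must keep; in the preceding hunk the message pool registers ForEachPendingMessage so pending messages survive compaction. A hedged sketch of how a concrete protector on the blockstore side could collect those references (the gcProtector type here is illustrative, not the splitstore implementation):

```go
package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multihash"
)

// gcProtector is an illustrative implementation of the interface above:
// components register callbacks, and a GC pass runs them to learn which
// CIDs must not be purged.
type gcProtector struct {
	protectors []func(func(cid.Cid) error) error
}

func (p *gcProtector) AddProtector(f func(func(cid.Cid) error) error) {
	p.protectors = append(p.protectors, f)
}

// protected runs every registered protector and collects the referenced CIDs.
func (p *gcProtector) protected() (map[cid.Cid]struct{}, error) {
	set := make(map[cid.Cid]struct{})
	for _, protect := range p.protectors {
		if err := protect(func(c cid.Cid) error {
			set[c] = struct{}{}
			return nil
		}); err != nil {
			return nil, err
		}
	}
	return set, nil
}

func main() {
	var p gcProtector

	// Stand-in for mp.ForEachPendingMessage: report a single pending CID.
	pending, _ := cid.V1Builder{Codec: cid.Raw, MhType: multihash.IDENTITY}.Sum([]byte("pending-msg"))
	p.AddProtector(func(f func(cid.Cid) error) error { return f(pending) })

	set, _ := p.protected()
	fmt.Println("protected CIDs:", len(set))
}
```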

View File

@ -327,6 +327,21 @@ func (fsr *fsLockedRepo) Blockstore(ctx context.Context, domain BlockstoreDomain
return
}
//
// Tri-state environment variable LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC
// - unset == the default (currently fsync enabled)
// - set with a false-y value == fsync enabled no matter what a future default is
// - set with any other value == fsync is disabled regardless of defaults (recommended for day-to-day use)
//
if nosyncBs, nosyncBsSet := os.LookupEnv("LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC"); nosyncBsSet {
nosyncBs = strings.ToLower(nosyncBs)
if nosyncBs == "" || nosyncBs == "0" || nosyncBs == "false" || nosyncBs == "no" {
opts.SyncWrites = true
} else {
opts.SyncWrites = false
}
}
bs, err := badgerbs.Open(opts)
if err != nil {
fsr.bsErr = err
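
The tri-state handling of LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC above is easiest to read when factored out. A small sketch of the same rule as a standalone helper; envFsyncEnabled is hypothetical and not part of lotus.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// envFsyncEnabled applies the tri-state rule described in the comment above:
// unset keeps the current default, a false-y value forces fsync on,
// and anything else forces fsync off.
func envFsyncEnabled(defaultOn bool) bool {
	v, set := os.LookupEnv("LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC")
	if !set {
		return defaultOn
	}
	switch strings.ToLower(v) {
	case "", "0", "false", "no":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println("SyncWrites:", envFsyncEnabled(true))
}
```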

View File

@ -134,7 +134,7 @@ func (m *Miner) SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnC
LastErr: info.LastErr,
Log: log,
// on chain info
SealProof: 0,
SealProof: info.SectorType,
Activation: 0,
Expiration: 0,
DealWeight: big.Zero(),

View File

@ -659,6 +659,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t
if !bytes.Equal(checkRand, rand) {
log.Warnw("windowpost randomness changed", "old", rand, "new", checkRand, "ts-height", ts.Height(), "challenge-height", di.Challenge, "tsk", ts.Key())
rand = checkRand
continue
}