# Lotus changelog

# 1.15.0 / 2022-03-09

This is an optional release with retrieval improvements (client side), SP UX improvements around unsealing, snap deals, regular deal making, and many other new features, improvements and bug fixes.

## Highlights

- feat: sealing: StartEpochSealingBuffer triggers packing on time ([filecoin-project/lotus#7905](https://github.com/filecoin-project/lotus/pull/7905))
  - Use the `StartEpochSealingBuffer` configuration variable to enforce that sectors are packed for sealing/updating, no matter how many deals they have, whenever the nearest deal start date is close enough to the present (see the config sketch after this list).
- feat: #6017 market: retrieval ask CLI command ([filecoin-project/lotus#7814](https://github.com/filecoin-project/lotus/pull/7814))
- feat(graphsync): allow setting of per-peer incoming requests for miners ([filecoin-project/lotus#7578](https://github.com/filecoin-project/lotus/pull/7578))
  - Enabled by setting `SimultaneousTransfersForStoragePerClient` in the deal making configuration.
- Make retrieval even faster ([filecoin-project/lotus#7746](https://github.com/filecoin-project/lotus/pull/7746))
- feat: #7747 sealing: Add a config variable for capping the number of concurrent unsealing jobs (#7884) ([filecoin-project/lotus#7884](https://github.com/filecoin-project/lotus/pull/7884))
  - Enabled by setting `MaxConcurrentUnseals` in `DAGStoreConfig`.
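
The three settings called out above are all plain `config.toml` entries on the miner side. Below is a minimal sketch with illustrative values, assuming the `[Dealmaking]` and `[DAGStore]` sections used by the default lotus-miner config of this era; verify the exact layout with `lotus-miner config default` for your version.

```toml
[Dealmaking]
  # Start sealing/updating a sector this many epochs before the earliest deal
  # start epoch, even if the sector is not yet full (illustrative value).
  StartEpochSealingBuffer = 480
  # Cap simultaneous incoming storage transfers per client peer (illustrative value).
  SimultaneousTransfersForStoragePerClient = 5

[DAGStore]
  # Cap the number of unsealing jobs that can run at the same time (illustrative value).
  MaxConcurrentUnseals = 5
```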

## New Features

- feat: mpool: Cache state nonces ([filecoin-project/lotus#8005](https://github.com/filecoin-project/lotus/pull/8005))
- chore: build: make the OhSnap epoch configurable by an envvar for devnets ([filecoin-project/lotus#7995](https://github.com/filecoin-project/lotus/pull/7995))
- Shed: Add a util to send a batch of messages ([filecoin-project/lotus#7667](https://github.com/filecoin-project/lotus/pull/7667))
- Add api for transfer diagnostics ([filecoin-project/lotus#7759](https://github.com/filecoin-project/lotus/pull/7759))
- Shed: Add a util to list terminated deals ([filecoin-project/lotus#7774](https://github.com/filecoin-project/lotus/pull/7774))
- Expose EnableGasTracing as an env_var ([filecoin-project/lotus#7750](https://github.com/filecoin-project/lotus/pull/7750))
- Command to list active sector locks ([filecoin-project/lotus#7735](https://github.com/filecoin-project/lotus/pull/7735))
- Initial switch to OpenTelemetry ([filecoin-project/lotus#7725](https://github.com/filecoin-project/lotus/pull/7725))

## Improvements

- splitstore sortless compaction ([filecoin-project/lotus#8008](https://github.com/filecoin-project/lotus/pull/8008))
- perf: chain: Make drand logs in daemon less noisy (#7955) ([filecoin-project/lotus#7955](https://github.com/filecoin-project/lotus/pull/7955))
- chore: shed: storage stats 2.0 ([filecoin-project/lotus#7941](https://github.com/filecoin-project/lotus/pull/7941))
- misc: api: Annotate lotus tests according to listed behaviors ([filecoin-project/lotus#7835](https://github.com/filecoin-project/lotus/pull/7835))
- some basic splitstore refactors ([filecoin-project/lotus#7999](https://github.com/filecoin-project/lotus/pull/7999))
- chore: sealer: quieten a log ([filecoin-project/lotus#7998](https://github.com/filecoin-project/lotus/pull/7998))
- tvx: supply network version when extracting messages. ([filecoin-project/lotus#7996](https://github.com/filecoin-project/lotus/pull/7996))
- chore: remove inaccurate comment in sealtasks ([filecoin-project/lotus#7977](https://github.com/filecoin-project/lotus/pull/7977))
- Refactor: VM: Remove the NetworkVersionGetter ([filecoin-project/lotus#7818](https://github.com/filecoin-project/lotus/pull/7818))
- refactor: state: Move randomness versioning out of the VM ([filecoin-project/lotus#7816](https://github.com/filecoin-project/lotus/pull/7816))
- updating to new datastore/blockstore code with contexts ([filecoin-project/lotus#7646](https://github.com/filecoin-project/lotus/pull/7646))
- Mempool msg selection should respect block message limits ([filecoin-project/lotus#7321](https://github.com/filecoin-project/lotus/pull/7321))
- Minor improvement for OpenTelemetry ([filecoin-project/lotus#7760](https://github.com/filecoin-project/lotus/pull/7760))
- Sort lotus-miner retrieval-deals by dealId ([filecoin-project/lotus#7749](https://github.com/filecoin-project/lotus/pull/7749))
- dagstore pieceReader: Always read full in ReadAt ([filecoin-project/lotus#7737](https://github.com/filecoin-project/lotus/pull/7737))

## Bug Fixes

- fix: sealing: Stop recovery attempts after fault ([filecoin-project/lotus#8014](https://github.com/filecoin-project/lotus/pull/8014))
- fix:snap: pay for the collateral difference needed if the miner available balance is insufficient ([filecoin-project/lotus#8234](https://github.com/filecoin-project/lotus/pull/8234))
- sealer: fix error message ([filecoin-project/lotus#8136](https://github.com/filecoin-project/lotus/pull/8136))
- typo in variable name ([filecoin-project/lotus#8134](https://github.com/filecoin-project/lotus/pull/8134))
- fix: sealer: allow enable/disabling ReplicaUpdate tasks ([filecoin-project/lotus#8093](https://github.com/filecoin-project/lotus/pull/8093))
- chore: chain: fix log ([filecoin-project/lotus#7993](https://github.com/filecoin-project/lotus/pull/7993))
- Fix: chain: create a new VM for each epoch ([filecoin-project/lotus#7966](https://github.com/filecoin-project/lotus/pull/7966))
- fix: doc generation struct slice example value ([filecoin-project/lotus#7851](https://github.com/filecoin-project/lotus/pull/7851))
- fix: returned error not be accept correctly ([filecoin-project/lotus#7852](https://github.com/filecoin-project/lotus/pull/7852))
- fix: #7577 markets: When retrying Add Piece, first seek to start of reader ([filecoin-project/lotus#7812](https://github.com/filecoin-project/lotus/pull/7812))
- misc: n/a sealing: Fix grammatical error in a log warning message ([filecoin-project/lotus#7831](https://github.com/filecoin-project/lotus/pull/7831))
- sectors update-state checks if sector exists before changing its state ([filecoin-project/lotus#7762](https://github.com/filecoin-project/lotus/pull/7762))
- SplitStore: supress compaction near upgrades ([filecoin-project/lotus#7734](https://github.com/filecoin-project/lotus/pull/7734))

## Dependency Updates

- github.com/filecoin-project/go-commp-utils (v0.1.2 -> v0.1.3):
- github.com/filecoin-project/dagstore (v0.4.3 -> v0.4.4):
- github.com/filecoin-project/go-fil-markets (v1.13.4 -> v1.19.2):
- github.com/filecoin-project/go-statestore (v0.1.1 -> v0.2.0):
- github.com/filecoin-project/go-storedcounter (v0.0.0-20200421200003-1c99c62e8a5b -> v0.1.0):
- github.com/filecoin-project/specs-actors/v2 (v2.3.5 -> v2.3.6):
- feat(deps): update markets stack ([filecoin-project/lotus#7959](https://github.com/filecoin-project/lotus/pull/7959))
- Use go-libp2p-connmgr v0.3.1 ([filecoin-project/lotus#7957](https://github.com/filecoin-project/lotus/pull/7957))
- dep/fix 7701 Dependency: update to ipld-legacy to v0.1.1 ([filecoin-project/lotus#7751](https://github.com/filecoin-project/lotus/pull/7751))

## Others

- chore: backport: release ([filecoin-project/lotus#8245](https://github.com/filecoin-project/lotus/pull/8245))
- Lotus release v1.15.0-rc3 ([filecoin-project/lotus#8236](https://github.com/filecoin-project/lotus/pull/8236))
- Lotus release v1.15.0-rc2 ([filecoin-project/lotus#8211](https://github.com/filecoin-project/lotus/pull/8211))
- Merge branch 'releases' into release/v1.15.0
- chore: build: backport releases ([filecoin-project/lotus#8193](https://github.com/filecoin-project/lotus/pull/8193))
- Merge branch 'releases' into release/v1.15.0
- bump the version to v1.15.0-rc1
- chore: build: v1.14.0 -> master ([filecoin-project/lotus#8053](https://github.com/filecoin-project/lotus/pull/8053))
- chore: merge release/v1.14.0 PRs into master ([filecoin-project/lotus#7979](https://github.com/filecoin-project/lotus/pull/7979))
- chore: update PR template ([filecoin-project/lotus#7918](https://github.com/filecoin-project/lotus/pull/7918))
- build: release: bump master version to v1.15.0-dev ([filecoin-project/lotus#7922](https://github.com/filecoin-project/lotus/pull/7922))
- misc: docs: remove issue number from the pr title ([filecoin-project/lotus#7902](https://github.com/filecoin-project/lotus/pull/7902))
- Snapcraft grade no develgrade ([filecoin-project/lotus#7802](https://github.com/filecoin-project/lotus/pull/7802))
- chore: create pull_request_template.md ([filecoin-project/lotus#7726](https://github.com/filecoin-project/lotus/pull/7726))
- Disable appimage ([filecoin-project/lotus#7707](https://github.com/filecoin-project/lotus/pull/7707))

## Contributors

| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| @arajasek | 73 | +7232/-2778 | 386 |
| @zenground0 | 27 | +5604/-1049 | 219 |
| @vyzo | 118 | +4356/-1470 | 253 |
| @zl | 1 | +3725/-309 | 8 |
| @dirkmc | 7 | +1392/-1110 | 61 |
| arajasek | 37 | +221/-1329 | 90 |
| @magik6k | 33 | +1138/-336 | 101 |
| @whyrusleeping | 2 | +483/-585 | 28 |
| Darko Brdareski | 14 | +725/-276 | 154 |
| @rvagg | 2 | +43/-947 | 10 |
| @hannahhoward | 5 | +436/-335 | 31 |
| @hannahhoward | 12 | +507/-133 | 37 |
| @jennijuju | 27 | +333/-178 | 54 |
| @TheMenko | 8 | +237/-179 | 17 |
| c r | 2 | +227/-45 | 12 |
| @dirkmck | 12 | +188/-40 | 27 |
| @ribasushi | 3 | +128/-62 | 3 |
| @raulk | 6 | +128/-49 | 9 |
| @Whyrusleeping | 1 | +76/-70 | 8 |
| @Stebalien | 1 | +55/-37 | 1 |
| @jennijuju | 11 | +29/-16 | 11 |
| @aarshkshah1992 | 1 | +23/-19 | 5 |
| @travisperson | 1 | +0/-18 | 2 |
| @gstuart | 3 | +12/-1 | 3 |
| @coryschwartz | 4 | +5/-6 | 4 |
| @pefish | 1 | +4/-3 | 1 |
| @Kubuxu | 1 | +5/-2 | 2 |
| Colin Kennedy | 1 | +4/-2 | 1 |
| Rob Quist | 1 | +2/-2 | 1 |
| @shotcollin | 1 | +1/-1 | 1 |

# 1.14.4 / 2022-03-03

This is a *highly recommended* optional release for storage providers that are doing snap deals. It fixes the bug that caused some snap deal sectors to get stuck in `FinalizeReplicaUpdate`. In addition, SPs should now be able to force-update sector status without getting blocked by `normal shutdown of state machine`.

# v1.14.3 / 2022-02-28

This is an **optional** release that includes a fix to properly register the `--really-do-it` flag for abort-upgrade.

# 1.14.2 / 2022-02-24

This is an **optional** release of lotus with a couple more improvements to the Snap Deals experience for storage providers, in preparation for the [upcoming OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550).

Note that the network is STILL scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. All node operators, including storage providers, must upgrade to at least Lotus v1.14.0 before that time. Storage providers must update their daemons, miners, and worker(s).

Wanna know how to Snap your deal? Check [this](https://github.com/filecoin-project/lotus/discussions/8141) out!

## Bug Fixes

- fix lotus-bench for sealing jobs (#8173)
- fix: sealing: missing file type (#8180)

## Others

- Retract force-pushed v1.14.0 to work around stale gomod caches (#8159): We originally tagged v1.14.0 off the wrong commit and fixed that with a force push, which is a really bad practice since it messes up the go mod caches. Therefore, we want to retract it, and users may use v1.14.1 and above instead.

## Contributors

# 1.14.0 / 2022-02-17

This is a MANDATORY release of Lotus that introduces [Filecoin network v15, codenamed the OhSnap upgrade](https://github.com/filecoin-project/community/discussions/74?sort=new#discussioncomment-1922550).

The network is scheduled to upgrade to v15 on March 1st at 2022-03-01T15:00:00Z. All node operators, including storage providers, must upgrade to this release (or a later release) before that time. Storage providers must update their daemons, miners, and worker(s).

- Upgrade the Lotus daemon and miner **when the previous step is complete**

All node operators, including storage providers, should be aware that a pre-migration will begin at 2022-03-01T13:30:00Z (90 minutes before the real upgrade). The pre-migration will take between 20 and 50 minutes, depending on hardware specs. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries.

## New Features and Changes

- Integrate actor v7-rc1:
  - Integrate v7 actors ([#7617](https://github.com/filecoin-project/lotus/pull/7617))
- Fix: state: circsuypply calc around null blocks ([#7890](https://github.com/filecoin-project/lotus/pull/7890))
- Mempool msg selection should respect block message limits ([#7321](https://github.com/filecoin-project/lotus/pull/7321))
- SplitStore: supress compaction near upgrades ([#7734](https://github.com/filecoin-project/lotus/pull/7734))

## Others

- chore: create pull_request_template.md ([#7726](https://github.com/filecoin-project/lotus/pull/7726))

# v1.13.2 / 2022-01-09

Lotus v1.13.2 is a *highly recommended* feature release with remarkable retrieval improvements and new features like worker management, schedule enhancements and so on.

## Highlights

- 🚀🚀🚀 Improve retrieval deal experience
  - Testing results with MinerX.3 show that the retrieval deal success rate has increased dramatically, with faster transfer speeds. You can join or follow along with further performance testing [here](https://github.com/filecoin-project/lotus/discussions/7874). We recommend application developers integrate with the new retrieval APIs to provide a better client experience.
- 🌟🌟🌟 Reduce retrieval Time-To-First-Byte over 100x ([#7693](https://github.com/filecoin-project/lotus/pull/7693))
  - This change makes most free, small retrievals sub-second

| @jennijuju | 1 | +1/-1 | 1 |
| @hunjixin | 1 | +1/-0 | 1 |

# v1.13.1 / 2021-11-26

| @hannahhoward | 1 | +3/-2 | 2 |
| Marten Seemann | 1 | +3/-0 | 1 |
| @ZenGround0 | 1 | +1/-1 | 1 |

# v1.13.0 / 2021-10-18

Lotus v1.13.0 is a *highly recommended* feature release for all lotus users (i.e. storage providers, data brokers, application developers and so on) that supports the upcoming [Network v14 Chocolate upgrade](https://github.com/filecoin-project/lotus/discussions/7431).
This feature release includes the latest functionalities and improvements, like data transfer rate-limiting for both storage and retrieval deals, proof v10 with CUDA support, etc. You can find more details in the Changelog below.

## Highlights

- Enable separate storage and retrieval transfer limits ([filecoin-project/lotus#7405](https://github.com/filecoin-project/lotus/pull/7405))
  - `SimultaneousTransfer` is now replaced by `SimultaneousTransfersForStorage` and `SimultaneousTransfersForRetrieval`, which let users separately set the number of parallel ongoing data transfers for storage and retrieval deals. The default value for both is 20 (see the config sketch after this list).
  - If you are using the lotus client, these two configuration variables are under the `Client` section in `~/.lotus/config.toml`.
  - If you are a service provider, these two configuration variables should be set under the `Dealmaking` section in `~/.lotusminer/config.toml`.
- Update proofs to v10.0.0 ([filecoin-project/lotus#7420](https://github.com/filecoin-project/lotus/pull/7420))
  - This version supports CUDA. To enable CUDA instead of OpenCL, build lotus with `FFI_USE_CUDA=1 FFI_BUILD_FROM_SOURCE=1 ...`.
  - You can find additional Nvidia driver installation instructions written by MinerX fellows [here](https://github.com/filecoin-project/lotus/discussions/7443#discussioncomment-1425274) and performance improvement results for PC2/C2/WindowPoSt computation on different profiles [here](https://github.com/filecoin-project/lotus/discussions/7443); most people observe a 30-50% decrease in computation time.
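
For reference, a minimal sketch of where the two transfer limits live, assuming the default repo paths (`~/.lotus` for the client node, `~/.lotusminer` for the miner) and the section names mentioned above; confirm against your generated default config:

```toml
# Client node (~/.lotus/config.toml)
[Client]
  SimultaneousTransfersForStorage = 20
  SimultaneousTransfersForRetrieval = 20

# Storage provider / markets node (~/.lotusminer/config.toml)
[Dealmaking]
  SimultaneousTransfersForStorage = 20
  SimultaneousTransfersForRetrieval = 20
```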

## New Features

- Feat/datamodel selector retrieval ([filecoin-project/lotus#6393](https://github.com/filecoin-project/lotus/pull/6393))
  - This introduces a new RetrievalOrder-struct field and a CLI option that takes a string representation as understood by [https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath](https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath). This allows for partial retrieval of any sub-DAG of a deal, provided the user knows the exact low-level shape of the deal contents.
  - For example, to retrieve the first entry of a UnixFS directory, run `lotus client retrieve --miner f0XXXXX --datamodel-path-selector 'Links/0/Hash' bafyROOTCID ~/output`
- Expose storage stats on the metrics endpoint ([filecoin-project/lotus#7418](https://github.com/filecoin-project/lotus/pull/7418))
- feat: Catch panic to generate report and reraise ([filecoin-project/lotus#7341](https://github.com/filecoin-project/lotus/pull/7341))
  - Set `LOTUS_PANIC_REPORT_PATH` and `LOTUS_PANIC_JOURNAL_LOOKBACK` to get reports generated when a panic occurs on your daemon, miner or workers.
- Add envconfig docs to the config ([filecoin-project/lotus#7412](https://github.com/filecoin-project/lotus/pull/7412))
  - You can now find supported env vars in [default-lotus-miner-config.toml](https://github.com/filecoin-project/lotus/blob/master/documentation/en/default-lotus-miner-config.toml).
- lotus shed: fr32 utils ([filecoin-project/lotus#7355](https://github.com/filecoin-project/lotus/pull/7355))
- Miner CLI: Allow trying to change owners of any miner actor ([filecoin-project/lotus#7328](https://github.com/filecoin-project/lotus/pull/7328))
- Add --unproven flag to the sectors list command ([filecoin-project/lotus#7308](https://github.com/filecoin-project/lotus/pull/7308))
- Prep retrieval for selectors: no functional changes ([filecoin-project/lotus#7306](https://github.com/filecoin-project/lotus/pull/7306))
- Seed: improve helptext ([filecoin-project/lotus#7304](https://github.com/filecoin-project/lotus/pull/7304))
- Mempool: reduce size of sigValCache ([filecoin-project/lotus#7305](https://github.com/filecoin-project/lotus/pull/7305))
- Stop indirectly depending on deprecated github.com/prometheus/common ([filecoin-project/lotus#7474](https://github.com/filecoin-project/lotus/pull/7474))

## Bug Fixes

- StateSearchMsg: Correct usage of the allowReplaced flag ([filecoin-project/lotus#7450](https://github.com/filecoin-project/lotus/pull/7450))

# v1.12.0 / 2021-10-12

This is a mandatory release of Lotus that introduces [Filecoin Network v14](https://github.com/filecoin-project/community/discussions/74#discussioncomment-1398542), codenamed the Chocolate upgrade. The Filecoin mainnet will upgrade at epoch 1231620, on 2021-10-26T13:30:00Z.

The Chocolate upgrade introduces the following FIPs, delivered in [v6 actors](https://github.com/filecoin-project/specs-actors/releases/tag/v6.0.0)

Note that this release is built on top of lotus v1.11.3. Enterprising users like storage providers, data brokers and others are recommended to use lotus v1.13.0 for the latest new features, improvements and bug fixes.

## New Features and Changes

- Implement and support [FIP-0024](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0024.md) BatchBalancer & BatchDiscount Post-HyperDrive Adjustment:
  - Precommit batch balancer support/config ([filecoin-project/lotus#7410](https://github.com/filecoin-project/lotus/pull/7410))
    - Set `BatchPreCommitAboveBaseFee` to decide whether to send PreCommits in individual messages or in a batch.
    - The default values of `BatchPreCommitAboveBaseFee` and `AggregateAboveBaseFee` are now updated to 0.32 nanoFIL.
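
A hedged sketch of how these two thresholds appear in the miner's `config.toml`; the `[Fees]` placement and the FIL string formatting are assumptions based on the default config of this era, so verify with `lotus-miner config default`:

```toml
[Fees]
  # Batch PreCommit messages only when the base fee is at or above this value.
  BatchPreCommitAboveBaseFee = "0.00000000032 FIL"   # 0.32 nanoFIL
  # Aggregate ProveCommits only when the base fee is at or above this value.
  AggregateAboveBaseFee = "0.00000000032 FIL"        # 0.32 nanoFIL
```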

## Dependency Updates

- Add [v6 actors](https://github.com/filecoin-project/specs-actors/releases/tag/v6.0.0)
  - **Protocol changes**
    - Multisig Approve only hashes when hash in params
    - FIP 0020 WithdrawBalance methods return withdrawn value
    - FIP 0021 Fix bug in power calculation when extending verified deals sectors
    - FIP 0022 PublishStorageDeals drops errors in batch
    - FIP 0024 BatchBalancer update and burn added to PreCommitBatch
    - FIP 0026 Add FaultMaxAge extension
    - Reduce calls to power and reward actors by passing values from power cron
    - Defensive programming hardening power cron against programmer error
  - **Implementation changes**
    - Move to xerrors
    - Improved logging: burn events are now logged with reasons and burned value.
- github.com/filecoin-project/go-state-types (v0.1.1-0.20210810190654-139e0e79e69e -> v0.1.1-0.20210915140513-d354ccf10379):

## Others

# v1.11.3 / 2021-09-29

lotus v1.11.3 is a feature release that is **highly recommended for ALL lotus users to upgrade to**, including node operators, storage providers and clients. It includes many improvements and bug fixes that result in perf improvements in different areas, like deal making, sealing and so on.

## Highlights

- 🌟🌟 Introduce `MaxStagingDealsBytes` - reject new deals if our staging deals area is full ([filecoin-project/lotus#7276](https://github.com/filecoin-project/lotus/pull/7276))
  - Set `MaxStagingDealsBytes` under the `[Dealmaking]` section of the markets subsystem's `config.toml` to reject new incoming deals when the `deal-staging` directory of the market subsystem's repo gets too large (see the config sketch after this list).
- 🌟🌟 miner: Command to list/remove expired sectors locally ([filecoin-project/lotus#7140](https://github.com/filecoin-project/lotus/pull/7140))
  - Run `./lotus-miner sectors expired -h` for more details.
- 🚀 Update ffi to update-bellperson-proofs-v9-0-2 ([filecoin-project/lotus#7369](https://github.com/filecoin-project/lotus/pull/7369))
  - MinerX fellows (early testers of lotus releases) have reported faster WindowPoSt computation!
- 🌟 dealpublisher: Fully validate deals before publishing ([filecoin-project/lotus#7234](https://github.com/filecoin-project/lotus/pull/7234))
  - This excludes expired deals before sending out a PSD message, which reduces the chances of PSD message failure due to invalid deals.
- 🌟 Simple alert system; FD limit alerts ([filecoin-project/lotus#7108](https://github.com/filecoin-project/lotus/pull/7108))
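
A minimal, hypothetical `config.toml` excerpt for the staging-area cap; the size shown is only an example, and key placement should be confirmed against the generated default config for your markets node:

```toml
[Dealmaking]
  # Reject new incoming deals once the deal-staging directory exceeds ~50 GiB.
  MaxStagingDealsBytes = 53687091200
```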

## New Features

- Turn off patch ([filecoin-project/lotus#7172](https://github.com/filecoin-project/lotus/pull/7172))
- test: disable flaky TestSimultaneousTransferLimit ([filecoin-project/lotus#7153](https://github.com/filecoin-project/lotus/pull/7153))

## Contributors

| Contributor | Commits | Lines ± | Files Changed |

# v1.11.2 / 2021-09-06

lotus v1.11.2 is a feature release that is **highly recommended for ALL lotus users to upgrade**, including node operators, storage providers and clients.

## Highlights

- 🌟🌟🌟 Introduce Dagstore and CARv2 for deal-making (#6671) ([filecoin-project/lotus#6671](https://github.com/filecoin-project/lotus/pull/6671))
  - **[lotus miner markets' Dagstore](https://docs.filecoin.io/mine/lotus/dagstore/#conceptual-overview)** is a component of the `markets` subsystem in lotus-miner. It is a sharded store that holds large IPLD graphs efficiently, packaged as location-transparent attachable CAR files, and it replaces the former Badger staging blockstore. It is designed to provide high efficiency and throughput, and to minimize resource utilization during deal-making operations. The dagstore also leverages the indexing features of [CARv2](https://github.com/ipld/ipld/blob/master/specs/transport/car/carv2/index.md) to enable plain CAR files to act as read and write blockstores, which serve as the direct medium for data exchanges in markets for both storage and retrieval deal making, without requiring intermediate buffers.
  - In the future, lotus will leverage and interact with the Dagstore a lot for new deal-making features and improvements. Therefore, it's highly recommended that lotus users go through [Lotus Miner: About the markets dagstore](https://docs.filecoin.io/mine/lotus/dagstore/#conceptual-overview) thoroughly to learn more about the Dagstore's conceptual overview, terminology, directory structure, configuration and so on.
  - **Note**:
    - When you first start your lotus-miner or market subsystem with this release, a one-time/first-time **dagstore migration** will be triggered, which replaces the former Badger staging blockstore with the dagstore. We highly recommend storage providers read this [section](https://docs.filecoin.io/mine/lotus/dagstore/#first-time-migration) to learn more about what the process does, what to expect and how to monitor it.
    - It is highly recommended to **wait for all ongoing data transfers to finish, or to cancel inbound storage deals that are still transferring**, using the `lotus-miner data-transfers cancel` command before upgrading your market nodes. The reason is that the new dagstore changes attributes in the internal deal state objects, and the paths to the staging CARs where the deal data was being placed would be lost.
    - ‼️ Having your DAGs initialized will become important in the near future for you to provide a better storage and retrieval service. We'd suggest you start [forced bulk initialization] soon if possible, as this process places a relatively high I/O workload on your storage system and is better carried out gradually over a longer timeframe. Read how to properly perform a forced bulk initialization [here](https://docs.filecoin.io/mine/lotus/dagstore/#forcing-bulk-initialization).
    - ⏮ Rollback Alert (from v1.11.2-rcX to any lower version): if a storage deal is initiated with an M1/v1.11.2(-rcX) release, it needs to reach the `StorageDealAwaitingPrecommit` state before you can do a version rollback, or the markets process may panic.
  - 💙 **Special thanks to [MinerX fellows for testing and providing valuable feedback](https://github.com/filecoin-project/lotus/discussions/6852) on the Dagstore in the past month!**
- 🌟🌟 rpcenc: Support reader redirect ([filecoin-project/lotus#6952](https://github.com/filecoin-project/lotus/pull/6952))
  - This allows market processes to send piece bytes directly to workers involved in `AddPiece`.
- Extending sectors: more practical and flexible tools ([filecoin-project/lotus#6097](https://github.com/filecoin-project/lotus/pull/6097))
  - `lotus-miner sectors check-expire` to inspect expiring sectors.
  - `lotus-miner sectors renew` for renewing expiring sectors; see the command help menu for customizable options like `extension`, `new-expiration` and so on.
- ‼️ MpoolReplaceCmd (`lotus mpool replace`) now takes FIL for fee-limit ([filecoin-project/lotus#6927](https://github.com/filecoin-project/lotus/pull/6927))
- Drop townhall/chainwatch ([filecoin-project/lotus#6912](https://github.com/filecoin-project/lotus/pull/6912))
  - ChainWatch is no longer supported by lotus.
- Configurable CC Sector Expiration ([filecoin-project/lotus#6803](https://github.com/filecoin-project/lotus/pull/6803))
  - Set `CommittedCapacitySectorLifetime` in lotus-miner/config.toml to specify the default expiration for a new CC sector; the value must be between 180 and 540 days inclusive.
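
A hedged `config.toml` sketch for the CC sector lifetime; the `[Sealing]` placement and the Go-duration string format are assumptions from the default miner config of this era (540 days = 12960h), so double-check with `lotus-miner config default`:

```toml
[Sealing]
  # Default lifetime for a newly sealed CC sector, between 180 and 540 days.
  CommittedCapacitySectorLifetime = "12960h0m0s"  # 540 days
```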

## New Features

- api/command for encoding actor params ([filecoin-project/lotus#7150](https://github.com/filecoin-project/lotus/pull/7150))
- remove m1 templates and make area selection multi-optionable ([filecoin-project/lotus#7121](https://github.com/filecoin-project/lotus/pull/7121))
- release -> master ([filecoin-project/lotus#7105](https://github.com/filecoin-project/lotus/pull/7105))
- Lotus release process - how we make releases ([filecoin-project/lotus#6944](https://github.com/filecoin-project/lotus/pull/6944))
- codecov: fix mock name ([filecoin-project/lotus#7039](https://github.com/filecoin-project/lotus/pull/7039))
- codecov: fix regexes ([filecoin-project/lotus#7037](https://github.com/filecoin-project/lotus/pull/7037))
- chore: disable flaky test ([filecoin-project/lotus#6957](https://github.com/filecoin-project/lotus/pull/6957))
- set buildtype in nerpa and butterfly ([filecoin-project/lotus#6085](https://github.com/filecoin-project/lotus/pull/6085))
- release v1.11.1 backport -> master ([filecoin-project/lotus#6929](https://github.com/filecoin-project/lotus/pull/6929))
- chore: fixup issue templates ([filecoin-project/lotus#6899](https://github.com/filecoin-project/lotus/pull/6899))
- bump master version to v1.11.2-dev ([filecoin-project/lotus#6903](https://github.com/filecoin-project/lotus/pull/6903))

> Note: for discussion about this release, please comment [here](https://github.com/filecoin-project/lotus/discussions/6904)

This is a **highly recommended** but optional Lotus v1.11.1 release that introduces many deal making and datastore improvements and new features along with other bug fixes.

## Highlights

- ⭐️⭐️⭐️ [**lotus-miner market subsystem**](https://docs.filecoin.io/mine/lotus/split-markets-miners/#frontmatter-title) is introduced in this release! It is **highly recommended** for storage providers to run markets processes on a separate machine! With this setup, only that machine needs to expose public ports for deal making. This also means that the other miner operations can now be completely isolated from the deal making processes, and storage providers can stop and restart the markets process without affecting an ongoing Winning/Window PoSt!
  - More details on the concepts, architecture and how to split the market process can be found [here](https://docs.filecoin.io/mine/lotus/split-markets-miners/#concepts).
  - Based on your system setup (running on separate machines, the same machine and so on), please see the suggested practices from community members [here](https://github.com/filecoin-project/lotus/discussions/7047#discussion-3515335).
  - Note: if you are running lotus-worker on a different machine, you will need to set `MARKETS_API_INFO` for certain CLIs to work properly. This will be improved by #7072.
  - Huge thanks to MinerX fellows for [helping test the implementation, reporting issues so they could be fixed, and providing feedback](https://github.com/filecoin-project/lotus/discussions/6861) on the user docs in the past three weeks!
- Config for collateral from miner available balance ([filecoin-project/lotus#6629](https://github.com/filecoin-project/lotus/pull/6629))
  - Better control your sector collateral payment by setting `CollateralFromMinerBalance`, `AvailableBalanceBuffer` and `DisableCollateralFallback` (see the config sketch after this list).
  - `CollateralFromMinerBalance`: whether to use available miner balance for sector collateral instead of sending it with each message; default is `false`.
  - `AvailableBalanceBuffer`: minimum available balance to keep in the miner actor before sending it with messages; default is 0 FIL.
- Config for deal publishing control addresses ([filecoin-project/lotus#6697](https://github.com/filecoin-project/lotus/pull/6697))
  - Set `DealPublishControl` to set the wallet used for sending `PublishStorageDeals` messages; instructions [here](https://docs.filecoin.io/mine/lotus/miner-addresses/#control-addresses).
- Config UX improvements ([filecoin-project/lotus#6848](https://github.com/filecoin-project/lotus/pull/6848))
  - You can now preview the default and updated node config by running `lotus/lotus-miner config default/updated`
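
A hypothetical excerpt showing how these settings might look in the miner's `config.toml`. The collateral keys are shown at the top level and `DealPublishControl` under `[Addresses]`, which appears to match the default config of this era; the address and buffer values are placeholders, so confirm the layout with `lotus-miner config default`:

```toml
# Pay sector collateral from the miner actor's available balance
# instead of attaching funds to every message.
CollateralFromMinerBalance = true
# Keep at least this much available balance in the miner actor (placeholder value).
AvailableBalanceBuffer = "1 FIL"
# If true, never fall back to attaching collateral to messages when
# the available balance runs short.
DisableCollateralFallback = false

[Addresses]
  # Wallet(s) permitted to send PublishStorageDeals messages (placeholder address).
  DealPublishControl = ["f3...your-publish-address"]
```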

## New Features

- ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356))
- **⭐️⭐️ Experimental** [Splitstore](https://github.com/filecoin-project/lotus/blob/master/blockstore/splitstore/README.md) (more details coming in v1.11.2! Stay tuned! Join the discussion [here](https://github.com/filecoin-project/lotus/discussions/5788) if you have questions!):
  - Improve splitstore warmup ([filecoin-project/lotus#6867](https://github.com/filecoin-project/lotus/pull/6867))
  - Moving GC for badger ([filecoin-project/lotus#6854](https://github.com/filecoin-project/lotus/pull/6854))
  - splitstore shed utils ([filecoin-project/lotus#6811](https://github.com/filecoin-project/lotus/pull/6811))
  - fix warmup by decoupling state from message receipt walk ([filecoin-project/lotus#6841](https://github.com/filecoin-project/lotus/pull/6841))
  - Splitstore: support on-disk marksets using badger ([filecoin-project/lotus#6833](https://github.com/filecoin-project/lotus/pull/6833))
  - cache loaded block messages ([filecoin-project/lotus#6760](https://github.com/filecoin-project/lotus/pull/6760))
  - Splitstore: add retention policy option for keeping messages in the hotstore ([filecoin-project/lotus#6775](https://github.com/filecoin-project/lotus/pull/6775))
  - Introduce the LOTUS_CHAIN_BADGERSTORE_DISABLE_FSYNC envvar ([filecoin-project/lotus#6817](https://github.com/filecoin-project/lotus/pull/6817))
  - Splitstore: add support for protecting out of chain references in the blockstore ([filecoin-project/lotus#6777](https://github.com/filecoin-project/lotus/pull/6777))
  - Implement exposed splitstore ([filecoin-project/lotus#6762](https://github.com/filecoin-project/lotus/pull/6762))
  - Splitstore code reorg ([filecoin-project/lotus#6756](https://github.com/filecoin-project/lotus/pull/6756))
  - Splitstore: Some small fixes ([filecoin-project/lotus#6754](https://github.com/filecoin-project/lotus/pull/6754))
  - Splitstore Enhanchements ([filecoin-project/lotus#6474](https://github.com/filecoin-project/lotus/pull/6474))
- lotus-shed: initial export cmd for markets related metadata ([filecoin-project/lotus#6840](https://github.com/filecoin-project/lotus/pull/6840))
- add a very verbose -vv flag to lotus and lotus-miner. ([filecoin-project/lotus#6888](https://github.com/filecoin-project/lotus/pull/6888))
- Add allocated sectorid vis ([filecoin-project/lotus#4638](https://github.com/filecoin-project/lotus/pull/4638))
- add a command for compacting sector numbers bitfield ([filecoin-project/lotus#4640](https://github.com/filecoin-project/lotus/pull/4640))
  - Run `lotus-miner actor compact-allocated` to compact sector number allocations to reduce the size of the allocated sector number bitfield.
- Add ChainGetMessagesInTipset API ([filecoin-project/lotus#6642](https://github.com/filecoin-project/lotus/pull/6642))
- Handle the --color flag via proper global state ([filecoin-project/lotus#6743](https://github.com/filecoin-project/lotus/pull/6743))
- Enable color by default only if os.Stdout is a TTY ([filecoin-project/lotus#6696](https://github.com/filecoin-project/lotus/pull/6696))
- Stop outputing ANSI color on non-TTY ([filecoin-project/lotus#6694](https://github.com/filecoin-project/lotus/pull/6694))
- Envvar to disable slash filter ([filecoin-project/lotus#6620](https://github.com/filecoin-project/lotus/pull/6620))
- commit batch: AggregateAboveBaseFee config ([filecoin-project/lotus#6650](https://github.com/filecoin-project/lotus/pull/6650))
- shed tool to estimate aggregate network fees ([filecoin-project/lotus#6631](https://github.com/filecoin-project/lotus/pull/6631))

## Bug Fixes

- Fix padding of deals, which only partially shipped in #5988 ([filecoin-project/lotus#6683](https://github.com/filecoin-project/lotus/pull/6683))
- fix deal concurrency test failures by upgrading graphsync and others ([filecoin-project/lotus#6724](https://github.com/filecoin-project/lotus/pull/6724))
- fix: on randomness change, use new rand ([filecoin-project/lotus#6805](https://github.com/filecoin-project/lotus/pull/6805))
- fix: always check if StateSearchMessage returns nil ([filecoin-project/lotus#6802](https://github.com/filecoin-project/lotus/pull/6802))
- test: fix flaky window post tests ([filecoin-project/lotus#6804](https://github.com/filecoin-project/lotus/pull/6804))
- wrap close(wait) with sync.Once to avoid panic ([filecoin-project/lotus#6800](https://github.com/filecoin-project/lotus/pull/6800))
- fixes #6786 segfault ([filecoin-project/lotus#6787](https://github.com/filecoin-project/lotus/pull/6787))
- ClientRetrieve stops on cancel ([filecoin-project/lotus#6739](https://github.com/filecoin-project/lotus/pull/6739))
- Fix bugs in sectors extend --v1-sectors ([filecoin-project/lotus#6066](https://github.com/filecoin-project/lotus/pull/6066))
- fix "lotus-seed genesis car" error "merkledag: not found" ([filecoin-project/lotus#6688](https://github.com/filecoin-project/lotus/pull/6688))
- Get retrieval pricing input should not error out on a deal state fetch ([filecoin-project/lotus#6679](https://github.com/filecoin-project/lotus/pull/6679))
- Fix more CID double-encoding as hex ([filecoin-project/lotus#6680](https://github.com/filecoin-project/lotus/pull/6680))
- storage: Fix FinalizeSector with sectors in stoage paths ([filecoin-project/lotus#6653](https://github.com/filecoin-project/lotus/pull/6653))
- Fix tiny error in check-client-datacap ([filecoin-project/lotus#6664](https://github.com/filecoin-project/lotus/pull/6664))
- Fix: precommit_batch method used the wrong cfg.CommitBatchWait ([filecoin-project/lotus#6658](https://github.com/filecoin-project/lotus/pull/6658))
- fix ticket expiration check ([filecoin-project/lotus#6635](https://github.com/filecoin-project/lotus/pull/6635))
- remove precommit check in handleCommitFailed ([filecoin-project/lotus#6634](https://github.com/filecoin-project/lotus/pull/6634))
- fix prove commit aggregate send token amount ([filecoin-project/lotus#6625](https://github.com/filecoin-project/lotus/pull/6625))
## Improvements
- Eliminate inefficiency in markets logging ([filecoin-project/lotus#6895](https://github.com/filecoin-project/lotus/pull/6895))
- rename `cmd/lotus{-storage=>}-miner` to match binary. ([filecoin-project/lotus#6886](https://github.com/filecoin-project/lotus/pull/6886))
- fix racy TestSimultanenousTransferLimit. ([filecoin-project/lotus#6862](https://github.com/filecoin-project/lotus/pull/6862))
- ValidateBlock: Assert that block header height's are greater than parents ([filecoin-project/lotus#6872](https://github.com/filecoin-project/lotus/pull/6872))
- feat: Don't panic when api impl is nil ([filecoin-project/lotus#6857](https://github.com/filecoin-project/lotus/pull/6857))
- add docker-compose file ([filecoin-project/lotus#6544](https://github.com/filecoin-project/lotus/pull/6544))
- easy way to make install app ([filecoin-project/lotus#5183](https://github.com/filecoin-project/lotus/pull/5183))
- api: Separate the Net interface from Common ([filecoin-project/lotus#6627](https://github.com/filecoin-project/lotus/pull/6627))
- add StateReadState to gateway api ([filecoin-project/lotus#6818](https://github.com/filecoin-project/lotus/pull/6818))
- add SealProof in SectorBuilder ([filecoin-project/lotus#6815](https://github.com/filecoin-project/lotus/pull/6815))
- sealing: Handle preCommitParams errors more correctly ([filecoin-project/lotus#6763](https://github.com/filecoin-project/lotus/pull/6763))
- ClientFindData: always fetch peer id from chain ([filecoin-project/lotus#6807](https://github.com/filecoin-project/lotus/pull/6807))
- test: handle null blocks in TestForkRefuseCall ([filecoin-project/lotus#6758](https://github.com/filecoin-project/lotus/pull/6758))
- Add more deal details to lotus-miner info ([filecoin-project/lotus#6708](https://github.com/filecoin-project/lotus/pull/6708))
- add election backtest ([filecoin-project/lotus#5950](https://github.com/filecoin-project/lotus/pull/5950))
- add dollar sign ([filecoin-project/lotus#6690](https://github.com/filecoin-project/lotus/pull/6690))
- get-actor cli spelling fix ([filecoin-project/lotus#6681](https://github.com/filecoin-project/lotus/pull/6681))
- polish(statetree): accept a context in statetree diff for timeouts ([filecoin-project/lotus#6639](https://github.com/filecoin-project/lotus/pull/6639))
- Add helptext to lotus chain export ([filecoin-project/lotus#6672](https://github.com/filecoin-project/lotus/pull/6672))
- add an incremental nonce itest. ([filecoin-project/lotus#6663](https://github.com/filecoin-project/lotus/pull/6663))
- commit batch: Initialize the FailedSectors map ([filecoin-project/lotus#6647](https://github.com/filecoin-project/lotus/pull/6647))
- Fast-path retry submitting commit aggregate if commit is still valid ([filecoin-project/lotus#6638](https://github.com/filecoin-project/lotus/pull/6638))
- Reuse timers in sealing batch logic ([filecoin-project/lotus#6636](https://github.com/filecoin-project/lotus/pull/6636))

## Dependency Updates
- Update to proof v8.0.3 ([filecoin-project/lotus#6890](https://github.com/filecoin-project/lotus/pull/6890))
- update to go-fil-market v1.6.0 ([filecoin-project/lotus#6885](https://github.com/filecoin-project/lotus/pull/6885))
- Bump go-multihash, adjust test for supported version ([filecoin-project/lotus#6674](https://github.com/filecoin-project/lotus/pull/6674))
- github.com/filecoin-project/go-data-transfer (v1.6.0 -> v1.7.2):
- github.com/filecoin-project/go-fil-markets (v1.5.0 -> v1.6.2):
- github.com/filecoin-project/go-padreader (v0.0.0-20200903213702-ed5fae088b20 -> v0.0.0-20210723183308-812a16dc01b1)
- github.com/filecoin-project/go-state-types (v0.1.1-0.20210506134452-99b279731c48 -> v0.1.1-0.20210810190654-139e0e79e69e)
- github.com/filecoin-project/go-statemachine (v0.0.0-20200925024713-05bd7c71fbfe -> v1.0.1)
- update go-libp2p-pubsub to v0.5.0 ([filecoin-project/lotus#6764](https://github.com/filecoin-project/lotus/pull/6764))

## Others
- Master->v1.11.1 ([filecoin-project/lotus#7051](https://github.com/filecoin-project/lotus/pull/7051))
- v1.11.1-rc2 ([filecoin-project/lotus#6966](https://github.com/filecoin-project/lotus/pull/6966))
- Backport master -> v1.11.1 ([filecoin-project/lotus#6965](https://github.com/filecoin-project/lotus/pull/6965))
- Fixes in master -> release ([filecoin-project/lotus#6933](https://github.com/filecoin-project/lotus/pull/6933))
- Add changelog for v1.11.1-rc1 and bump the version ([filecoin-project/lotus#6900](https://github.com/filecoin-project/lotus/pull/6900))
- Fix merge release -> v1.11.1 ([filecoin-project/lotus#6897](https://github.com/filecoin-project/lotus/pull/6897))
- Update RELEASE_ISSUE_TEMPLATE.md ([filecoin-project/lotus#6880](https://github.com/filecoin-project/lotus/pull/6880))
- Add github actions for staled pr ([filecoin-project/lotus#6879](https://github.com/filecoin-project/lotus/pull/6879))
- Update issue templates and add templates for M1 ([filecoin-project/lotus#6856](https://github.com/filecoin-project/lotus/pull/6856))
- Fix links in issue templates
- Update issue templates to forms ([filecoin-project/lotus#6798](https://github.com/filecoin-project/lotus/pull/6798))
- Nerpa v13 upgrade ([filecoin-project/lotus#6837](https://github.com/filecoin-project/lotus/pull/6837))
- add docker-compose file ([filecoin-project/lotus#6544](https://github.com/filecoin-project/lotus/pull/6544))
- release -> master ([filecoin-project/lotus#6828](https://github.com/filecoin-project/lotus/pull/6828))
- Resurrect CODEOWNERS, but for maintainers group ([filecoin-project/lotus#6773](https://github.com/filecoin-project/lotus/pull/6773))
- Master disclaimer ([filecoin-project/lotus#6757](https://github.com/filecoin-project/lotus/pull/6757))
- Create stale.yml ([filecoin-project/lotus#6747](https://github.com/filecoin-project/lotus/pull/6747))
- Release template: Update all testnet infra at once ([filecoin-project/lotus#6710](https://github.com/filecoin-project/lotus/pull/6710))
- Release Template: remove binary validation step ([filecoin-project/lotus#6709](https://github.com/filecoin-project/lotus/pull/6709))
- Reset of the interop network ([filecoin-project/lotus#6689](https://github.com/filecoin-project/lotus/pull/6689))
- Update version.go to 1.11.1 ([filecoin-project/lotus#6621](https://github.com/filecoin-project/lotus/pull/6621))

## Contributors

| Contributor | Commits | Lines ± | Files Changed |

@ -878,7 +1007,7 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd

| dependabot[bot] | 1 | +3/-3 | 1 |
| zhoutian527 | 1 | +2/-2 | 1 |
| xloem | 1 | +4/-0 | 1 |
| @travisperson | 2 | +2/-2 | 3 |
| Liviu Damian | 2 | +2/-2 | 2 |
| @jimpick | 2 | +2/-2 | 2 |
| Frank | 1 | +3/-0 | 1 |

@ -887,9 +1016,10 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd

# 1.11.0 / 2021-07-22

This is a **highly recommended** release of Lotus that has many bug fixes, improvements and new features.

## Highlights
- Miner SimultaneousTransfers config ([filecoin-project/lotus#6612](https://github.com/filecoin-project/lotus/pull/6612))
  - Set `SimultaneousTransfers` in the lotus miner config to configure the maximum number of parallel online data transfers, including both storage and retrieval deals (see the config sketch after this list).
- Dynamic Retrieval pricing ([filecoin-project/lotus#6175](https://github.com/filecoin-project/lotus/pull/6175))
@ -898,7 +1028,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr
  - run `lotus mpool manage` and follow the instructions!
  - Demo available at https://www.youtube.com/watch?v=QDocpLQjZgQ.
- Add utils to use multisigs as miner owners ([filecoin-project/lotus#6490](https://github.com/filecoin-project/lotus/pull/6490))

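As a rough illustration only (not part of the original release notes), the transfer limit highlighted above might be set like this in a miner's `config.toml`; the `[Dealmaking]` section name and the value shown are assumptions, so check the miner configuration docs for your Lotus version:

```toml
# Hypothetical excerpt from a lotus-miner config.toml -- section name and value are illustrative
[Dealmaking]
  # Maximum number of parallel online data transfers (storage and retrieval deals combined)
  SimultaneousTransfers = 20
```
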
## More New Features
- feat: implement lotus-sim ([filecoin-project/lotus#6406](https://github.com/filecoin-project/lotus/pull/6406))
- implement a command to export a car ([filecoin-project/lotus#6405](https://github.com/filecoin-project/lotus/pull/6405))
@ -915,11 +1045,11 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr
- Transplant some useful commands to lotus-shed actor ([filecoin-project/lotus#5913](https://github.com/filecoin-project/lotus/pull/5913))
  - run `lotus-shed actor`
- actor wrapper codegen ([filecoin-project/lotus#6108](https://github.com/filecoin-project/lotus/pull/6108))
- Add a shed util to count miners by post type ([filecoin-project/lotus#6169](https://github.com/filecoin-project/lotus/pull/6169))
- shed: command to list duplicate messages in tipsets (steb) ([filecoin-project/lotus#5847](https://github.com/filecoin-project/lotus/pull/5847))
- feat: allow checkpointing to forks ([filecoin-project/lotus#6107](https://github.com/filecoin-project/lotus/pull/6107))
- Add a CLI tool for miner proving deadline ([filecoin-project/lotus#6132](https://github.com/filecoin-project/lotus/pull/6132))
  - run `lotus state miner-proving-deadline`

## Bug Fixes
@ -928,7 +1058,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr
- Make query-ask CLI more graceful ([filecoin-project/lotus#6590](https://github.com/filecoin-project/lotus/pull/6590))
- scale up sector expiration to avoid sector expire in batch-pre-commit waitting ([filecoin-project/lotus#6566](https://github.com/filecoin-project/lotus/pull/6566))
- Fix an error in msigLockCancel ([filecoin-project/lotus#6582](https://github.com/filecoin-project/lotus/pull/6582))
- fix circleci being out of sync. ([filecoin-project/lotus#6573](https://github.com/filecoin-project/lotus/pull/6573))
- Fix helptext for ask price ([filecoin-project/lotus#6560](https://github.com/filecoin-project/lotus/pull/6560))
- fix commit finalize failed ([filecoin-project/lotus#6521](https://github.com/filecoin-project/lotus/pull/6521))
- Fix soup ([filecoin-project/lotus#6501](https://github.com/filecoin-project/lotus/pull/6501))
@ -953,7 +1083,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr
- Speed up StateListMessages in some cases ([filecoin-project/lotus#6007](https://github.com/filecoin-project/lotus/pull/6007))
- fix(splitstore): fix a panic on revert-only head changes ([filecoin-project/lotus#6133](https://github.com/filecoin-project/lotus/pull/6133))
- drand: fix beacon cache ([filecoin-project/lotus#6164](https://github.com/filecoin-project/lotus/pull/6164))

## Improvements
- gateway: Add support for Version method ([filecoin-project/lotus#6618](https://github.com/filecoin-project/lotus/pull/6618))
- revamped integration test kit (aka. Operation Sparks Joy) ([filecoin-project/lotus#6329](https://github.com/filecoin-project/lotus/pull/6329))
@ -988,7 +1118,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr
- Remove log line when tracing is not configured ([filecoin-project/lotus#6334](https://github.com/filecoin-project/lotus/pull/6334))
- separate tracing environment variables ([filecoin-project/lotus#6323](https://github.com/filecoin-project/lotus/pull/6323))
- feat: log dispute rate ([filecoin-project/lotus#6322](https://github.com/filecoin-project/lotus/pull/6322))
- Move verifreg shed utils to CLI ([filecoin-project/lotus#6135](https://github.com/filecoin-project/lotus/pull/6135))
- consider storiface.PathStorage when calculating storage requirements ([filecoin-project/lotus#6233](https://github.com/filecoin-project/lotus/pull/6233))
- `storage` module: add go docs and minor code quality refactors ([filecoin-project/lotus#6259](https://github.com/filecoin-project/lotus/pull/6259))
- Increase data transfer timeouts ([filecoin-project/lotus#6300](https://github.com/filecoin-project/lotus/pull/6300))
@ -1005,7 +1135,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr
- Testground checks on push ([filecoin-project/lotus#5887](https://github.com/filecoin-project/lotus/pull/5887))
- Use EmptyTSK where appropriate ([filecoin-project/lotus#6134](https://github.com/filecoin-project/lotus/pull/6134))
- upgrade `lotus-soup` testplans and reduce deals concurrency to a single miner ([filecoin-project/lotus#6122](https://github.com/filecoin-project/lotus/pull/6122))

## Dependency Updates
- downgrade libp2p/go-libp2p-yamux to v0.5.1. ([filecoin-project/lotus#6605](https://github.com/filecoin-project/lotus/pull/6605))
- Update libp2p to 0.14.2 ([filecoin-project/lotus#6404](https://github.com/filecoin-project/lotus/pull/6404))
@ -1013,7 +1143,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr
- Use new actor tags ([filecoin-project/lotus#6291](https://github.com/filecoin-project/lotus/pull/6291))
- chore: update go-libp2p ([filecoin-project/lotus#6231](https://github.com/filecoin-project/lotus/pull/6231))
- Update ffi to proofs v7 ([filecoin-project/lotus#6150](https://github.com/filecoin-project/lotus/pull/6150))

## Others
- Initial draft: basic build instructions on Readme ([filecoin-project/lotus#6498](https://github.com/filecoin-project/lotus/pull/6498))
- Remove rc changelog, compile the new changelog for final release only ([filecoin-project/lotus#6444](https://github.com/filecoin-project/lotus/pull/6444))
@ -1031,8 +1161,8 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr
- Introduce a release issue template ([filecoin-project/lotus#5826](https://github.com/filecoin-project/lotus/pull/5826))
- This is a 1:1 forward-port of PR#6183 from 1.9.x to master ([filecoin-project/lotus#6196](https://github.com/filecoin-project/lotus/pull/6196))
- Update cli gen ([filecoin-project/lotus#6155](https://github.com/filecoin-project/lotus/pull/6155))
- Generate CLI docs ([filecoin-project/lotus#6145](https://github.com/filecoin-project/lotus/pull/6145))

## Contributors

| Contributor | Commits | Lines ± | Files Changed |

@ -1044,7 +1174,7 @@ This is a **highly recommended** release of Lotus that have many bug fixes, impr

| @Stebalien | 106 | +7653/-2718 | 273 |
| dirkmc | 11 | +2580/-1371 | 77 |
| @dirkmc | 39 | +1865/-1194 | 79 |
| @Kubuxu | 19 | +1973/-485 | 81 |
| @vyzo | 4 | +1748/-330 | 50 |
| @aarshkshah1992 | 5 | +1462/-213 | 27 |
| @coryschwartz | 35 | +568/-206 | 59 |

@ -1081,10 +1211,10 @@ This is an optional but **highly recommended** release of Lotus for lotus miners

## New Features
- commit batch: AggregateAboveBaseFee config #6650
  - `AggregateAboveBaseFee` is added to miner sealing configuration for setting the network base fee to start aggregating proofs. When the network base fee is lower than this value, the prove commits will be submitted individually via `ProveCommitSector`. According to the [Batch Incentive Alignment](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md#batch-incentive-alignment) introduced in FIP-0013, we recommend miners to set this value to 0.15 nanoFIL (which is the default value) to avoid unexpected aggregation fee in burn and enjoy the most benefits of aggregation!

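As a non-authoritative illustration (not part of the original notes), this is roughly how the option above could appear in a miner's `config.toml`; the `[Sealing]` section name and the FIL-string value format are assumptions, so confirm against the miner configuration docs:

```toml
# Hypothetical excerpt from a lotus-miner config.toml -- section name and value format are illustrative
[Sealing]
  # Aggregate prove-commits only when the network base fee is at or above this value;
  # below it, commits are submitted individually via ProveCommitSector.
  # 0.15 nanoFIL written out as a FIL amount:
  AggregateAboveBaseFee = "0.00000000015 FIL"
```
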
## Bug Fixes
- storage: Fix FinalizeSector with sectors in storage paths #6652
- Fix tiny error in check-client-datacap #6664
- Fix: precommit_batch method used the wrong cfg.PreCommitBatchWait #6658
- to optimize the batchwait #6636
- fix getTicket: sector precommitted but expired case #6635

@ -1113,10 +1243,10 @@ This is an optional but **highly recommended** release of Lotus for lotus miners

## New Features
- commit batch: AggregateAboveBaseFee config #6650
  - `AggregateAboveBaseFee` is added to miner sealing configuration for setting the network base fee to start aggregating proofs. When the network base fee is lower than this value, the prove commits will be submitted individually via `ProveCommitSector`. According to the [Batch Incentive Alignment](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md#batch-incentive-alignment) introduced in FIP-0013, we recommend miners to set this value to 0.15 nanoFIL (which is the default value) to avoid unexpected aggregation fee in burn and enjoy the most benefits of aggregation!

## Bug Fixes
- storage: Fix FinalizeSector with sectors in storage paths #6652
- Fix tiny error in check-client-datacap #6664
- Fix: precommit_batch method used the wrong cfg.PreCommitBatchWait #6658
- to optimize the batchwait #6636
- fix getTicket: sector precommitted but expired case #6635

@ -1159,10 +1289,10 @@ FIPs [0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.m

**Check out the documentation [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch) for details on the new Lotus miner sealing config options, [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#fees-section) for fee config options, and explanations of the new features.**

Note:
- We recommend keeping `PreCommitSectorsBatch` at 1.
- We recommend miners set `PreCommitBatchWait` lower than 30 hours.
- We recommend miners set a longer `CommitBatchSlack` and `PreCommitBatchSlack` to prevent message failures due to expirations. A config sketch follows this list.

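Purely as an illustration (not from the original notes), the batching recommendations above might translate into something like the following in a miner's `config.toml`; the `[Sealing]` section name, the duration format, and the specific values are assumptions:

```toml
# Hypothetical excerpt from a lotus-miner config.toml -- section name, format and values are illustrative
[Sealing]
  # Stay well under the 30-hour recommendation before flushing a pre-commit batch
  PreCommitBatchWait = "24h0m0s"
  # Generous slack so batched messages land on chain before anything expires
  PreCommitBatchSlack = "6h0m0s"
  CommitBatchSlack = "6h0m0s"
```
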
### Projected state tree growth

@ -1173,9 +1303,9 @@ Given these assumptions:

- We'd expect a network storage growth rate of around 530PiB per day. 😳 🎉 🥳 😅
- We'd expect network bandwidth dedicated to `SubmitWindowedPoSt` to grow by about 0.02% per day.
- We'd expect the [state-tree](https://spec.filecoin.io/#section-systems.filecoin_vm.state_tree) (and therefore [snapshot](https://docs.filecoin.io/get-started/lotus/chain/#lightweight-snapshot)) size to grow by 1.16GiB per day.
  - Nearly all of the state-tree growth is expected to come from new sector metadata.
- We'd expect the daily lotus datastore growth rate to increase by about 10-15% (from current ~21GiB/day).
  - Most "growth" of the lotus datastore is due to "churn", historical data that's no longer referenced by the latest state-tree.

### Future improvements

@ -2864,4 +2994,4 @@ We are grateful for every contribution!

We are very excited to release **lotus** 0.1.0. This is our testnet release. To install lotus and join the testnet, please visit [lotu.sh](lotu.sh). Please file bug reports as [issues](https://github.com/filecoin-project/lotus/issues).

A huge thank you to all contributors for this testnet release!

@ -53,8 +53,9 @@ COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/
COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/
COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/

RUN useradd -r -u 532 -U fc

RUN useradd -r -u 532 -U fc \
  && mkdir -p /etc/OpenCL/vendors \
  && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd

###
FROM base AS lotus

@ -92,7 +92,8 @@ type StorageMiner interface {
	SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error //perm:admin
	// SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
	// be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
	SectorRemove(context.Context, abi.SectorNumber) error //perm:admin
	SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin
	// SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
	// automatically removes it from storage
	SectorTerminate(context.Context, abi.SectorNumber) error //perm:admin
@ -100,8 +101,7 @@ type StorageMiner interface {
	// Returns null if message wasn't sent
	SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
	// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
	SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
	SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin
	SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
	// SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
	// Returns null if message wasn't sent
	SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin

@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package api
|
||||
|
||||
import (
|
||||
@ -26,6 +27,7 @@ func goCmd() string {
|
||||
}
|
||||
|
||||
func TestDoesntDependOnFFI(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_FFI_DEPENDENCE_001
|
||||
deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -38,6 +40,7 @@ func TestDoesntDependOnFFI(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDoesntDependOnBuild(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_FFI_DEPENDENCE_002
|
||||
deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -50,6 +53,7 @@ func TestDoesntDependOnBuild(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReturnTypes(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_001
|
||||
errType := reflect.TypeOf(new(error)).Elem()
|
||||
bareIface := reflect.TypeOf(new(interface{})).Elem()
|
||||
jmarsh := reflect.TypeOf(new(json.Marshaler)).Elem()
|
||||
@ -115,6 +119,7 @@ func TestReturnTypes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPermTags(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_PERM_TAGS_001
|
||||
_ = PermissionedFullAPI(&FullNodeStruct{})
|
||||
_ = PermissionedStorMinerAPI(&StorageMinerStruct{})
|
||||
_ = PermissionedWorkerAPI(&WorkerStruct{})
|
||||
|
@ -122,7 +122,7 @@ func init() {
	addExample(api.FullAPIVersion1)
	addExample(api.PCHInbound)
	addExample(time.Minute)
	addExample(graphsync.RequestID(4))
	addExample(graphsync.NewRequestID())
	addExample(datatransfer.TransferID(3))
	addExample(datatransfer.Ongoing)
	addExample(storeIDExample)

@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package api
|
||||
|
||||
import (
|
||||
@ -29,6 +30,7 @@ type StrC struct {
|
||||
}
|
||||
|
||||
func TestGetInternalStructs(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_API_STRUCTS_001
|
||||
var proxy StrA
|
||||
|
||||
sts := GetInternalStructs(&proxy)
|
||||
@ -44,6 +46,7 @@ func TestGetInternalStructs(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNestedInternalStructs(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_API_STRUCTS_001
|
||||
var proxy StrC
|
||||
|
||||
// check that only the top-level internal struct gets picked up
|
||||
|
17
api/types.go
17
api/types.go
@ -5,6 +5,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
@ -12,7 +14,6 @@ import (
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-graphsync"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
@ -58,7 +59,7 @@ type MessageSendSpec struct {
|
||||
// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
|
||||
type GraphSyncDataTransfer struct {
|
||||
// GraphSync request id for this transfer
|
||||
RequestID graphsync.RequestID
|
||||
RequestID *graphsync.RequestID
|
||||
// Graphsync state for this transfer
|
||||
RequestState string
|
||||
// If a channel ID is present, indicates whether this is the current graphsync request for this channel
|
||||
@ -124,12 +125,6 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta
|
||||
return channel
|
||||
}
|
||||
|
||||
type NetBlockList struct {
|
||||
Peers []peer.ID
|
||||
IPAddrs []string
|
||||
IPSubnets []string
|
||||
}
|
||||
|
||||
type NetStat struct {
|
||||
System *network.ScopeStat `json:",omitempty"`
|
||||
Transient *network.ScopeStat `json:",omitempty"`
|
||||
@ -152,6 +147,12 @@ type NetLimit struct {
|
||||
FD int
|
||||
}
|
||||
|
||||
type NetBlockList struct {
|
||||
Peers []peer.ID
|
||||
IPAddrs []string
|
||||
IPSubnets []string
|
||||
}
|
||||
|
||||
type ExtendedPeerInfo struct {
|
||||
ID peer.ID
|
||||
Agent string
|
||||
|
@ -57,8 +57,8 @@ var (
	FullAPIVersion0 = newVer(1, 5, 0)
	FullAPIVersion1 = newVer(2, 2, 0)

	MinerAPIVersion0 = newVer(1, 4, 0)
	WorkerAPIVersion0 = newVer(1, 5, 0)
	MinerAPIVersion0 = newVer(1, 5, 0)
	WorkerAPIVersion0 = newVer(1, 6, 0)
)

//nolint:varcheck,deadcode

@ -1,10 +1,10 @@
|
||||
//stm: #unit
|
||||
package badgerbs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@ -20,6 +20,8 @@ import (
|
||||
)
|
||||
|
||||
func TestBadgerBlockstore(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
(&Suite{
|
||||
NewBlockstore: newBlockstore(DefaultOptions),
|
||||
OpenBlockstore: openBlockstore(DefaultOptions),
|
||||
@ -38,6 +40,8 @@ func TestBadgerBlockstore(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStorageKey(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_STORAGE_KEY_001
|
||||
bs, _ := newBlockstore(DefaultOptions)(t)
|
||||
bbs := bs.(*Blockstore)
|
||||
defer bbs.Close() //nolint:errcheck
|
||||
@ -73,20 +77,13 @@ func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) (
|
||||
return func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) {
|
||||
tb.Helper()
|
||||
|
||||
path, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
path = tb.TempDir()
|
||||
|
||||
db, err := Open(optsSupplier(path))
|
||||
if err != nil {
|
||||
tb.Fatal(err)
|
||||
}
|
||||
|
||||
tb.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
|
||||
return db, path
|
||||
}
|
||||
}
|
||||
@ -100,17 +97,10 @@ func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB,
|
||||
|
||||
func testMove(t *testing.T, optsF func(string) Options) {
|
||||
ctx := context.Background()
|
||||
basePath, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
basePath := t.TempDir()
|
||||
|
||||
dbPath := filepath.Join(basePath, "db")
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(basePath)
|
||||
})
|
||||
|
||||
db, err := Open(optsF(dbPath))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -265,10 +255,16 @@ func testMove(t *testing.T, optsF func(string) Options) {
|
||||
}
|
||||
|
||||
func TestMoveNoPrefix(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_COLLECT_GARBAGE_001
|
||||
testMove(t, DefaultOptions)
|
||||
}
|
||||
|
||||
func TestMoveWithPrefix(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_COLLECT_GARBAGE_001
|
||||
testMove(t, func(path string) Options {
|
||||
opts := DefaultOptions(path)
|
||||
opts.Prefix = "/prefixed/"
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package badgerbs
|
||||
|
||||
import (
|
||||
@ -44,6 +45,8 @@ func (s *Suite) RunTests(t *testing.T, prefix string) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -57,6 +60,8 @@ func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -68,6 +73,9 @@ func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestPutThenGetBlock(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -85,6 +93,8 @@ func (s *Suite) TestPutThenGetBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestHas(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_HAS_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -106,6 +116,9 @@ func (s *Suite) TestHas(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestCidv0v1(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -123,6 +136,9 @@ func (s *Suite) TestCidv0v1(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_SIZE_001
|
||||
ctx := context.Background()
|
||||
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
@ -154,6 +170,8 @@ func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestAllKeysSimple(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
defer func() { require.NoError(t, c.Close()) }()
|
||||
@ -170,6 +188,9 @@ func (s *Suite) TestAllKeysSimple(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestAllKeysRespectsContext(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
defer func() { require.NoError(t, c.Close()) }()
|
||||
@ -200,6 +221,7 @@ func (s *Suite) TestAllKeysRespectsContext(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestDoubleClose(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
c, ok := bs.(io.Closer)
|
||||
if !ok {
|
||||
@ -210,6 +232,9 @@ func (s *Suite) TestDoubleClose(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestReopenPutGet(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001
|
||||
ctx := context.Background()
|
||||
bs, path := s.NewBlockstore(t)
|
||||
c, ok := bs.(io.Closer)
|
||||
@ -236,6 +261,10 @@ func (s *Suite) TestReopenPutGet(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestPutMany(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_HAS_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001, @SPLITSTORE_BADGER_PUT_MANY_001
|
||||
//stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -268,6 +297,11 @@ func (s *Suite) TestPutMany(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestDelete(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_POOLED_STORAGE_HAS_001
|
||||
//stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001, @SPLITSTORE_BADGER_HAS_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_MANY_001
|
||||
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
|
@ -1,8 +1,6 @@
|
||||
package splitstore
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
@ -11,14 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestCheckpoint(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "checkpoint.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(dir)
|
||||
})
|
||||
dir := t.TempDir()
|
||||
|
||||
path := filepath.Join(dir, "checkpoint")
|
||||
|
||||
|
@ -2,8 +2,6 @@ package splitstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
@ -12,14 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestColdSet(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "coldset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(dir)
|
||||
})
|
||||
dir := t.TempDir()
|
||||
|
||||
path := filepath.Join(dir, "coldset")
|
||||
|
||||
|
@ -1,8 +1,7 @@
|
||||
//stm: #unit
|
||||
package splitstore
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
cid "github.com/ipfs/go-cid"
|
||||
@ -10,6 +9,8 @@ import (
|
||||
)
|
||||
|
||||
func TestMapMarkSet(t *testing.T) {
|
||||
//stm: @SPLITSTORE_MARKSET_CREATE_001, @SPLITSTORE_MARKSET_HAS_001, @@SPLITSTORE_MARKSET_MARK_001
|
||||
//stm: @SPLITSTORE_MARKSET_CLOSE_001, @SPLITSTORE_MARKSET_CREATE_VISITOR_001
|
||||
testMarkSet(t, "map")
|
||||
testMarkSetRecovery(t, "map")
|
||||
testMarkSetMarkMany(t, "map")
|
||||
@ -18,6 +19,8 @@ func TestMapMarkSet(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBadgerMarkSet(t *testing.T) {
|
||||
//stm: @SPLITSTORE_MARKSET_CREATE_001, @SPLITSTORE_MARKSET_HAS_001, @@SPLITSTORE_MARKSET_MARK_001
|
||||
//stm: @SPLITSTORE_MARKSET_CLOSE_001, @SPLITSTORE_MARKSET_CREATE_VISITOR_001
|
||||
bs := badgerMarkSetBatchSize
|
||||
badgerMarkSetBatchSize = 1
|
||||
t.Cleanup(func() {
|
||||
@ -31,14 +34,7 @@ func TestBadgerMarkSet(t *testing.T) {
|
||||
}
|
||||
|
||||
func testMarkSet(t *testing.T, lsType string) {
|
||||
path, err := ioutil.TempDir("", "markset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
env, err := OpenMarkSetEnv(path, lsType)
|
||||
if err != nil {
|
||||
@ -46,6 +42,7 @@ func testMarkSet(t *testing.T, lsType string) {
|
||||
}
|
||||
defer env.Close() //nolint:errcheck
|
||||
|
||||
// stm: @SPLITSTORE_MARKSET_CREATE_001
|
||||
hotSet, err := env.New("hot", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -65,6 +62,7 @@ func testMarkSet(t *testing.T, lsType string) {
|
||||
return cid.NewCidV1(cid.Raw, h)
|
||||
}
|
||||
|
||||
// stm: @SPLITSTORE_MARKSET_HAS_001
|
||||
mustHave := func(s MarkSet, cid cid.Cid) {
|
||||
t.Helper()
|
||||
has, err := s.Has(cid)
|
||||
@ -94,6 +92,7 @@ func testMarkSet(t *testing.T, lsType string) {
|
||||
k3 := makeCid("c")
|
||||
k4 := makeCid("d")
|
||||
|
||||
// stm: @SPLITSTORE_MARKSET_MARK_001
|
||||
hotSet.Mark(k1) //nolint
|
||||
hotSet.Mark(k2) //nolint
|
||||
coldSet.Mark(k3) //nolint
|
||||
@ -144,6 +143,7 @@ func testMarkSet(t *testing.T, lsType string) {
|
||||
mustNotHave(coldSet, k3)
|
||||
mustNotHave(coldSet, k4)
|
||||
|
||||
//stm: @SPLITSTORE_MARKSET_CLOSE_001
|
||||
err = hotSet.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -156,14 +156,7 @@ func testMarkSet(t *testing.T, lsType string) {
|
||||
}
|
||||
|
||||
func testMarkSetVisitor(t *testing.T, lsType string) {
|
||||
path, err := ioutil.TempDir("", "markset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
env, err := OpenMarkSetEnv(path, lsType)
|
||||
if err != nil {
|
||||
@ -171,6 +164,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
|
||||
}
|
||||
defer env.Close() //nolint:errcheck
|
||||
|
||||
//stm: @SPLITSTORE_MARKSET_CREATE_VISITOR_001
|
||||
visitor, err := env.New("test", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -225,14 +219,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
|
||||
}
|
||||
|
||||
func testMarkSetVisitorRecovery(t *testing.T, lsType string) {
|
||||
path, err := ioutil.TempDir("", "markset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
env, err := OpenMarkSetEnv(path, lsType)
|
||||
if err != nil {
|
||||
@ -324,14 +311,7 @@ func testMarkSetVisitorRecovery(t *testing.T, lsType string) {
|
||||
}
|
||||
|
||||
func testMarkSetRecovery(t *testing.T, lsType string) {
|
||||
path, err := ioutil.TempDir("", "markset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
env, err := OpenMarkSetEnv(path, lsType)
|
||||
if err != nil {
|
||||
@ -437,14 +417,7 @@ func testMarkSetRecovery(t *testing.T, lsType string) {
|
||||
}
|
||||
|
||||
func testMarkSetMarkMany(t *testing.T, lsType string) {
|
||||
path, err := ioutil.TempDir("", "markset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
env, err := OpenMarkSetEnv(path, lsType)
|
||||
if err != nil {
|
||||
|
@ -1,12 +1,11 @@
|
||||
//stm: #unit
|
||||
package splitstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
@ -85,14 +84,7 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
path, err := ioutil.TempDir("", "splitstore.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
// open the splitstore
|
||||
ss, err := Open(path, ds, hot, cold, cfg)
|
||||
@ -228,10 +220,16 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
||||
}
|
||||
|
||||
func TestSplitStoreCompaction(t *testing.T) {
|
||||
//stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
testSplitStore(t, &Config{MarkSetType: "map"})
|
||||
}
|
||||
|
||||
func TestSplitStoreCompactionWithBadger(t *testing.T) {
|
||||
//stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
bs := badgerMarkSetBatchSize
|
||||
badgerMarkSetBatchSize = 1
|
||||
t.Cleanup(func() {
|
||||
@ -241,6 +239,9 @@ func TestSplitStoreCompactionWithBadger(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
|
||||
//stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
ctx := context.Background()
|
||||
chain := &mockChain{t: t}
|
||||
|
||||
@ -277,14 +278,7 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
path, err := ioutil.TempDir("", "splitstore.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
// open the splitstore
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
@ -424,14 +418,7 @@ func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore.
|
||||
}
|
||||
}
|
||||
|
||||
path, err := ioutil.TempDir("", "splitstore.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
if err != nil {
|
||||
@ -531,14 +518,7 @@ func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blocks
|
||||
}
|
||||
}
|
||||
|
||||
path, err := ioutil.TempDir("", "splitstore.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
if err != nil {
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
@ -13,6 +14,9 @@ import (
|
||||
)
|
||||
|
||||
func TestTimedCacheBlockstoreSimple(t *testing.T) {
|
||||
//stm: @SPLITSTORE_TIMED_BLOCKSTORE_START_001
|
||||
//stm: @SPLITSTORE_TIMED_BLOCKSTORE_PUT_001, @SPLITSTORE_TIMED_BLOCKSTORE_HAS_001, @SPLITSTORE_TIMED_BLOCKSTORE_GET_001
|
||||
//stm: @SPLITSTORE_TIMED_BLOCKSTORE_ALL_KEYS_CHAN_001
|
||||
tc := NewTimedCacheBlockstore(10 * time.Millisecond)
|
||||
mClock := clock.NewMock()
|
||||
mClock.Set(time.Now())
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
@ -15,6 +16,7 @@ var (
|
||||
)
|
||||
|
||||
func TestUnionBlockstore_Get(t *testing.T) {
|
||||
//stm: @SPLITSTORE_UNION_BLOCKSTORE_GET_001
|
||||
ctx := context.Background()
|
||||
m1 := NewMemory()
|
||||
m2 := NewMemory()
|
||||
@ -34,6 +36,9 @@ func TestUnionBlockstore_Get(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUnionBlockstore_Put_PutMany_Delete_AllKeysChan(t *testing.T) {
|
||||
//stm: @SPLITSTORE_UNION_BLOCKSTORE_PUT_001, @SPLITSTORE_UNION_BLOCKSTORE_HAS_001
|
||||
//stm: @SPLITSTORE_UNION_BLOCKSTORE_PUT_MANY_001, @SPLITSTORE_UNION_BLOCKSTORE_DELETE_001
|
||||
//stm: @SPLITSTORE_UNION_BLOCKSTORE_ALL_KEYS_CHAN_001
|
||||
ctx := context.Background()
|
||||
m1 := NewMemory()
|
||||
m2 := NewMemory()
|
||||
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package build
|
||||
|
||||
import (
|
||||
@ -7,6 +8,7 @@ import (
|
||||
)
|
||||
|
||||
func TestOpenRPCDiscoverJSON_Version(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_OPENRPC_VERSION_001
|
||||
// openRPCDocVersion is the current OpenRPC version of the API docs.
|
||||
openRPCDocVersion := "1.2.6"
|
||||
|
||||
|
@ -37,7 +37,7 @@ func BuildTypeString() string {
}

// BuildVersion is the local build version
const BuildVersion = "1.15.1-dev"
const BuildVersion = "1.15.2-dev"

func UserVersion() string {
	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package adt
|
||||
|
||||
import (
|
||||
@ -44,6 +45,7 @@ func TestDiffAdtArray(t *testing.T) {
|
||||
|
||||
changes := new(TestDiffArray)
|
||||
|
||||
//stm: @CHAIN_ADT_ARRAY_DIFF_001
|
||||
assert.NoError(t, DiffAdtArray(arrA, arrB, changes))
|
||||
assert.NotNil(t, changes)
|
||||
|
||||
@ -98,6 +100,7 @@ func TestDiffAdtMap(t *testing.T) {
|
||||
|
||||
changes := new(TestDiffMap)
|
||||
|
||||
//stm: @CHAIN_ADT_MAP_DIFF_001
|
||||
assert.NoError(t, DiffAdtMap(mapA, mapB, changes))
|
||||
assert.NotNil(t, changes)
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package aerrors_test
|
||||
|
||||
import (
|
||||
@ -11,6 +12,7 @@ import (
|
||||
)
|
||||
|
||||
func TestFatalError(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_ACTOR_ERRORS_001
|
||||
e1 := xerrors.New("out of disk space")
|
||||
e2 := xerrors.Errorf("could not put node: %w", e1)
|
||||
e3 := xerrors.Errorf("could not save head: %w", e2)
|
||||
@ -24,6 +26,7 @@ func TestFatalError(t *testing.T) {
|
||||
assert.True(t, IsFatal(aw4), "should be fatal")
|
||||
}
|
||||
func TestAbsorbeError(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_ACTOR_ERRORS_001
|
||||
e1 := xerrors.New("EOF")
|
||||
e2 := xerrors.Errorf("could not decode: %w", e1)
|
||||
ae := Absorb(e2, 35, "failed to decode CBOR")
|
||||
|
@@ -1,3 +1,4 @@
//stm: #unit
package policy

import (
@@ -22,6 +23,7 @@ func TestSupportedProofTypes(t *testing.T) {
for t := range miner0.SupportedProofTypes {
oldTypes = append(oldTypes, t)
}
//stm: @BLOCKCHAIN_POLICY_SET_MAX_SUPPORTED_PROOF_TYPES_001
t.Cleanup(func() {
SetSupportedProofTypes(oldTypes...)
})
@@ -33,6 +35,7 @@ func TestSupportedProofTypes(t *testing.T) {
abi.RegisteredSealProof_StackedDrg2KiBV1: {},
},
)
//stm: @BLOCKCHAIN_POLICY_ADD_MAX_SUPPORTED_PROOF_TYPES_001
AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1)
require.EqualValues(t,
miner0.SupportedProofTypes,
@@ -45,6 +48,7 @@ func TestSupportedProofTypes(t *testing.T) {

// Tests assumptions about policies being the same between actor versions.
func TestAssumptions(t *testing.T) {
//stm: @BLOCKCHAIN_POLICY_ASSUMPTIONS_001
require.EqualValues(t, miner0.SupportedProofTypes, miner2.PreCommitSealProofTypesV0)
require.Equal(t, miner0.PreCommitChallengeDelay, miner2.PreCommitChallengeDelay)
require.Equal(t, miner0.MaxSectorExpirationExtension, miner2.MaxSectorExpirationExtension)
@@ -58,6 +62,7 @@ func TestAssumptions(t *testing.T) {
}

func TestPartitionSizes(t *testing.T) {
//stm: @CHAIN_ACTOR_PARTITION_SIZES_001
for _, p := range abi.SealProofInfos {
sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof)
require.NoError(t, err)
@@ -71,6 +76,7 @@ func TestPartitionSizes(t *testing.T) {
}

func TestPoStSize(t *testing.T) {
//stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001
v12PoStSize, err := GetMaxPoStPartitions(network.Version12, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1)
require.Equal(t, 4, v12PoStSize)
require.NoError(t, err)
@@ -1,3 +1,5 @@
//stm: ignore
//Only tests external library behavior, therefore it should not be annotated
package drand

import (
@@ -2,6 +2,7 @@ package filcns

import (
"context"
"os"
"sync/atomic"

"github.com/filecoin-project/lotus/chain/rand"
@@ -94,7 +95,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
}()

ctx = blockstore.WithHotView(ctx)
makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (*vm.VM, error) {
makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (vm.Interface, error) {
vmopt := &vm.VMOpts{
StateBase: base,
Epoch: e,
@@ -108,10 +109,23 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts),
}

if os.Getenv("LOTUS_USE_FVM_EXPERIMENTAL") == "1" {
// This is needed so that the FVM does not have to duplicate the genesis vesting schedule, one
// of the components of the circ supply calc.
// This field is NOT needed by the LegacyVM, and also NOT needed by the FVM from v15 onwards.
filVested, err := sm.GetFilVested(ctx, e)
if err != nil {
return nil, err
}

vmopt.FilVested = filVested
return vm.NewFVM(ctx, vmopt)
}

return sm.VMConstructor()(ctx, vmopt)
}

runCron := func(vmCron *vm.VM, epoch abi.ChainEpoch) error {
runCron := func(vmCron vm.Interface, epoch abi.ChainEpoch) error {
cronMsg := &types.Message{
To: cron.Address,
From: builtin.SystemActorAddr,

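The ApplyBlocks hunk above gates the experimental FVM behind the `LOTUS_USE_FVM_EXPERIMENTAL` environment variable and falls back to the legacy constructor otherwise. A minimal, self-contained sketch of that selection pattern; the `Interface`, `Constructor`, `newFVM` and `newLegacyVM` names here are illustrative stand-ins, not the lotus API:

```go
package vmselect

import (
	"context"
	"os"
)

// Interface is a stand-in for the VM abstraction both implementations satisfy.
type Interface interface {
	ApplyMessage(ctx context.Context, msg []byte) ([]byte, error)
}

// Constructor builds a VM; newFVM and newLegacyVM below are hypothetical factories.
type Constructor func(ctx context.Context) (Interface, error)

// NewVM picks the experimental implementation only when the env var is set,
// otherwise it uses the legacy constructor.
func NewVM(ctx context.Context, newFVM, newLegacyVM Constructor) (Interface, error) {
	if os.Getenv("LOTUS_USE_FVM_EXPERIMENTAL") == "1" {
		return newFVM(ctx)
	}
	return newLegacyVM(ctx)
}
```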
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package events
|
||||
|
||||
import (
|
||||
@ -358,6 +359,7 @@ func (fcs *fakeCS) advance(rev, app, drop int, msgs map[int]cid.Cid, nulls ...in
|
||||
var _ EventAPI = &fakeCS{}
|
||||
|
||||
func TestAt(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
require.NoError(t, err)
|
||||
@ -418,6 +420,7 @@ func TestAt(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtNullTrigger(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
require.NoError(t, err)
|
||||
@ -447,6 +450,7 @@ func TestAtNullTrigger(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtNullConf(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001, @EVENTS_HEIGHT_REVERT_001
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
@ -485,6 +489,7 @@ func TestAtNullConf(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtStart(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -515,6 +520,7 @@ func TestAtStart(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtStartConfidence(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -541,6 +547,7 @@ func TestAtStartConfidence(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtChained(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -571,6 +578,7 @@ func TestAtChained(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtChainedConfidence(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -601,6 +609,7 @@ func TestAtChainedConfidence(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtChainedConfidenceNull(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -632,6 +641,7 @@ func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Messag
|
||||
}
|
||||
|
||||
func TestCalled(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -837,6 +847,7 @@ func TestCalled(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCalledTimeout(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -897,6 +908,7 @@ func TestCalledTimeout(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCalledOrder(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -953,6 +965,7 @@ func TestCalledOrder(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCalledNull(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -1011,6 +1024,7 @@ func TestCalledNull(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveTriggersOnMessage(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -1094,6 +1108,7 @@ type testStateChange struct {
|
||||
}
|
||||
|
||||
func TestStateChanged(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -1179,6 +1194,7 @@ func TestStateChanged(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStateChangedRevert(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -1255,6 +1271,7 @@ func TestStateChangedRevert(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStateChangedTimeout(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
timeoutHeight := abi.ChainEpoch(20)
|
||||
confidence := 3
|
||||
|
||||
@ -1332,6 +1349,7 @@ func TestStateChangedTimeout(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCalledMultiplePerEpoch(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -1384,6 +1402,7 @@ func TestCalledMultiplePerEpoch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCachedSameBlock(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
_, err := NewEvents(context.Background(), fcs)
|
||||
@ -1418,6 +1437,7 @@ func (t *testObserver) Revert(_ context.Context, from, to *types.TipSet) error {
|
||||
}
|
||||
|
||||
func TestReconnect(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package state
|
||||
|
||||
import (
|
||||
@ -35,6 +36,12 @@ func init() {
|
||||
}
|
||||
|
||||
func TestMarketPredicates(t *testing.T) {
|
||||
//stm: @EVENTS_PREDICATES_ON_ACTOR_STATE_CHANGED_001, @EVENTS_PREDICATES_DEAL_STATE_CHANGED_001
|
||||
//stm: @EVENTS_PREDICATES_DEAL_CHANGED_FOR_IDS
|
||||
|
||||
//stm: @EVENTS_PREDICATES_ON_BALANCE_CHANGED_001, @EVENTS_PREDICATES_BALANCE_CHANGED_FOR_ADDRESS_001
|
||||
//stm: @EVENTS_PREDICATES_ON_DEAL_PROPOSAL_CHANGED_001, @EVENTS_PREDICATES_PROPOSAL_AMT_CHANGED_001
|
||||
//stm: @EVENTS_PREDICATES_DEAL_STATE_CHANGED_001, @EVENTS_PREDICATES_DEAL_AMT_CHANGED_001
|
||||
ctx := context.Background()
|
||||
bs := bstore.NewMemorySync()
|
||||
store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs))
|
||||
@ -333,6 +340,8 @@ func TestMarketPredicates(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMinerSectorChange(t *testing.T) {
|
||||
//stm: @EVENTS_PREDICATES_ON_ACTOR_STATE_CHANGED_001, @EVENTS_PREDICATES_MINER_ACTOR_CHANGE_001
|
||||
//stm: @EVENTS_PREDICATES_MINER_SECTOR_CHANGE_001
|
||||
ctx := context.Background()
|
||||
bs := bstore.NewMemorySync()
|
||||
store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs))
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package events
|
||||
|
||||
import (
|
||||
@ -92,6 +93,7 @@ func (h *cacheHarness) skip(n abi.ChainEpoch) {
|
||||
}
|
||||
|
||||
func TestTsCache(t *testing.T) {
|
||||
//stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001, @EVENTS_CACHE_GET_001, @EVENTS_CACHE_ADD_001
|
||||
h := newCacheharness(t)
|
||||
|
||||
for i := 0; i < 9000; i++ {
|
||||
@ -104,6 +106,8 @@ func TestTsCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTsCacheNulls(t *testing.T) {
|
||||
//stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001, @EVENTS_CACHE_GET_CHAIN_TIPSET_BEFORE_001, @EVENTS_CACHE_GET_CHAIN_TIPSET_AFTER_001
|
||||
//stm: @EVENTS_CACHE_GET_001, @EVENTS_CACHE_ADD_001
|
||||
ctx := context.Background()
|
||||
h := newCacheharness(t)
|
||||
|
||||
@ -182,6 +186,7 @@ func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSet(ctx context.Context, tsk
|
||||
}
|
||||
|
||||
func TestTsCacheEmpty(t *testing.T) {
|
||||
//stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001
|
||||
// Calling best on an empty cache should just call out to the chain API
|
||||
callCounter := &tsCacheAPIStorageCallCounter{t: t}
|
||||
tsc := newTSCache(callCounter, 50)
|
||||
@ -191,6 +196,7 @@ func TestTsCacheEmpty(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTsCacheSkip(t *testing.T) {
|
||||
//stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001, @EVENTS_CACHE_GET_001, @EVENTS_CACHE_ADD_001
|
||||
h := newCacheharness(t)
|
||||
|
||||
ts, err := types.NewTipSet([]*types.BlockHeader{{
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package gen
|
||||
|
||||
import (
|
||||
@ -34,6 +35,7 @@ func testGeneration(t testing.TB, n int, msgs int, sectors int) {
|
||||
}
|
||||
|
||||
func TestChainGeneration(t *testing.T) {
|
||||
//stm: @CHAIN_GEN_NEW_GEN_WITH_SECTORS_001, @CHAIN_GEN_NEXT_TIPSET_001
|
||||
t.Run("10-20-1", func(t *testing.T) { testGeneration(t, 10, 20, 1) })
|
||||
t.Run("10-20-25", func(t *testing.T) { testGeneration(t, 10, 20, 25) })
|
||||
}
|
||||
|
@@ -491,12 +491,13 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca
Actors: filcns.NewActorRegistry(),
Syscalls: mkFakedSigSyscalls(sys),
CircSupplyCalc: csc,
FilVested: big.Zero(),
NetworkVersion: nv,
BaseFee: types.NewInt(0),
BaseFee: big.Zero(),
}
vm, err := vm.NewVM(ctx, &vmopt)
vm, err := vm.NewLegacyVM(ctx, &vmopt)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err)
return cid.Undef, xerrors.Errorf("failed to create NewLegacyVM: %w", err)
}

for mi, m := range template.Miners {

@@ -95,12 +95,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal
Syscalls: mkFakedSigSyscalls(sys),
CircSupplyCalc: csc,
NetworkVersion: nv,
BaseFee: types.NewInt(0),
BaseFee: big.Zero(),
FilVested: big.Zero(),
}

vm, err := vm.NewVM(ctx, vmopt)
vm, err := vm.NewLegacyVM(ctx, vmopt)
if err != nil {
return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err)
return cid.Undef, xerrors.Errorf("failed to create NewLegacyVM: %w", err)
}

if len(miners) == 0 {
@@ -520,7 +521,7 @@ func (fr *fakeRand) GetBeaconRandomness(ctx context.Context, personalization cry
return out, nil
}

func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) {
func currentTotalPower(ctx context.Context, vm *vm.LegacyVM, maddr address.Address) (*power0.CurrentTotalPowerReturn, error) {
pwret, err := doExecValue(ctx, vm, power.Address, maddr, big.Zero(), builtin0.MethodsPower.CurrentTotalPower, nil)
if err != nil {
return nil, err
@@ -533,7 +534,7 @@ func currentTotalPower(ctx context.Context, vm *vm.VM, maddr address.Address) (*
return &pwr, nil
}

func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch, av actors.Version) (abi.DealWeight, abi.DealWeight, error) {
func dealWeight(ctx context.Context, vm *vm.LegacyVM, maddr address.Address, dealIDs []abi.DealID, sectorStart, sectorExpiry abi.ChainEpoch, av actors.Version) (abi.DealWeight, abi.DealWeight, error) {
// TODO: This hack should move to market actor wrapper
if av <= actors.Version2 {
params := &market0.VerifyDealsForActivationParams{
@@ -593,7 +594,7 @@ func dealWeight(ctx context.Context, vm *vm.VM, maddr address.Address, dealIDs [
return dealWeights.Sectors[0].DealWeight, dealWeights.Sectors[0].VerifiedDealWeight, nil
}

func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Address, av actors.Version) (abi.StoragePower, builtin.FilterEstimate, error) {
func currentEpochBlockReward(ctx context.Context, vm *vm.LegacyVM, maddr address.Address, av actors.Version) (abi.StoragePower, builtin.FilterEstimate, error) {
rwret, err := doExecValue(ctx, vm, reward.Address, maddr, big.Zero(), reward.Methods.ThisEpochReward, nil)
if err != nil {
return big.Zero(), builtin.FilterEstimate{}, err
@@ -628,7 +629,7 @@ func currentEpochBlockReward(ctx context.Context, vm *vm.VM, maddr address.Addre
return epochReward.ThisEpochBaselinePower, builtin.FilterEstimate(epochReward.ThisEpochRewardSmoothed), nil
}

func circSupply(ctx context.Context, vmi *vm.VM, maddr address.Address) abi.TokenAmount {
func circSupply(ctx context.Context, vmi *vm.LegacyVM, maddr address.Address) abi.TokenAmount {
unsafeVM := &vm.UnsafeVM{VM: vmi}
rt := unsafeVM.MakeRuntime(ctx, &types.Message{
GasLimit: 1_000_000_000,
@@ -21,7 +21,7 @@ func mustEnc(i cbg.CBORMarshaler) []byte {
return enc
}

func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) {
func doExecValue(ctx context.Context, vm *vm.LegacyVM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) {
act, err := vm.StateTree().GetActor(from)
if err != nil {
return nil, xerrors.Errorf("doExec failed to get from actor (%s): %w", from, err)

@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package market
|
||||
|
||||
import (
|
||||
@ -22,6 +23,7 @@ import (
|
||||
|
||||
// TestFundManagerBasic verifies that the basic fund manager operations work
|
||||
func TestFundManagerBasic(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -106,6 +108,7 @@ func TestFundManagerBasic(t *testing.T) {
|
||||
|
||||
// TestFundManagerParallel verifies that operations can be run in parallel
|
||||
func TestFundManagerParallel(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -197,6 +200,7 @@ func TestFundManagerParallel(t *testing.T) {
|
||||
|
||||
// TestFundManagerReserveByWallet verifies that reserve requests are grouped by wallet
|
||||
func TestFundManagerReserveByWallet(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -290,6 +294,7 @@ func TestFundManagerReserveByWallet(t *testing.T) {
|
||||
// TestFundManagerWithdrawal verifies that as many withdraw operations as
|
||||
// possible are processed
|
||||
func TestFundManagerWithdrawalLimit(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -384,6 +389,7 @@ func TestFundManagerWithdrawalLimit(t *testing.T) {
|
||||
|
||||
// TestFundManagerWithdrawByWallet verifies that withdraw requests are grouped by wallet
|
||||
func TestFundManagerWithdrawByWallet(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -493,6 +499,7 @@ func TestFundManagerWithdrawByWallet(t *testing.T) {
|
||||
// TestFundManagerRestart verifies that waiting for incomplete requests resumes
|
||||
// on restart
|
||||
func TestFundManagerRestart(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -559,6 +566,7 @@ func TestFundManagerRestart(t *testing.T) {
|
||||
// 3. Deal B completes, reducing addr1 by 7: reserved 12 available 12 -> 5
|
||||
// 4. Deal A releases 5 from addr1: reserved 12 -> 7 available 5
|
||||
func TestFundManagerReleaseAfterPublish(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package messagepool
|
||||
|
||||
import (
|
||||
@ -8,6 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestBlockProbability(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_BLOCK_PROB_001
|
||||
mp := &MessagePool{}
|
||||
bp := mp.blockProbabilities(1 - 0.15)
|
||||
t.Logf("%+v\n", bp)
|
||||
@ -20,6 +22,7 @@ func TestBlockProbability(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestWinnerProba(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_BLOCK_PROB_002
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
const N = 1000000
|
||||
winnerProba := noWinnersProb()
|
||||
|
@@ -854,7 +854,6 @@ func TestMessageValueTooHigh(t *testing.T) {
Message: *msg,
Signature: *sig,
}

err = mp.Add(context.TODO(), sm)
assert.Error(t, err)
}
@@ -901,8 +900,7 @@ func TestMessageSignatureInvalid(t *testing.T) {
}
err = mp.Add(context.TODO(), sm)
assert.Error(t, err)
// assert.Contains(t, err.Error(), "invalid signature length")
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid signature length")
}
}

@@ -926,14 +924,29 @@ func TestAddMessageTwice(t *testing.T) {
to := mock.Address(1001)

{
// create a valid messages
sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
msg := &types.Message{
To: to,
From: from,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
GasPremium: types.NewInt(1),
Params: make([]byte, 32<<10),
}

sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
if err != nil {
panic(err)
}
sm := &types.SignedMessage{
Message: *msg,
Signature: *sig,
}
mustAdd(t, mp, sm)

// try to add it twice
err = mp.Add(context.TODO(), sm)
// assert.Contains(t, err.Error(), "with nonce 0 already in mpool")
assert.Error(t, err)
assert.Contains(t, err.Error(), "with nonce 0 already in mpool")
}
}

@@ -963,8 +976,7 @@ func TestAddMessageTwiceNonceGap(t *testing.T) {

// then try to add message again
err = mp.Add(context.TODO(), sm)
// assert.Contains(t, err.Error(), "unfulfilled nonce gap")
assert.Error(t, err)
assert.Contains(t, err.Error(), "unfulfilled nonce gap")
}
}

@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package state
|
||||
|
||||
import (
|
||||
@ -18,6 +19,7 @@ import (
|
||||
)
|
||||
|
||||
func BenchmarkStateTreeSet(b *testing.B) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
st, err := NewStateTree(cst, types.StateTreeVersion1)
|
||||
if err != nil {
|
||||
@ -45,6 +47,7 @@ func BenchmarkStateTreeSet(b *testing.B) {
|
||||
}
|
||||
|
||||
func BenchmarkStateTreeSetFlush(b *testing.B) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
sv, err := VersionForNetwork(build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
@ -80,6 +83,8 @@ func BenchmarkStateTreeSetFlush(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestResolveCache(t *testing.T) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001
|
||||
//stm: @CHAIN_STATETREE_SNAPSHOT_001, @CHAIN_STATETREE_SNAPSHOT_CLEAR_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
sv, err := VersionForNetwork(build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
@ -182,6 +187,8 @@ func TestResolveCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func BenchmarkStateTree10kGetActor(b *testing.B) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001
|
||||
//stm: @CHAIN_STATETREE_FLUSH_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
sv, err := VersionForNetwork(build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
@ -229,6 +236,7 @@ func BenchmarkStateTree10kGetActor(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestSetCache(t *testing.T) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
sv, err := VersionForNetwork(build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
@ -270,6 +278,8 @@ func TestSetCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSnapshots(t *testing.T) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001
|
||||
//stm: @CHAIN_STATETREE_FLUSH_001, @CHAIN_STATETREE_SNAPSHOT_REVERT_001, CHAIN_STATETREE_SNAPSHOT_CLEAR_001
|
||||
ctx := context.Background()
|
||||
cst := cbor.NewMemCborStore()
|
||||
|
||||
@ -360,6 +370,7 @@ func assertNotHas(t *testing.T, st *StateTree, addr address.Address) {
|
||||
}
|
||||
|
||||
func TestStateTreeConsistency(t *testing.T) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001, @CHAIN_STATETREE_FLUSH_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
|
||||
// TODO: ActorUpgrade: this test tests pre actors v2
|
||||
|
@@ -5,6 +5,12 @@ import (
"errors"
"fmt"

cbor "github.com/ipfs/go-ipld-cbor"

"github.com/filecoin-project/lotus/chain/state"

"github.com/filecoin-project/lotus/blockstore"

"github.com/filecoin-project/lotus/chain/rand"

"github.com/filecoin-project/go-address"
@@ -64,6 +70,8 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
pheight = ts.Height() - 1
}

// Since we're simulating a future message, pretend we're applying it in the "next" tipset
vmHeight := pheight + 1
bstate := ts.ParentState()

// Run the (not expensive) migration.
@@ -72,9 +80,14 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
return nil, fmt.Errorf("failed to handle fork: %w", err)
}

filVested, err := sm.GetFilVested(ctx, vmHeight)
if err != nil {
return nil, err
}

vmopt := &vm.VMOpts{
StateBase: bstate,
Epoch: pheight + 1,
Epoch: vmHeight,
Rand: rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion),
Bstore: sm.cs.StateBlockstore(),
Actors: sm.tsExec.NewActorRegistry(),
@@ -82,6 +95,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
CircSupplyCalc: sm.GetVMCirculatingSupply,
NetworkVersion: sm.GetNetworkVersion(ctx, pheight+1),
BaseFee: types.NewInt(0),
FilVested: filVested,
LookbackState: LookbackStateGetterForTipset(sm, ts),
}

@@ -112,7 +126,12 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
)
}

fromActor, err := vmi.StateTree().GetActor(msg.From)
stTree, err := sm.StateTree(bstate)
if err != nil {
return nil, xerrors.Errorf("failed to load state tree: %w", err)
}

fromActor, err := stTree.GetActor(msg.From)
if err != nil {
return nil, xerrors.Errorf("call raw get actor: %s", err)
}
@@ -175,13 +194,16 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
}
}

state, _, err := sm.TipSetState(ctx, ts)
// Since we're simulating a future message, pretend we're applying it in the "next" tipset
vmHeight := ts.Height() + 1

stateCid, _, err := sm.TipSetState(ctx, ts)
if err != nil {
return nil, xerrors.Errorf("computing tipset state: %w", err)
}

// Technically, the tipset we're passing in here should be ts+1, but that may not exist.
state, err = sm.HandleStateForks(ctx, state, ts.Height(), nil, ts)
stateCid, err = sm.HandleStateForks(ctx, stateCid, ts.Height(), nil, ts)
if err != nil {
return nil, fmt.Errorf("failed to handle fork: %w", err)
}
@@ -196,16 +218,23 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
)
}

filVested, err := sm.GetFilVested(ctx, vmHeight)
if err != nil {
return nil, err
}

buffStore := blockstore.NewBuffered(sm.cs.StateBlockstore())
vmopt := &vm.VMOpts{
StateBase: state,
Epoch: ts.Height() + 1,
StateBase: stateCid,
Epoch: vmHeight,
Rand: r,
Bstore: sm.cs.StateBlockstore(),
Bstore: buffStore,
Actors: sm.tsExec.NewActorRegistry(),
Syscalls: sm.Syscalls,
CircSupplyCalc: sm.GetVMCirculatingSupply,
NetworkVersion: sm.GetNetworkVersion(ctx, ts.Height()+1),
BaseFee: ts.Blocks()[0].ParentBaseFee,
FilVested: filVested,
LookbackState: LookbackStateGetterForTipset(sm, ts),
}
vmi, err := sm.newVM(ctx, vmopt)
@@ -219,7 +248,19 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
}
}

fromActor, err := vmi.StateTree().GetActor(msg.From)
// We flush to get the VM's view of the state tree after applying the above messages
// This is needed to get the correct nonce from the actor state to match the VM
stateCid, err = vmi.Flush(ctx)
if err != nil {
return nil, xerrors.Errorf("flushing vm: %w", err)
}

stTree, err := state.LoadStateTree(cbor.NewCborStore(buffStore), stateCid)
if err != nil {
return nil, xerrors.Errorf("loading state tree: %w", err)
}

fromActor, err := stTree.GetActor(msg.From)
if err != nil {
return nil, xerrors.Errorf("call raw get actor: %s", err)
}

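The CallWithGas hunk above flushes the VM after applying the pending messages and then reloads a state tree from the flushed root, so the caller reads nonces that match what the VM has seen. A rough sketch of that flush-then-read pattern, using simplified stand-in types rather than the lotus signatures:

```go
package callsketch

import "context"

// VM and StateTree are simplified stand-ins for the lotus types.
type VM interface {
	Flush(ctx context.Context) (string, error) // returns a state root identifier
}

type StateTree interface {
	GetActorNonce(addr string) (uint64, error)
}

// nonceAfterApply flushes the VM so its buffered writes become a readable
// state root, then loads a tree at that root to read the sender's nonce.
func nonceAfterApply(ctx context.Context, vmi VM, load func(root string) (StateTree, error), from string) (uint64, error) {
	root, err := vmi.Flush(ctx)
	if err != nil {
		return 0, err
	}
	st, err := load(root)
	if err != nil {
		return 0, err
	}
	return st.GetActorNonce(from)
}
```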
@ -1,3 +1,4 @@
|
||||
//stm: #integration
|
||||
package stmgr_test
|
||||
|
||||
import (
|
||||
@ -106,6 +107,9 @@ func (ta *testActor) TestMethod(rt rt2.Runtime, params *abi.EmptyValue) *abi.Emp
|
||||
}
|
||||
|
||||
func TestForkHeightTriggers(t *testing.T) {
|
||||
//stm: @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_FLUSH_001, @TOKEN_WALLET_SIGN_001
|
||||
//stm: @CHAIN_GEN_NEXT_TIPSET_001
|
||||
//stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001
|
||||
logging.SetAllLoggers(logging.LevelInfo)
|
||||
|
||||
ctx := context.TODO()
|
||||
@ -166,8 +170,8 @@ func TestForkHeightTriggers(t *testing.T) {
|
||||
inv := filcns.NewActorRegistry()
|
||||
inv.Register(nil, testActor{})
|
||||
|
||||
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
|
||||
nvm, err := vm.NewVM(ctx, vmopt)
|
||||
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
|
||||
nvm, err := vm.NewLegacyVM(ctx, vmopt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -241,6 +245,8 @@ func TestForkHeightTriggers(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestForkRefuseCall(t *testing.T) {
|
||||
//stm: @CHAIN_GEN_NEXT_TIPSET_001, @CHAIN_GEN_NEXT_TIPSET_FROM_MINERS_001
|
||||
//stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001, @CHAIN_STATE_CALL_001
|
||||
logging.SetAllLoggers(logging.LevelInfo)
|
||||
|
||||
for after := 0; after < 3; after++ {
|
||||
@ -281,8 +287,8 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
|
||||
inv := filcns.NewActorRegistry()
|
||||
inv.Register(nil, testActor{})
|
||||
|
||||
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
|
||||
nvm, err := vm.NewVM(ctx, vmopt)
|
||||
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
|
||||
nvm, err := vm.NewLegacyVM(ctx, vmopt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -360,6 +366,8 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
|
||||
}
|
||||
|
||||
func TestForkPreMigration(t *testing.T) {
|
||||
//stm: @CHAIN_GEN_NEXT_TIPSET_001,
|
||||
//stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001
|
||||
logging.SetAllLoggers(logging.LevelInfo)
|
||||
|
||||
cg, err := gen.NewGenerator()
|
||||
@ -500,8 +508,8 @@ func TestForkPreMigration(t *testing.T) {
|
||||
inv := filcns.NewActorRegistry()
|
||||
inv.Register(nil, testActor{})
|
||||
|
||||
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) {
|
||||
nvm, err := vm.NewVM(ctx, vmopt)
|
||||
sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) {
|
||||
nvm, err := vm.NewLegacyVM(ctx, vmopt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package stmgr_test
|
||||
|
||||
import (
|
||||
@ -12,6 +13,8 @@ import (
|
||||
)
|
||||
|
||||
func TestSearchForMessageReplacements(t *testing.T) {
|
||||
//stm: @CHAIN_GEN_NEXT_TIPSET_001
|
||||
//stm: @CHAIN_STATE_SEARCH_MSG_001
|
||||
ctx := context.Background()
|
||||
cg, err := gen.NewGenerator()
|
||||
if err != nil {
|
||||
|
@@ -84,7 +84,7 @@ type StateManager struct {
compWait map[string]chan struct{}
stlk sync.Mutex
genesisMsigLk sync.Mutex
newVM func(context.Context, *vm.VMOpts) (*vm.VM, error)
newVM func(context.Context, *vm.VMOpts) (vm.Interface, error)
Syscalls vm.SyscallBuilder
preIgnitionVesting []msig0.State
postIgnitionVesting []msig0.State
@@ -347,12 +347,12 @@ func (sm *StateManager) ValidateChain(ctx context.Context, ts *types.TipSet) err
return nil
}

func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (*vm.VM, error)) {
func (sm *StateManager) SetVMConstructor(nvm func(context.Context, *vm.VMOpts) (vm.Interface, error)) {
sm.newVM = nvm
}

func (sm *StateManager) VMConstructor() func(context.Context, *vm.VMOpts) (*vm.VM, error) {
return func(ctx context.Context, opts *vm.VMOpts) (*vm.VM, error) {
func (sm *StateManager) VMConstructor() func(context.Context, *vm.VMOpts) (vm.Interface, error) {
return func(ctx context.Context, opts *vm.VMOpts) (vm.Interface, error) {
return sm.newVM(ctx, opts)
}
}

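The StateManager now stores its VM constructor as a function returning `vm.Interface` instead of the concrete `*vm.VM`, which is what lets callers inject either the legacy VM or the FVM. A small sketch of holding a swappable constructor behind an interface; the names here are illustrative only:

```go
package ctorsketch

import "context"

// Interface stands in for the VM abstraction; Opts for the constructor options.
type Interface interface{ Run(ctx context.Context) error }

type Opts struct{ Epoch int64 }

type Manager struct {
	newVM func(context.Context, *Opts) (Interface, error)
}

// SetVMConstructor lets tests or callers inject any implementation that
// satisfies Interface, mirroring the signature change in the hunk above.
func (m *Manager) SetVMConstructor(nvm func(context.Context, *Opts) (Interface, error)) {
	m.newVM = nvm
}
```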
@@ -196,8 +196,32 @@ func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error {
// GetVestedFunds returns all funds that have "left" actors that are in the genesis state:
// - For Multisigs, it counts the actual amounts that have vested at the given epoch
// - For Accounts, it counts max(currentBalance - genesisBalance, 0).
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) {
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) {
vf := big.Zero()

sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()

// TODO: combine all this?
if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() {
err := sm.setupGenesisVestingSchedule(ctx)
if err != nil {
return vf, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err)
}
}
if sm.postIgnitionVesting == nil {
err := sm.setupPostIgnitionVesting(ctx)
if err != nil {
return vf, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err)
}
}
if sm.postCalicoVesting == nil {
err := sm.setupPostCalicoVesting(ctx)
if err != nil {
return vf, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err)
}
}

if height <= build.UpgradeIgnitionHeight {
for _, v := range sm.preIgnitionVesting {
au := big.Sub(v.InitialBalance, v.AmountLocked(height))
@@ -282,7 +306,7 @@ func getFilPowerLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmoun
return pst.TotalLocked()
}

func (sm *StateManager) GetFilLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {
func GetFilLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) {

filMarketLocked, err := getFilMarketLocked(ctx, st)
if err != nil {
@@ -316,28 +340,7 @@ func (sm *StateManager) GetVMCirculatingSupply(ctx context.Context, height abi.C
}

func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (api.CirculatingSupply, error) {
sm.genesisMsigLk.Lock()
defer sm.genesisMsigLk.Unlock()
if sm.preIgnitionVesting == nil || sm.genesisPledge.IsZero() || sm.genesisMarketFunds.IsZero() {
err := sm.setupGenesisVestingSchedule(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup pre-ignition vesting schedule: %w", err)
}
}
if sm.postIgnitionVesting == nil {
err := sm.setupPostIgnitionVesting(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-ignition vesting schedule: %w", err)
}
}
if sm.postCalicoVesting == nil {
err := sm.setupPostCalicoVesting(ctx)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to setup post-calico vesting schedule: %w", err)
}
}

filVested, err := sm.GetFilVested(ctx, height, st)
filVested, err := sm.GetFilVested(ctx, height)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filVested: %w", err)
}
@@ -360,7 +363,7 @@ func (sm *StateManager) GetVMCirculatingSupplyDetailed(ctx context.Context, heig
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filBurnt: %w", err)
}

filLocked, err := sm.GetFilLocked(ctx, st)
filLocked, err := GetFilLocked(ctx, st)
if err != nil {
return api.CirculatingSupply{}, xerrors.Errorf("failed to calculate filLocked: %w", err)
}

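Per the GetVestedFunds doc comment above, genesis accounts contribute max(currentBalance - genesisBalance, 0) to the vested total. A tiny sketch of that arithmetic with math/big; the helper name is hypothetical, not lotus code:

```go
package vestsketch

import "math/big"

// vestedFromAccount returns max(current - genesis, 0), matching the rule
// described for genesis accounts in the comment above.
func vestedFromAccount(current, genesis *big.Int) *big.Int {
	d := new(big.Int).Sub(current, genesis)
	if d.Sign() < 0 {
		return big.NewInt(0)
	}
	return d
}
```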
@@ -79,6 +79,11 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
// future. It's not guaranteed to be accurate... but that's fine.
}

filVested, err := sm.GetFilVested(ctx, height)
if err != nil {
return cid.Undef, nil, err
}

r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion)
vmopt := &vm.VMOpts{
StateBase: base,
@@ -90,6 +95,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
CircSupplyCalc: sm.GetVMCirculatingSupply,
NetworkVersion: sm.GetNetworkVersion(ctx, height),
BaseFee: ts.Blocks()[0].ParentBaseFee,
FilVested: filVested,
LookbackState: LookbackStateGetterForTipset(sm, ts),
}
vmi, err := sm.newVM(ctx, vmopt)

@ -1,3 +1,5 @@
|
||||
//stm: #unit
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
@ -10,6 +12,7 @@ import (
|
||||
)
|
||||
|
||||
func TestBaseFee(t *testing.T) {
|
||||
//stm: @CHAIN_STORE_COMPUTE_NEXT_BASE_FEE_001
|
||||
tests := []struct {
|
||||
basefee uint64
|
||||
limitUsed int64
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package store_test
|
||||
|
||||
import (
|
||||
@ -10,6 +11,9 @@ import (
|
||||
)
|
||||
|
||||
func TestChainCheckpoint(t *testing.T) {
|
||||
//stm: @CHAIN_GEN_NEXT_TIPSET_FROM_MINERS_001
|
||||
//stm: @CHAIN_STORE_GET_TIPSET_FROM_KEY_001, @CHAIN_STORE_SET_HEAD_001, @CHAIN_STORE_GET_HEAVIEST_TIPSET_001
|
||||
//stm: @CHAIN_STORE_SET_CHECKPOINT_001, @CHAIN_STORE_MAYBE_TAKE_HEAVIER_TIPSET_001, @CHAIN_STORE_REMOVE_CHECKPOINT_001
|
||||
ctx := context.Background()
|
||||
|
||||
cg, err := gen.NewGenerator()
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package store
|
||||
|
||||
import (
|
||||
@ -9,6 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestHeadChangeCoalescer(t *testing.T) {
|
||||
//stm: @CHAIN_STORE_COALESCE_HEAD_CHANGE_001
|
||||
notif := make(chan headChange, 1)
|
||||
c := NewHeadChangeCoalescer(func(revert, apply []*types.TipSet) error {
|
||||
notif <- headChange{apply: apply, revert: revert}
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package store_test
|
||||
|
||||
import (
|
||||
@ -17,6 +18,9 @@ import (
|
||||
)
|
||||
|
||||
func TestIndexSeeks(t *testing.T) {
|
||||
//stm: @CHAIN_STORE_IMPORT_001
|
||||
//stm: @CHAIN_STORE_GET_TIPSET_BY_HEIGHT_001, @CHAIN_STORE_PUT_TIPSET_001, @CHAIN_STORE_SET_GENESIS_BLOCK_001
|
||||
//stm: @CHAIN_STORE_CLOSE_001
|
||||
cg, err := gen.NewGenerator()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@@ -18,6 +18,10 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)

func (cs *ChainStore) UnionStore() bstore.Blockstore {
return bstore.Union(cs.stateBlockstore, cs.chainBlockstore)
}

func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRoots abi.ChainEpoch, skipOldMsgs bool, w io.Writer) error {
h := &car.CarHeader{
Roots: ts.Cids(),
@@ -28,7 +32,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo
return xerrors.Errorf("failed to write car header: %s", err)
}

unionBs := bstore.Union(cs.stateBlockstore, cs.chainBlockstore)
unionBs := cs.UnionStore()
return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error {
blk, err := unionBs.Get(ctx, c)
if err != nil {

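UnionStore composes the state and chain blockstores so snapshot export can read a block from whichever store holds it. A simplified sketch of a first-hit-wins union get, using stand-in types rather than the lotus blockstore API:

```go
package unionsketch

import "errors"

var errNotFound = errors.New("block not found")

// Blockstore is a minimal stand-in for a content-addressed block store.
type Blockstore interface {
	Get(key string) ([]byte, error)
}

// union tries each underlying store in order and returns the first hit.
type union []Blockstore

func (u union) Get(key string) ([]byte, error) {
	for _, bs := range u {
		if b, err := bs.Get(key); err == nil {
			return b, nil
		}
	}
	return nil, errNotFound
}
```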
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package store_test
|
||||
|
||||
import (
|
||||
@ -28,6 +29,8 @@ func init() {
|
||||
}
|
||||
|
||||
func BenchmarkGetRandomness(b *testing.B) {
|
||||
//stm: @CHAIN_GEN_NEXT_TIPSET_001
|
||||
//stm: @CHAIN_STATE_GET_RANDOMNESS_FROM_TICKETS_001
|
||||
cg, err := gen.NewGenerator()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
@ -85,6 +88,8 @@ func BenchmarkGetRandomness(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestChainExportImport(t *testing.T) {
|
||||
//stm: @CHAIN_GEN_NEXT_TIPSET_001
|
||||
//stm: @CHAIN_STORE_IMPORT_001
|
||||
cg, err := gen.NewGenerator()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -120,6 +125,9 @@ func TestChainExportImport(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestChainExportImportFull(t *testing.T) {
|
||||
//stm: @CHAIN_GEN_NEXT_TIPSET_001
|
||||
//stm: @CHAIN_STORE_IMPORT_001, @CHAIN_STORE_EXPORT_001, @CHAIN_STORE_SET_HEAD_001
|
||||
//stm: @CHAIN_STORE_GET_TIPSET_BY_HEIGHT_001
|
||||
cg, err := gen.NewGenerator()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package chain
|
||||
|
||||
import (
|
||||
@ -78,6 +79,7 @@ func assertGetSyncOp(t *testing.T, c chan *syncOp, ts *types.TipSet) {
|
||||
}
|
||||
|
||||
func TestSyncManagerEdgeCase(t *testing.T) {
|
||||
//stm: @CHAIN_SYNCER_SET_PEER_HEAD_001
|
||||
ctx := context.Background()
|
||||
|
||||
a := mock.TipSet(mock.MkBlock(genTs, 1, 1))
|
||||
@ -161,6 +163,7 @@ func TestSyncManagerEdgeCase(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncManager(t *testing.T) {
|
||||
//stm: @CHAIN_SYNCER_SET_PEER_HEAD_001
|
||||
ctx := context.Background()
|
||||
|
||||
a := mock.TipSet(mock.MkBlock(genTs, 1, 1))
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -14,6 +15,7 @@ import (
|
||||
)
|
||||
|
||||
func TestBigIntSerializationRoundTrip(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_PARSE_BIGINT_001
|
||||
testValues := []string{
|
||||
"0", "1", "10", "-10", "9999", "12345678901234567891234567890123456789012345678901234567890",
|
||||
}
|
||||
@ -42,6 +44,7 @@ func TestBigIntSerializationRoundTrip(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFilRoundTrip(t *testing.T) {
|
||||
//stm: @TYPES_FIL_PARSE_001
|
||||
testValues := []string{
|
||||
"0 FIL", "1 FIL", "1.001 FIL", "100.10001 FIL", "101100 FIL", "5000.01 FIL", "5000 FIL",
|
||||
}
|
||||
@ -59,6 +62,7 @@ func TestFilRoundTrip(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSizeStr(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SIZE_BIGINT_001
|
||||
cases := []struct {
|
||||
in uint64
|
||||
out string
|
||||
@ -79,6 +83,7 @@ func TestSizeStr(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSizeStrUnitsSymmetry(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SIZE_BIGINT_001
|
||||
s := rand.NewSource(time.Now().UnixNano())
|
||||
r := rand.New(s)
|
||||
|
||||
@ -95,6 +100,7 @@ func TestSizeStrUnitsSymmetry(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSizeStrBig(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SIZE_BIGINT_001
|
||||
ZiB := big.NewInt(50000)
|
||||
ZiB = ZiB.Lsh(ZiB, 70)
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -51,6 +52,7 @@ func testBlockHeader(t testing.TB) *BlockHeader {
|
||||
}
|
||||
|
||||
func TestBlockHeaderSerialization(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_BLOCK_HEADER_FROM_CBOR_001, @CHAIN_TYPES_BLOCK_HEADER_TO_CBOR_001
|
||||
bh := testBlockHeader(t)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
@ -71,6 +73,7 @@ func TestBlockHeaderSerialization(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInteropBH(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_BLOCK_INTEROP_001
|
||||
newAddr, err := address.NewSecp256k1Address([]byte("address0"))
|
||||
|
||||
if err != nil {
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -11,6 +12,7 @@ import (
|
||||
)
|
||||
|
||||
func TestPoissonFunction(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_POISSON_001
|
||||
tests := []struct {
|
||||
lambdaBase uint64
|
||||
lambdaShift uint
|
||||
@ -47,6 +49,7 @@ func TestPoissonFunction(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLambdaFunction(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_LAMBDA_001
|
||||
tests := []struct {
|
||||
power string
|
||||
totalPower string
|
||||
@ -72,6 +75,7 @@ func TestLambdaFunction(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExpFunction(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_NEGATIVE_EXP_001
|
||||
const N = 256
|
||||
|
||||
step := big.NewInt(5)
|
||||
@ -100,6 +104,7 @@ func q256ToF(x *big.Int) float64 {
|
||||
}
|
||||
|
||||
func TestElectionLam(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_LAMBDA_001
|
||||
p := big.NewInt(64)
|
||||
tot := big.NewInt(128)
|
||||
lam := lambda(p, tot)
|
||||
@ -128,6 +133,7 @@ func BenchmarkWinCounts(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestWinCounts(t *testing.T) {
|
||||
//stm: @TYPES_ELECTION_PROOF_COMPUTE_WIN_COUNT_001
|
||||
totalPower := NewInt(100)
|
||||
power := NewInt(20)
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -7,6 +8,7 @@ import (
|
||||
)
|
||||
|
||||
func TestFilShort(t *testing.T) {
|
||||
//stm: @TYPES_FIL_PARSE_001
|
||||
for _, s := range []struct {
|
||||
fil string
|
||||
expect string
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -71,6 +72,7 @@ func TestEqualCall(t *testing.T) {
|
||||
Params: []byte("hai"),
|
||||
}
|
||||
|
||||
//stm: @TYPES_MESSAGE_EQUAL_CALL_001
|
||||
require.True(t, m1.EqualCall(m2))
|
||||
require.True(t, m1.EqualCall(m3))
|
||||
require.False(t, m1.EqualCall(m4))
|
||||
@ -97,11 +99,13 @@ func TestMessageJson(t *testing.T) {
|
||||
exp := []byte("{\"Version\":0,\"To\":\"f04\",\"From\":\"f00\",\"Nonce\":34,\"Value\":\"0\",\"GasLimit\":123,\"GasFeeCap\":\"234\",\"GasPremium\":\"234\",\"Method\":6,\"Params\":\"aGFp\",\"CID\":{\"/\":\"bafy2bzaced5rdpz57e64sc7mdwjn3blicglhpialnrph2dlbufhf6iha63dmc\"}}")
|
||||
fmt.Println(string(b))
|
||||
|
||||
//stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_001
|
||||
require.Equal(t, exp, b)
|
||||
|
||||
var um Message
|
||||
require.NoError(t, json.Unmarshal(b, &um))
|
||||
|
||||
//stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_002
|
||||
require.EqualValues(t, *m, um)
|
||||
}
|
||||
|
||||
@ -131,10 +135,12 @@ func TestSignedMessageJson(t *testing.T) {
|
||||
exp := []byte("{\"Message\":{\"Version\":0,\"To\":\"f04\",\"From\":\"f00\",\"Nonce\":34,\"Value\":\"0\",\"GasLimit\":123,\"GasFeeCap\":\"234\",\"GasPremium\":\"234\",\"Method\":6,\"Params\":\"aGFp\",\"CID\":{\"/\":\"bafy2bzaced5rdpz57e64sc7mdwjn3blicglhpialnrph2dlbufhf6iha63dmc\"}},\"Signature\":{\"Type\":0,\"Data\":null},\"CID\":{\"/\":\"bafy2bzacea5ainifngxj3rygaw2hppnyz2cw72x5pysqty2x6dxmjs5qg2uus\"}}")
|
||||
fmt.Println(string(b))
|
||||
|
||||
//stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_001
|
||||
require.Equal(t, exp, b)
|
||||
|
||||
var um SignedMessage
|
||||
require.NoError(t, json.Unmarshal(b, &um))
|
||||
|
||||
//stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_002
|
||||
require.EqualValues(t, *sm, um)
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -8,6 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestSignatureSerializeRoundTrip(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SIGNATURE_SERIALIZATION_001
|
||||
s := &crypto.Signature{
|
||||
Data: []byte("foo bar cat dog"),
|
||||
Type: crypto.SigTypeBLS,
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -12,6 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func TestTipSetKey(t *testing.T) {
|
||||
//stm: @TYPES_TIPSETKEY_FROM_BYTES_001, @TYPES_TIPSETKEY_NEW_001
|
||||
cb := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.BLAKE2B_MIN + 31}
|
||||
c1, _ := cb.Sum([]byte("a"))
|
||||
c2, _ := cb.Sum([]byte("b"))
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package chain
|
||||
|
||||
import (
|
||||
@ -12,6 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func TestSignedMessageJsonRoundtrip(t *testing.T) {
|
||||
//stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_002
|
||||
to, _ := address.NewIDAddress(5234623)
|
||||
from, _ := address.NewIDAddress(603911192)
|
||||
smsg := &types.SignedMessage{
|
||||
@ -40,6 +42,7 @@ func TestSignedMessageJsonRoundtrip(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAddressType(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_ADDRESS_PREFIX_001
|
||||
build.SetAddressNetwork(address.Testnet)
|
||||
addr, err := makeRandomAddress()
|
||||
if err != nil {
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package vectors
|
||||
|
||||
import (
|
||||
@ -26,6 +27,7 @@ func LoadVector(t *testing.T, f string, out interface{}) {
|
||||
}
|
||||
|
||||
func TestBlockHeaderVectors(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SERIALIZATION_BLOCK_001
|
||||
var headers []HeaderVector
|
||||
LoadVector(t, "block_headers.json", &headers)
|
||||
|
||||
@ -46,6 +48,7 @@ func TestBlockHeaderVectors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMessageSigningVectors(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SERIALIZATION_SIGNED_MESSAGE_001
|
||||
var msvs []MessageSigningVector
|
||||
LoadVector(t, "message_signing.json", &msvs)
|
||||
|
||||
@ -64,6 +67,7 @@ func TestMessageSigningVectors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUnsignedMessageVectors(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SERIALIZATION_MESSAGE_001
|
||||
var msvs []UnsignedMessageVector
|
||||
LoadVector(t, "unsigned_messages.json", &msvs)
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package vm
|
||||
|
||||
import (
|
||||
@ -9,6 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestGasBurn(t *testing.T) {
|
||||
//stm: @BURN_ESTIMATE_GAS_OVERESTIMATION_BURN_001
|
||||
tests := []struct {
|
||||
used int64
|
||||
limit int64
|
||||
@ -40,6 +42,7 @@ func TestGasBurn(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGasOutputs(t *testing.T) {
|
||||
//stm: @BURN_ESTIMATE_GAS_OUTPUTS_001
|
||||
baseFee := types.NewInt(10)
|
||||
tests := []struct {
|
||||
used int64
|
||||
|
312 chain/vm/fvm.go (Normal file)
@@ -0,0 +1,312 @@
package vm

import (
"bytes"
"context"
"time"

"github.com/filecoin-project/go-state-types/network"

"github.com/filecoin-project/go-state-types/big"

"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/state"
cbor "github.com/ipfs/go-ipld-cbor"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/lib/sigs"

"golang.org/x/xerrors"

"github.com/filecoin-project/lotus/blockstore"

ffi "github.com/filecoin-project/filecoin-ffi"
ffi_cgo "github.com/filecoin-project/filecoin-ffi/cgo"

"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/ipfs/go-cid"
)

var _ Interface = (*FVM)(nil)
var _ ffi_cgo.Externs = (*FvmExtern)(nil)

type FvmExtern struct {
Rand
blockstore.Blockstore
epoch abi.ChainEpoch
lbState LookbackStateGetter
base cid.Cid
}

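FvmExtern satisfies the ffi externs interface largely by embedding Rand and a Blockstore, which promotes their methods onto the struct. A generic illustration of that Go embedding pattern, with unrelated names used purely for illustration:

```go
package embedsketch

type Reader interface{ Read() string }
type Writer interface{ Write(s string) }

// ReadWriter is the larger interface a composed type needs to satisfy.
type ReadWriter interface {
	Reader
	Writer
}

type fileReader struct{}

func (fileReader) Read() string { return "data" }

type logWriter struct{}

func (logWriter) Write(s string) {}

// composite picks up Read and Write through embedding, so it satisfies
// ReadWriter without re-declaring either method.
type composite struct {
	fileReader
	logWriter
}

var _ ReadWriter = composite{}
```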
// VerifyConsensusFault is similar to the one in syscalls.go used by the LegacyVM, except it never errors
|
||||
// Errors are logged and "no fault" is returned, which is functionally what go-actors does anyway
|
||||
func (x *FvmExtern) VerifyConsensusFault(ctx context.Context, a, b, extra []byte) (*ffi_cgo.ConsensusFault, int64) {
|
||||
totalGas := int64(0)
|
||||
ret := &ffi_cgo.ConsensusFault{
|
||||
Type: ffi_cgo.ConsensusFaultNone,
|
||||
}
|
||||
|
||||
// Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions.
|
||||
// Whether or not it could ever have been accepted in a chain is not checked/does not matter here.
|
||||
// for that reason when checking block parent relationships, rather than instantiating a Tipset to do so
|
||||
// (which runs a syntactic check), we do it directly on the CIDs.
|
||||
|
||||
// (0) cheap preliminary checks
|
||||
|
||||
// can blocks be decoded properly?
|
||||
var blockA, blockB types.BlockHeader
|
||||
if decodeErr := blockA.UnmarshalCBOR(bytes.NewReader(a)); decodeErr != nil {
|
||||
log.Info("invalid consensus fault: cannot decode first block header: %w", decodeErr)
|
||||
return ret, totalGas
|
||||
}
|
||||
|
||||
if decodeErr := blockB.UnmarshalCBOR(bytes.NewReader(b)); decodeErr != nil {
|
||||
log.Info("invalid consensus fault: cannot decode second block header: %w", decodeErr)
|
||||
return ret, totalGas
|
||||
}
|
||||
|
||||
// are blocks the same?
|
||||
if blockA.Cid().Equals(blockB.Cid()) {
|
||||
log.Info("invalid consensus fault: submitted blocks are the same")
|
||||
return ret, totalGas
|
||||
}
|
||||
// (1) check conditions necessary to any consensus fault
|
||||
|
||||
// were blocks mined by same miner?
|
||||
if blockA.Miner != blockB.Miner {
|
||||
log.Info("invalid consensus fault: blocks not mined by the same miner")
|
||||
		return ret, totalGas
	}

	// block a must be earlier or equal to block b, epoch wise (ie at least as early in the chain).
	if blockB.Height < blockA.Height {
		log.Info("invalid consensus fault: first block must not be of higher height than second")
		return ret, totalGas
	}

	ret.Epoch = blockB.Height

	faultType := ffi_cgo.ConsensusFaultNone

	// (2) check for the consensus faults themselves
	// (a) double-fork mining fault
	if blockA.Height == blockB.Height {
		faultType = ffi_cgo.ConsensusFaultDoubleForkMining
	}

	// (b) time-offset mining fault
	// strictly speaking no need to compare heights based on double fork mining check above,
	// but at same height this would be a different fault.
	if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height {
		faultType = ffi_cgo.ConsensusFaultTimeOffsetMining
	}

	// (c) parent-grinding fault
	// Here extra is the "witness", a third block that shows the connection between A and B as
	// A's sibling and B's parent.
	// Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset
	//
	//      B
	//      |
	//  [A, C]
	var blockC types.BlockHeader
	if len(extra) > 0 {
		if decodeErr := blockC.UnmarshalCBOR(bytes.NewReader(extra)); decodeErr != nil {
			log.Info("invalid consensus fault: cannot decode extra: %w", decodeErr)
			return ret, totalGas
		}

		if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
			types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
			faultType = ffi_cgo.ConsensusFaultParentGrinding
		}
	}

	// (3) return if no consensus fault by now
	if faultType == ffi_cgo.ConsensusFaultNone {
		log.Info("invalid consensus fault: no fault detected")
		return ret, totalGas
	}

	// else
	// (4) expensive final checks

	// check blocks are properly signed by their respective miner
	// note we do not need to check extra's: it is a parent to block b
	// which itself is signed, so it was willingly included by the miner
	gasA, sigErr := x.VerifyBlockSig(ctx, &blockA)
	totalGas += gasA
	if sigErr != nil {
		log.Info("invalid consensus fault: cannot verify first block sig: %w", sigErr)
		return ret, totalGas
	}

	gas2, sigErr := x.VerifyBlockSig(ctx, &blockB)
	totalGas += gas2
	if sigErr != nil {
		log.Info("invalid consensus fault: cannot verify second block sig: %w", sigErr)
		return ret, totalGas
	}

	ret.Type = faultType
	ret.Target = blockA.Miner

	return ret, totalGas
}

func (x *FvmExtern) VerifyBlockSig(ctx context.Context, blk *types.BlockHeader) (int64, error) {
	waddr, gasUsed, err := x.workerKeyAtLookback(ctx, blk.Miner, blk.Height)
	if err != nil {
		return gasUsed, err
	}

	return gasUsed, sigs.CheckBlockSignature(ctx, blk, waddr)
}

func (x *FvmExtern) workerKeyAtLookback(ctx context.Context, minerId address.Address, height abi.ChainEpoch) (address.Address, int64, error) {
	gasUsed := int64(0)
	gasAdder := func(gc GasCharge) {
		// technically not overflow safe, but that's fine
		gasUsed += gc.Total()
	}

	cstWithoutGas := cbor.NewCborStore(x.Blockstore)
	cbb := &gasChargingBlocks{gasAdder, PricelistByEpoch(x.epoch), x.Blockstore}
	cstWithGas := cbor.NewCborStore(cbb)

	lbState, err := x.lbState(ctx, height)
	if err != nil {
		return address.Undef, gasUsed, err
	}
	// get appropriate miner actor
	act, err := lbState.GetActor(minerId)
	if err != nil {
		return address.Undef, gasUsed, err
	}

	// use that to get the miner state
	mas, err := miner.Load(adt.WrapStore(ctx, cstWithGas), act)
	if err != nil {
		return address.Undef, gasUsed, err
	}

	info, err := mas.Info()
	if err != nil {
		return address.Undef, gasUsed, err
	}

	stateTree, err := state.LoadStateTree(cstWithoutGas, x.base)
	if err != nil {
		return address.Undef, gasUsed, err
	}

	raddr, err := ResolveToKeyAddr(stateTree, cstWithGas, info.Worker)
	if err != nil {
		return address.Undef, gasUsed, err
	}

	return raddr, gasUsed, nil
}

type FVM struct {
	fvm *ffi.FVM
}

func NewFVM(ctx context.Context, opts *VMOpts) (*FVM, error) {
	circToReport := opts.FilVested
	// For v14 (and earlier), we perform the FilVested portion of the calculation, and let the FVM dynamically do the rest
	// v15 and after, the circ supply is always constant per epoch, so we calculate the base and report it at creation
	if opts.NetworkVersion >= network.Version15 {
		state, err := state.LoadStateTree(cbor.NewCborStore(opts.Bstore), opts.StateBase)
		if err != nil {
			return nil, err
		}

		circToReport, err = opts.CircSupplyCalc(ctx, opts.Epoch, state)
		if err != nil {
			return nil, err
		}
	}

	fvm, err := ffi.CreateFVM(0,
		&FvmExtern{Rand: opts.Rand, Blockstore: opts.Bstore, lbState: opts.LookbackState, base: opts.StateBase, epoch: opts.Epoch},
		opts.Epoch, opts.BaseFee, circToReport, opts.NetworkVersion, opts.StateBase,
	)
	if err != nil {
		return nil, err
	}

	return &FVM{
		fvm: fvm,
	}, nil
}

func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
	start := build.Clock.Now()
	msgBytes, err := cmsg.VMMessage().Serialize()
	if err != nil {
		return nil, xerrors.Errorf("serializing msg: %w", err)
	}

	ret, err := vm.fvm.ApplyMessage(msgBytes, uint(cmsg.ChainLength()))
	if err != nil {
		return nil, xerrors.Errorf("applying msg: %w", err)
	}

	return &ApplyRet{
		MessageReceipt: types.MessageReceipt{
			Return:   ret.Return,
			ExitCode: exitcode.ExitCode(ret.ExitCode),
			GasUsed:  ret.GasUsed,
		},
		GasCosts: &GasOutputs{
			// TODO: do the other optional fields eventually
			BaseFeeBurn:        big.Zero(),
			OverEstimationBurn: big.Zero(),
			MinerPenalty:       ret.MinerPenalty,
			MinerTip:           ret.MinerTip,
			Refund:             big.Zero(),
			GasRefund:          0,
			GasBurned:          0,
		},
		// TODO: do these eventually, not consensus critical
		// https://github.com/filecoin-project/ref-fvm/issues/318
		ActorErr:       nil,
		ExecutionTrace: types.ExecutionTrace{},
		Duration:       time.Since(start),
	}, nil
}

func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (*ApplyRet, error) {
	start := build.Clock.Now()
	msgBytes, err := cmsg.VMMessage().Serialize()
	if err != nil {
		return nil, xerrors.Errorf("serializing msg: %w", err)
	}
	ret, err := vm.fvm.ApplyImplicitMessage(msgBytes)
	if err != nil {
		return nil, xerrors.Errorf("applying msg: %w", err)
	}

	return &ApplyRet{
		MessageReceipt: types.MessageReceipt{
			Return:   ret.Return,
			ExitCode: exitcode.ExitCode(ret.ExitCode),
			GasUsed:  ret.GasUsed,
		},
		GasCosts: nil,
		// TODO: do these eventually, not consensus critical
		// https://github.com/filecoin-project/ref-fvm/issues/318
		ActorErr:       nil,
		ExecutionTrace: types.ExecutionTrace{},
		Duration:       time.Since(start),
	}, nil
}

func (vm *FVM) Flush(ctx context.Context) (cid.Cid, error) {
	return vm.fvm.Flush()
}
@ -50,7 +50,7 @@ func newGasCharge(name string, computeGas int64, storageGas int64) GasCharge {
|
||||
}
|
||||
}
|
||||
|
||||
// Pricelist provides prices for operations in the VM.
|
||||
// Pricelist provides prices for operations in the LegacyVM.
|
||||
//
|
||||
// Note: this interface should be APPEND ONLY since last chain checkpoint
|
||||
type Pricelist interface {
|
||||
|
@ -50,7 +50,7 @@ type pricelistV0 struct {
|
||||
// whether it succeeds or fails in application) is given by:
|
||||
// OnChainMessageBase + len(serialized message)*OnChainMessagePerByte
|
||||
// Together, these account for the cost of message propagation and validation,
|
||||
// up to but excluding any actual processing by the VM.
|
||||
// up to but excluding any actual processing by the LegacyVM.
|
||||
// This is the cost a block producer burns when including an invalid message.
|
||||
onChainMessageComputeBase int64
|
||||
onChainMessageStorageBase int64
|
||||
@ -83,11 +83,11 @@ type pricelistV0 struct {
|
||||
sendInvokeMethod int64
|
||||
|
||||
// Gas cost for any Get operation to the IPLD store
|
||||
// in the runtime VM context.
|
||||
// in the runtime LegacyVM context.
|
||||
ipldGetBase int64
|
||||
|
||||
// Gas cost (Base + len*PerByte) for any Put operation to the IPLD store
|
||||
// in the runtime VM context.
|
||||
// in the runtime LegacyVM context.
|
||||
//
|
||||
// Note: these costs should be significantly higher than the costs for Get
|
||||
// operations, since they reflect not only serialization/deserialization
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package vm
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package vm
|
||||
|
||||
import (
|
||||
@ -106,6 +107,7 @@ func (*basicRtMessage) ValueReceived() abi.TokenAmount {
|
||||
}
|
||||
|
||||
func TestInvokerBasic(t *testing.T) {
|
||||
//stm: @INVOKER_TRANSFORM_001
|
||||
inv := ActorRegistry{}
|
||||
code, err := inv.transform(basicContract{})
|
||||
assert.NoError(t, err)
|
||||
@ -135,7 +137,7 @@ func TestInvokerBasic(t *testing.T) {
|
||||
|
||||
{
|
||||
_, aerr := code[1](&Runtime{
|
||||
vm: &VM{networkVersion: network.Version0},
|
||||
vm: &LegacyVM{networkVersion: network.Version0},
|
||||
Message: &basicRtMessage{},
|
||||
}, []byte{99})
|
||||
if aerrors.IsFatal(aerr) {
|
||||
@ -146,7 +148,7 @@ func TestInvokerBasic(t *testing.T) {
|
||||
|
||||
{
|
||||
_, aerr := code[1](&Runtime{
|
||||
vm: &VM{networkVersion: network.Version7},
|
||||
vm: &LegacyVM{networkVersion: network.Version7},
|
||||
Message: &basicRtMessage{},
|
||||
}, []byte{99})
|
||||
if aerrors.IsFatal(aerr) {
|
||||
|
@ -65,7 +65,7 @@ type Runtime struct {
|
||||
|
||||
ctx context.Context
|
||||
|
||||
vm *VM
|
||||
vm *LegacyVM
|
||||
state *state.StateTree
|
||||
height abi.ChainEpoch
|
||||
cst ipldcbor.IpldStore
|
||||
@ -158,7 +158,7 @@ func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.Act
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if ar, ok := r.(aerrors.ActorError); ok {
|
||||
log.Warnf("VM.Call failure in call from: %s to %s: %+v", rt.Caller(), rt.Receiver(), ar)
|
||||
log.Warnf("LegacyVM.Call failure in call from: %s to %s: %+v", rt.Caller(), rt.Receiver(), ar)
|
||||
aerr = ar
|
||||
return
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package vm
|
||||
|
||||
import (
|
||||
@ -22,6 +23,7 @@ func (*NotAVeryGoodMarshaler) MarshalCBOR(writer io.Writer) error {
|
||||
var _ cbg.CBORMarshaler = &NotAVeryGoodMarshaler{}
|
||||
|
||||
func TestRuntimePutErrors(t *testing.T) {
|
||||
//stm: @CHAIN_VM_STORE_PUT_002
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err == nil {
|
||||
|
@ -122,7 +122,7 @@ func (bs *gasChargingBlocks) Put(ctx context.Context, blk block.Block) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime {
|
||||
func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime {
|
||||
rt := &Runtime{
|
||||
ctx: ctx,
|
||||
vm: vm,
|
||||
@ -188,7 +188,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti
|
||||
}
|
||||
|
||||
type UnsafeVM struct {
|
||||
VM *VM
|
||||
VM *LegacyVM
|
||||
}
|
||||
|
||||
func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtime {
|
||||
@ -201,7 +201,9 @@ type (
|
||||
LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error)
|
||||
)
|
||||
|
||||
type VM struct {
|
||||
var _ Interface = (*LegacyVM)(nil)
|
||||
|
||||
type LegacyVM struct {
|
||||
cstate *state.StateTree
|
||||
cst *cbor.BasicIpldStore
|
||||
buf *blockstore.BufferedBlockstore
|
||||
@ -225,12 +227,14 @@ type VMOpts struct {
|
||||
Actors *ActorRegistry
|
||||
Syscalls SyscallBuilder
|
||||
CircSupplyCalc CircSupplyCalculator
|
||||
// Amount of FIL vested from genesis actors.
|
||||
FilVested abi.TokenAmount
|
||||
NetworkVersion network.Version
|
||||
BaseFee abi.TokenAmount
|
||||
LookbackState LookbackStateGetter
|
||||
}
|
||||
|
||||
func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
|
||||
func NewLegacyVM(ctx context.Context, opts *VMOpts) (*LegacyVM, error) {
|
||||
buf := blockstore.NewBuffered(opts.Bstore)
|
||||
cst := cbor.NewCborStore(buf)
|
||||
state, err := state.LoadStateTree(cst, opts.StateBase)
|
||||
@ -243,7 +247,7 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &VM{
|
||||
return &LegacyVM{
|
||||
cstate: state,
|
||||
cst: cst,
|
||||
buf: buf,
|
||||
@ -272,7 +276,7 @@ type ApplyRet struct {
|
||||
GasCosts *GasOutputs
|
||||
}
|
||||
|
||||
func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime,
|
||||
func (vm *LegacyVM) send(ctx context.Context, msg *types.Message, parent *Runtime,
|
||||
gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) {
|
||||
defer atomic.AddUint64(&StatSends, 1)
|
||||
|
||||
@ -391,7 +395,7 @@ func checkMessage(msg *types.Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) {
|
||||
func (vm *LegacyVM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) {
|
||||
start := build.Clock.Now()
|
||||
defer atomic.AddUint64(&StatApplied, 1)
|
||||
ret, actorErr, rt := vm.send(ctx, msg, nil, nil, start)
|
||||
@ -409,7 +413,7 @@ func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*Ap
|
||||
}, actorErr
|
||||
}
|
||||
|
||||
func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
|
||||
func (vm *LegacyVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
|
||||
start := build.Clock.Now()
|
||||
ctx, span := trace.StartSpan(ctx, "vm.ApplyMessage")
|
||||
defer span.End()
|
||||
@ -616,7 +620,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) {
|
||||
func (vm *LegacyVM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) {
|
||||
if vm.networkVersion <= network.Version12 {
|
||||
// Check to see if we should burn funds. We avoid burning on successful
|
||||
// window post. This won't catch _indirect_ window post calls, but this
|
||||
@ -646,7 +650,7 @@ func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Me
|
||||
|
||||
type vmFlushKey struct{}
|
||||
|
||||
func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
|
||||
func (vm *LegacyVM) Flush(ctx context.Context) (cid.Cid, error) {
|
||||
_, span := trace.StartSpan(ctx, "vm.Flush")
|
||||
defer span.End()
|
||||
|
||||
@ -665,9 +669,9 @@ func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) {
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// Get the buffered blockstore associated with the VM. This includes any temporary blocks produced
|
||||
// during this VM's execution.
|
||||
func (vm *VM) ActorStore(ctx context.Context) adt.Store {
|
||||
// Get the buffered blockstore associated with the LegacyVM. This includes any temporary blocks produced
|
||||
// during this LegacyVM's execution.
|
||||
func (vm *LegacyVM) ActorStore(ctx context.Context) adt.Store {
|
||||
return adt.WrapStore(ctx, vm.cst)
|
||||
}
|
||||
|
||||
@ -820,11 +824,11 @@ func copyRec(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vm *VM) StateTree() types.StateTree {
|
||||
func (vm *LegacyVM) StateTree() types.StateTree {
|
||||
return vm.cstate
|
||||
}
|
||||
|
||||
func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) {
|
||||
func (vm *LegacyVM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) {
|
||||
ctx, span := trace.StartSpan(rt.ctx, "vm.Invoke")
|
||||
defer span.End()
|
||||
if span.IsRecordingEvents() {
|
||||
@ -847,11 +851,11 @@ func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (vm *VM) SetInvoker(i *ActorRegistry) {
|
||||
func (vm *LegacyVM) SetInvoker(i *ActorRegistry) {
|
||||
vm.areg = i
|
||||
}
|
||||
|
||||
func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) {
|
||||
func (vm *LegacyVM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) {
|
||||
// Before v15, this was recalculated on each invocation as the state tree was mutated
|
||||
if vm.networkVersion <= network.Version14 {
|
||||
return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate)
|
||||
@ -860,14 +864,14 @@ func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) {
|
||||
return vm.baseCircSupply, nil
|
||||
}
|
||||
|
||||
func (vm *VM) incrementNonce(addr address.Address) error {
|
||||
func (vm *LegacyVM) incrementNonce(addr address.Address) error {
|
||||
return vm.cstate.MutateActor(addr, func(a *types.Actor) error {
|
||||
a.Nonce++
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (vm *VM) transfer(from, to address.Address, amt types.BigInt, networkVersion network.Version) aerrors.ActorError {
|
||||
func (vm *LegacyVM) transfer(from, to address.Address, amt types.BigInt, networkVersion network.Version) aerrors.ActorError {
|
||||
var f *types.Actor
|
||||
var fromID, toID address.Address
|
||||
var err error
|
||||
@ -955,7 +959,7 @@ func (vm *VM) transfer(from, to address.Address, amt types.BigInt, networkVersio
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vm *VM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error {
|
||||
func (vm *LegacyVM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error {
|
||||
if amt.LessThan(types.NewInt(0)) {
|
||||
return xerrors.Errorf("attempted to transfer negative value to gas holder")
|
||||
}
|
||||
@ -969,7 +973,7 @@ func (vm *VM) transferToGasHolder(addr address.Address, gasHolder *types.Actor,
|
||||
})
|
||||
}
|
||||
|
||||
func (vm *VM) transferFromGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error {
|
||||
func (vm *LegacyVM) transferFromGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error {
|
||||
if amt.LessThan(types.NewInt(0)) {
|
||||
return xerrors.Errorf("attempted to transfer negative value from gas holder")
|
||||
}
|
||||
|
27
chain/vm/vmi.go
Normal file
@ -0,0 +1,27 @@
package vm

import (
	"context"
	"os"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/ipfs/go-cid"
)

type Interface interface {
	// Applies the given message onto the VM's current state, returning the result of the execution
	ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error)
	// Same as above but for system messages (the Cron invocation and block reward payments).
	// Must NEVER fail.
	ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error)
	// Flush all buffered objects into the state store provided to the VM at construction.
	Flush(ctx context.Context) (cid.Cid, error)
}

func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) {
	if os.Getenv("LOTUS_USE_FVM_EXPERIMENTAL") == "1" {
		return NewFVM(ctx, opts)
	}

	return NewLegacyVM(ctx, opts)
}
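With the new `vmi.go`, callers no longer pick a concrete VM type; they go through `NewVM`, which consults the `LOTUS_USE_FVM_EXPERIMENTAL` environment variable. Below is a minimal sketch of how a caller might exercise this, assuming a prepared `*vm.VMOpts` and message; the package and helper name are illustrative and not part of this change.

```go
package vmexample // hypothetical package, for illustration only

import (
	"context"
	"fmt"
	"os"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
)

// applyWithSelectedVM opts in to the experimental FVM and applies one message
// through the vm.Interface returned by vm.NewVM.
func applyWithSelectedVM(ctx context.Context, opts *vm.VMOpts, msg *types.Message) error {
	os.Setenv("LOTUS_USE_FVM_EXPERIMENTAL", "1") // any other value keeps the LegacyVM

	vmi, err := vm.NewVM(ctx, opts) // either *vm.FVM or *vm.LegacyVM behind vm.Interface
	if err != nil {
		return err
	}

	ret, err := vmi.ApplyMessage(ctx, msg) // *types.Message satisfies types.ChainMsg
	if err != nil {
		return err
	}
	fmt.Println("exit code:", ret.MessageReceipt.ExitCode, "gas used:", ret.MessageReceipt.GasUsed)

	root, err := vmi.Flush(ctx) // persist buffered state objects back to the blockstore
	if err != nil {
		return err
	}
	fmt.Println("state root:", root)
	return nil
}
```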
@ -1,4 +1,4 @@
|
||||
//stm: #cli
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -1913,8 +1913,9 @@ type deal struct {
|
||||
}
|
||||
|
||||
var clientGetDealCmd = &cli.Command{
|
||||
Name: "get-deal",
|
||||
Usage: "Print detailed deal information",
|
||||
Name: "get-deal",
|
||||
Usage: "Print detailed deal information",
|
||||
ArgsUsage: "[proposalCID]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Args().Present() {
|
||||
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
|
||||
|
@ -1,4 +1,4 @@
|
||||
//stm: #cli
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,5 @@
|
||||
//stm: ignore
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,5 @@
|
||||
//stm: ignore
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -1768,6 +1768,9 @@ var StateSectorCmd = &cli.Command{
|
||||
fmt.Println("SectorNumber: ", si.SectorNumber)
|
||||
fmt.Println("SealProof: ", si.SealProof)
|
||||
fmt.Println("SealedCID: ", si.SealedCID)
|
||||
if si.SectorKeyCID != nil {
|
||||
fmt.Println("SectorKeyCID: ", si.SectorKeyCID)
|
||||
}
|
||||
fmt.Println("DealIDs: ", si.DealIDs)
|
||||
fmt.Println()
|
||||
fmt.Println("Activation: ", EpochTime(ts.Height(), si.Activation))
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -1,4 +1,4 @@
|
||||
//stm: #cli
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package main
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -8,6 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestRateLimit(t *testing.T) {
|
||||
//stm: @CMD_LIMITER_GET_IP_LIMITER_001, @CMD_LIMITER_GET_WALLET_LIMITER_001
|
||||
limiter := NewLimiter(LimiterConfig{
|
||||
TotalRate: time.Second,
|
||||
TotalBurst: 20,
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -9,6 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestAppendCIDsToWindow(t *testing.T) {
|
||||
//stm: @CMD_HEALTH_APPEND_CIDS_001
|
||||
assert := assert.New(t)
|
||||
var window CidWindow
|
||||
threshold := 3
|
||||
@ -27,6 +29,7 @@ func TestAppendCIDsToWindow(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckWindow(t *testing.T) {
|
||||
//stm: @CMD_HEALTH_APPEND_CIDS_001, @CMD_HEALTH_CHECK_WINDOW_001
|
||||
assert := assert.New(t)
|
||||
threshold := 3
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -23,6 +24,7 @@ import (
|
||||
)
|
||||
|
||||
func TestWorkerKeyChange(t *testing.T) {
|
||||
//stm: @OTHER_WORKER_KEY_CHANGE_001
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode")
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #integration
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -49,6 +50,7 @@ func TestMinerAllInfo(t *testing.T) {
|
||||
|
||||
t.Run("pre-info-all", run)
|
||||
|
||||
//stm: @CLIENT_DATA_IMPORT_001, @CLIENT_STORAGE_DEALS_GET_001
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6})
|
||||
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
|
||||
|
@ -466,6 +466,7 @@ var stateOrder = map[sealing.SectorState]stateMeta{}
|
||||
var stateList = []stateMeta{
|
||||
{col: 39, state: "Total"},
|
||||
{col: color.FgGreen, state: sealing.Proving},
|
||||
{col: color.FgGreen, state: sealing.Available},
|
||||
{col: color.FgGreen, state: sealing.UpdateActivating},
|
||||
|
||||
{col: color.FgBlue, state: sealing.Empty},
|
||||
|
@ -11,9 +11,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
|
||||
"github.com/docker/go-units"
|
||||
"github.com/fatih/color"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
@ -56,7 +53,6 @@ var sectorsCmd = &cli.Command{
|
||||
sectorsRemoveCmd,
|
||||
sectorsSnapUpCmd,
|
||||
sectorsSnapAbortCmd,
|
||||
sectorsMarkForUpgradeCmd,
|
||||
sectorsStartSealCmd,
|
||||
sectorsSealDelayCmd,
|
||||
sectorsCapacityCollateralCmd,
|
||||
@ -351,7 +347,7 @@ var sectorsListCmd = &cli.Command{
|
||||
|
||||
if cctx.Bool("unproven") {
|
||||
for state := range sealing.ExistSectorStateList {
|
||||
if state == sealing.Proving {
|
||||
if state == sealing.Proving || state == sealing.Available {
|
||||
continue
|
||||
}
|
||||
states = append(states, api.SectorState(state))
|
||||
@ -437,7 +433,7 @@ var sectorsListCmd = &cli.Command{
|
||||
const verifiedPowerGainMul = 9
|
||||
|
||||
dw, vp := .0, .0
|
||||
estimate := st.Expiration-st.Activation <= 0
|
||||
estimate := (st.Expiration-st.Activation <= 0) || sealing.IsUpgradeState(sealing.SectorState(st.State))
|
||||
if !estimate {
|
||||
rdw := big.Add(st.DealWeight, st.VerifiedDealWeight)
|
||||
dw = float64(big.Div(rdw, big.NewInt(int64(st.Expiration-st.Activation))).Uint64())
|
||||
@ -1568,57 +1564,6 @@ var sectorsSnapAbortCmd = &cli.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var sectorsMarkForUpgradeCmd = &cli.Command{
|
||||
Name: "mark-for-upgrade",
|
||||
Usage: "Mark a committed capacity sector for replacement by a sector with deals",
|
||||
ArgsUsage: "<sectorNum>",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.Args().Len() != 1 {
|
||||
return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number"))
|
||||
}
|
||||
|
||||
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
api, nCloser, err := lcli.GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer nCloser()
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get network version: %w", err)
|
||||
}
|
||||
if nv >= network.Version15 {
|
||||
return xerrors.Errorf("classic cc upgrades disabled v15 and beyond, use `snap-up`")
|
||||
}
|
||||
|
||||
// disable mark for upgrade two days before the ntwk v15 upgrade
|
||||
// TODO: remove the following block in v1.15.1
|
||||
head, err := api.ChainHead(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get chain head: %w", err)
|
||||
}
|
||||
twoDays := abi.ChainEpoch(2 * builtin.EpochsInDay)
|
||||
if head.Height() > (build.UpgradeOhSnapHeight - twoDays) {
|
||||
return xerrors.Errorf("OhSnap is coming soon, " +
|
||||
"please use `snap-up` to upgrade your cc sectors after the network v15 upgrade!")
|
||||
}
|
||||
|
||||
id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not parse sector number: %w", err)
|
||||
}
|
||||
|
||||
return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), false)
|
||||
},
|
||||
}
|
||||
|
||||
var sectorsStartSealCmd = &cli.Command{
|
||||
Name: "seal",
|
||||
Usage: "Manually start sealing a sector (filling any unused space with junk)",
|
||||
|
@ -598,7 +598,7 @@ var storageListSectorsCmd = &cli.Command{
|
||||
ft storiface.SectorFileType
|
||||
urls string
|
||||
|
||||
primary, seal, store bool
|
||||
primary, copy, main, seal, store bool
|
||||
|
||||
state api.SectorState
|
||||
}
|
||||
@ -626,8 +626,11 @@ var storageListSectorsCmd = &cli.Command{
|
||||
urls: strings.Join(info.URLs, ";"),
|
||||
|
||||
primary: info.Primary,
|
||||
seal: info.CanSeal,
|
||||
store: info.CanStore,
|
||||
copy: !info.Primary && len(si) > 1,
|
||||
main: !info.Primary && len(si) == 1, // only copy, but not primary
|
||||
|
||||
seal: info.CanSeal,
|
||||
store: info.CanStore,
|
||||
|
||||
state: st.State,
|
||||
})
|
||||
@ -680,7 +683,7 @@ var storageListSectorsCmd = &cli.Command{
|
||||
"Sector": e.id,
|
||||
"Type": e.ft.String(),
|
||||
"State": color.New(stateOrder[sealing.SectorState(e.state)].col).Sprint(e.state),
|
||||
"Primary": maybeStr(e.seal, color.FgGreen, "primary"),
|
||||
"Primary": maybeStr(e.primary, color.FgGreen, "primary") + maybeStr(e.copy, color.FgBlue, "copy") + maybeStr(e.main, color.FgRed, "main"),
|
||||
"Path use": maybeStr(e.seal, color.FgMagenta, "seal ") + maybeStr(e.store, color.FgCyan, "store"),
|
||||
"URLs": e.urls,
|
||||
}
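The `storage list sectors` change above splits non-primary entries into two display buckets: "copy" when the sector has other replicas, and "main" when the lone replica is not marked primary. A self-contained sketch of that classification rule follows; the type and function names here are illustrative, not taken from the change.

```go
package main

import "fmt"

type fileEntryKind string

// classify mirrors the rule used above: primary wins; otherwise a sector file
// is a "copy" when other replicas exist and "main" when it is the only one.
func classify(isPrimary bool, replicas int) fileEntryKind {
	switch {
	case isPrimary:
		return "primary"
	case replicas > 1:
		return "copy"
	default:
		return "main" // only copy, but not primary
	}
}

func main() {
	fmt.Println(classify(true, 3))  // primary
	fmt.Println(classify(false, 2)) // copy
	fmt.Println(classify(false, 1)) // main
}
```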
|
||||
|
342
cmd/lotus-shed/datastore-vlog.go
Normal file
@ -0,0 +1,342 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/dgraph-io/badger/v2/y"
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/multiformats/go-base32"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
var datastoreVlog2CarCmd = &cli.Command{
|
||||
Name: "vlog2car",
|
||||
Usage: "convert badger blockstore .vlog to .car",
|
||||
Flags: []cli.Flag{
|
||||
&cli.PathFlag{
|
||||
Name: "vlog",
|
||||
Usage: "vlog file",
|
||||
Required: true,
|
||||
},
|
||||
&cli.PathFlag{
|
||||
Name: "car",
|
||||
Usage: "out car file name (no .car)",
|
||||
Required: true,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "key-prefix",
|
||||
Usage: "datastore prefix",
|
||||
Value: "/blocks/",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
ctx := cctx.Context
|
||||
|
||||
maxSz := uint64(1 << 20)
|
||||
|
||||
carb := &rawCarb{
|
||||
max: maxSz,
|
||||
blocks: map[cid.Cid]block.Block{},
|
||||
}
|
||||
cars := 0
|
||||
|
||||
pref := cctx.String("key-prefix")
|
||||
plen := len(pref)
|
||||
|
||||
{
|
||||
// NOTE: Some bits of code in this code block come from https://github.com/dgraph-io/badger, which is licensed
|
||||
// under Apache 2.0; See https://github.com/dgraph-io/badger/blob/master/LICENSE
|
||||
|
||||
vf, err := os.Open(cctx.Path("vlog"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("open vlog file: %w", err)
|
||||
}
|
||||
|
||||
if _, err := vf.Seek(20, io.SeekStart); err != nil {
|
||||
return xerrors.Errorf("seek past vlog start: %w", err)
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(vf)
|
||||
read := &safeRead{
|
||||
k: make([]byte, 10),
|
||||
v: make([]byte, 10),
|
||||
recordOffset: 20,
|
||||
}
|
||||
|
||||
loop:
|
||||
for {
|
||||
e, err := read.Entry(reader)
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
break loop
|
||||
case err == io.ErrUnexpectedEOF || err == errTruncate:
|
||||
break loop
|
||||
case err != nil:
|
||||
return xerrors.Errorf("entry read error: %w", err)
|
||||
case e == nil:
|
||||
continue
|
||||
}
|
||||
|
||||
if e.meta&0x40 > 0 {
|
||||
e.Key = e.Key[:len(e.Key)-8]
|
||||
} else if e.meta > 0 {
|
||||
if e.meta&0x3f > 0 {
|
||||
log.Infof("unk meta m:%x; k:%x, v:%60x", e.meta, e.Key, e.Value)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
{
|
||||
if plen > 0 && !strings.HasPrefix(string(e.Key), pref) {
|
||||
log.Infow("no blocks prefix", "key", string(e.Key))
|
||||
continue
|
||||
}
|
||||
|
||||
h, err := base32.RawStdEncoding.DecodeString(string(e.Key[plen:]))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("decode b32 ds key %x: %w", e.Key, err)
|
||||
}
|
||||
|
||||
c := cid.NewCidV1(cid.Raw, h)
|
||||
|
||||
b, err := block.NewBlockWithCid(e.Value, c)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("readblk: %w", err)
|
||||
}
|
||||
|
||||
err = carb.consume(c, b)
|
||||
switch err {
|
||||
case nil:
|
||||
case errFullCar:
|
||||
root, err := carb.finalize()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("carb finalize: %w", err)
|
||||
}
|
||||
|
||||
if err := carb.writeCar(ctx, fmt.Sprintf("%s%d.car", cctx.Path("car"), cars), root); err != nil {
|
||||
return xerrors.Errorf("writeCar: %w", err)
|
||||
}
|
||||
|
||||
cars++
|
||||
|
||||
carb = &rawCarb{
|
||||
max: maxSz,
|
||||
blocks: map[cid.Cid]block.Block{},
|
||||
}
|
||||
|
||||
default:
|
||||
return xerrors.Errorf("carb consume: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := vf.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
root, err := carb.finalize()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("carb finalize: %w", err)
|
||||
}
|
||||
|
||||
if err := carb.writeCar(ctx, fmt.Sprintf("%s%d.car", cctx.Path("car"), cars), root); err != nil {
|
||||
return xerrors.Errorf("writeCar: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
// NOTE: Code below comes (with slight modifications) from https://github.com/dgraph-io/badger/blob/master/value.go
|
||||
// Apache 2.0; See https://github.com/dgraph-io/badger/blob/master/LICENSE
|
||||
|
||||
var errTruncate = errors.New("do truncate")
|
||||
|
||||
// hashReader implements io.Reader, io.ByteReader interfaces. It also keeps track of the number of
|
||||
// bytes read. The hashReader writes to h (hash) what it reads from r.
|
||||
type hashReader struct {
|
||||
r io.Reader
|
||||
h hash.Hash32
|
||||
bytesRead int // Number of bytes read.
|
||||
}
|
||||
|
||||
func newHashReader(r io.Reader) *hashReader {
|
||||
hash := crc32.New(y.CastagnoliCrcTable)
|
||||
return &hashReader{
|
||||
r: r,
|
||||
h: hash,
|
||||
}
|
||||
}
|
||||
|
||||
// Read reads len(p) bytes from the reader. Returns the number of bytes read, error on failure.
|
||||
func (t *hashReader) Read(p []byte) (int, error) {
|
||||
n, err := t.r.Read(p)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
t.bytesRead += n
|
||||
return t.h.Write(p[:n])
|
||||
}
|
||||
|
||||
// ReadByte reads exactly one byte from the reader. Returns error on failure.
|
||||
func (t *hashReader) ReadByte() (byte, error) {
|
||||
b := make([]byte, 1)
|
||||
_, err := t.Read(b)
|
||||
return b[0], err
|
||||
}
|
||||
|
||||
// Sum32 returns the sum32 of the underlying hash.
|
||||
func (t *hashReader) Sum32() uint32 {
|
||||
return t.h.Sum32()
|
||||
}
|
||||
|
||||
type safeRead struct {
|
||||
k []byte
|
||||
v []byte
|
||||
|
||||
recordOffset uint32
|
||||
}
|
||||
|
||||
// Entry provides Key, Value, UserMeta and ExpiresAt. This struct can be used by
|
||||
// the user to set data.
|
||||
type Entry struct {
|
||||
Key []byte
|
||||
Value []byte
|
||||
UserMeta byte
|
||||
ExpiresAt uint64 // time.Unix
|
||||
meta byte
|
||||
|
||||
// Fields maintained internally.
|
||||
offset uint32
|
||||
hlen int // Length of the header.
|
||||
}
|
||||
|
||||
// Entry reads an entry from the provided reader. It also validates the checksum for every entry
|
||||
// read. Returns error on failure.
|
||||
func (r *safeRead) Entry(reader io.Reader) (*Entry, error) {
|
||||
tee := newHashReader(reader)
|
||||
var h header
|
||||
hlen, err := h.DecodeFrom(tee)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if h.klen > uint32(1<<16) { // Key length must be below uint16.
|
||||
return nil, errTruncate
|
||||
}
|
||||
kl := int(h.klen)
|
||||
if cap(r.k) < kl {
|
||||
r.k = make([]byte, 2*kl)
|
||||
}
|
||||
vl := int(h.vlen)
|
||||
if cap(r.v) < vl {
|
||||
r.v = make([]byte, 2*vl)
|
||||
}
|
||||
|
||||
e := &Entry{}
|
||||
e.offset = r.recordOffset
|
||||
e.hlen = hlen
|
||||
buf := make([]byte, h.klen+h.vlen)
|
||||
if _, err := io.ReadFull(tee, buf[:]); err != nil {
|
||||
if err == io.EOF {
|
||||
err = errTruncate
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
e.Key = buf[:h.klen]
|
||||
e.Value = buf[h.klen:]
|
||||
var crcBuf [crc32.Size]byte
|
||||
if _, err := io.ReadFull(reader, crcBuf[:]); err != nil {
|
||||
if err == io.EOF {
|
||||
err = errTruncate
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
crc := y.BytesToU32(crcBuf[:])
|
||||
if crc != tee.Sum32() {
|
||||
return nil, errTruncate
|
||||
}
|
||||
e.meta = h.meta
|
||||
e.UserMeta = h.userMeta
|
||||
e.ExpiresAt = h.expiresAt
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// header is used in value log as a header before Entry.
|
||||
type header struct {
|
||||
klen uint32
|
||||
vlen uint32
|
||||
expiresAt uint64
|
||||
meta byte
|
||||
userMeta byte
|
||||
}
|
||||
|
||||
// Encode encodes the header into []byte. The provided []byte should be at least 5 bytes. The
|
||||
// function will panic if out []byte isn't large enough to hold all the values.
|
||||
// The encoded header looks like
|
||||
// +------+----------+------------+--------------+-----------+
|
||||
// | Meta | UserMeta | Key Length | Value Length | ExpiresAt |
|
||||
// +------+----------+------------+--------------+-----------+
|
||||
func (h header) Encode(out []byte) int {
|
||||
out[0], out[1] = h.meta, h.userMeta
|
||||
index := 2
|
||||
index += binary.PutUvarint(out[index:], uint64(h.klen))
|
||||
index += binary.PutUvarint(out[index:], uint64(h.vlen))
|
||||
index += binary.PutUvarint(out[index:], h.expiresAt)
|
||||
return index
|
||||
}
|
||||
|
||||
// Decode decodes the given header from the provided byte slice.
|
||||
// Returns the number of bytes read.
|
||||
func (h *header) Decode(buf []byte) int {
|
||||
h.meta, h.userMeta = buf[0], buf[1]
|
||||
index := 2
|
||||
klen, count := binary.Uvarint(buf[index:])
|
||||
h.klen = uint32(klen)
|
||||
index += count
|
||||
vlen, count := binary.Uvarint(buf[index:])
|
||||
h.vlen = uint32(vlen)
|
||||
index += count
|
||||
h.expiresAt, count = binary.Uvarint(buf[index:])
|
||||
return index + count
|
||||
}
|
||||
|
||||
// DecodeFrom reads the header from the hashReader.
|
||||
// Returns the number of bytes read.
|
||||
func (h *header) DecodeFrom(reader *hashReader) (int, error) {
|
||||
var err error
|
||||
h.meta, err = reader.ReadByte()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
h.userMeta, err = reader.ReadByte()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
klen, err := binary.ReadUvarint(reader)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
h.klen = uint32(klen)
|
||||
vlen, err := binary.ReadUvarint(reader)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
h.vlen = uint32(vlen)
|
||||
h.expiresAt, err = binary.ReadUvarint(reader)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return reader.bytesRead, nil
|
||||
}
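The vlog header above is two fixed bytes (meta, user meta) followed by three uvarints (key length, value length, expiry). Below is a hedged, test-style round-trip sketch; it assumes it lives in this same package, since `header` is unexported, and relies only on `encoding/binary` and `fmt`, both of which this file already imports.

```go
// headerRoundTrip is a hypothetical check that Encode and Decode agree on the
// layout: Meta | UserMeta | Key Length | Value Length | ExpiresAt.
func headerRoundTrip() error {
	in := header{klen: 3, vlen: 10, expiresAt: 1234567890, meta: 0x40, userMeta: 0x01}

	// 2 fixed bytes plus up to three maximum-length uvarints.
	buf := make([]byte, 2+3*binary.MaxVarintLen64)
	n := in.Encode(buf)

	var out header
	m := out.Decode(buf[:n])
	if n != m || in != out {
		return fmt.Errorf("header round trip mismatch: wrote %d, read %d, %+v vs %+v", n, m, in, out)
	}
	return nil
}
```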
|
@ -32,6 +32,7 @@ var datastoreCmd = &cli.Command{
|
||||
datastoreListCmd,
|
||||
datastoreGetCmd,
|
||||
datastoreRewriteCmd,
|
||||
datastoreVlog2CarCmd,
|
||||
},
|
||||
}
|
||||
|
||||
|
118
cmd/lotus-shed/deal-label.go
Normal file
@ -0,0 +1,118 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
"github.com/filecoin-project/specs-actors/v4/actors/util/adt"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
var dealLabelCmd = &cli.Command{
|
||||
Name: "deal-label",
|
||||
Usage: "Scrape state to report on how many deals have non UTF-8 labels",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "repo",
|
||||
Value: "~/.lotus",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
ctx := context.TODO()
|
||||
|
||||
if !cctx.Args().Present() {
|
||||
return fmt.Errorf("must pass state root")
|
||||
}
|
||||
|
||||
sroot, err := cid.Decode(cctx.Args().First())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse input: %w", err)
|
||||
}
|
||||
|
||||
fsrepo, err := repo.NewFS(cctx.String("repo"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lkrepo, err := fsrepo.Lock(repo.FullNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer lkrepo.Close() //nolint:errcheck
|
||||
|
||||
bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
if err := c.Close(); err != nil {
|
||||
log.Warnf("failed to close blockstore: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
cst := cbor.NewCborStore(bs)
|
||||
store := adt.WrapStore(ctx, cst)
|
||||
|
||||
tree, err := state.LoadStateTree(cst, sroot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ma, err := tree.GetActor(market.Address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ms, err := market.Load(store, ma)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ps, err := ms.Proposals()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var deals []abi.DealID
|
||||
if err = ps.ForEach(func(id abi.DealID, dp market.DealProposal) error {
|
||||
if !utf8.Valid([]byte(dp.Label)) {
|
||||
deals = append(deals, id)
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("there are ", len(deals), " bad labels")
|
||||
for _, d := range deals {
|
||||
fmt.Print(d, " ")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
@ -1,19 +1,38 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/dgraph-io/badger/v2"
|
||||
"github.com/dgraph-io/badger/v2/pb"
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-blockservice"
|
||||
"github.com/ipfs/go-cid"
|
||||
offline "github.com/ipfs/go-ipfs-exchange-offline"
|
||||
"github.com/ipfs/go-merkledag"
|
||||
"github.com/ipld/go-car"
|
||||
"github.com/multiformats/go-base32"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
"github.com/urfave/cli/v2"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/cmd/lotus-shed/shedgen"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
@ -39,6 +58,9 @@ var exportChainCmd = &cli.Command{
|
||||
Name: "skip-old-msgs",
|
||||
},
|
||||
},
|
||||
Subcommands: []*cli.Command{
|
||||
exportRawCmd,
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Args().Present() {
|
||||
return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name to write export to"))
|
||||
@ -130,3 +152,351 @@ var exportChainCmd = &cli.Command{
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var exportRawCmd = &cli.Command{
|
||||
Name: "raw",
|
||||
Description: "Export raw blocks from repo (requires node to be offline)",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "repo",
|
||||
Value: "~/.lotus",
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: "car-size",
|
||||
Value: "50M",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Args().Present() {
|
||||
return lcli.ShowHelp(cctx, fmt.Errorf("must specify file name to write export to"))
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
r, err := repo.NewFS(cctx.String("repo"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("opening fs repo: %w", err)
|
||||
}
|
||||
|
||||
exists, err := r.Exists()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
return xerrors.Errorf("lotus repo doesn't exist")
|
||||
}
|
||||
|
||||
lr, err := r.LockRO(repo.FullNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer lr.Close() //nolint:errcheck
|
||||
|
||||
out := cctx.Args().First()
|
||||
err = os.Mkdir(out, 0755)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("creating output dir: %w", err)
|
||||
}
|
||||
|
||||
maxSz, err := humanize.ParseBytes(cctx.String("car-size"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("parse --car-size: %w", err)
|
||||
}
|
||||
|
||||
cars := 0
|
||||
|
||||
carb := &rawCarb{
|
||||
max: maxSz,
|
||||
blocks: map[cid.Cid]block.Block{},
|
||||
}
|
||||
|
||||
{
|
||||
consume := func(c cid.Cid, b block.Block) error {
|
||||
err = carb.consume(c, b)
|
||||
switch err {
|
||||
case nil:
|
||||
case errFullCar:
|
||||
root, err := carb.finalize()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("carb finalize: %w", err)
|
||||
}
|
||||
|
||||
if err := carb.writeCar(ctx, filepath.Join(out, fmt.Sprintf("chain%d.car", cars)), root); err != nil {
|
||||
return xerrors.Errorf("writeCar: %w", err)
|
||||
}
|
||||
|
||||
cars++
|
||||
|
||||
if cars > 10 {
|
||||
return xerrors.Errorf("enough")
|
||||
}
|
||||
|
||||
carb = &rawCarb{
|
||||
max: maxSz,
|
||||
blocks: map[cid.Cid]block.Block{},
|
||||
}
|
||||
|
||||
log.Infow("gc")
|
||||
go runtime.GC()
|
||||
|
||||
default:
|
||||
return xerrors.Errorf("carb consume: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
{
|
||||
path := filepath.Join(lr.Path(), "datastore", "chain")
|
||||
opts, err := repo.BadgerBlockstoreOptions(repo.UniversalBlockstore, path, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts.Logger = &badgerLog{
|
||||
SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(),
|
||||
skip2: log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(),
|
||||
}
|
||||
|
||||
log.Infow("open db")
|
||||
|
||||
db, err := badger.Open(opts.Options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open badger blockstore: %w", err)
|
||||
}
|
||||
defer db.Close() // nolint:errcheck
|
||||
|
||||
log.Infow("new stream")
|
||||
|
||||
var wlk sync.Mutex
|
||||
|
||||
str := db.NewStream()
|
||||
str.NumGo = 16
|
||||
str.LogPrefix = "bstream"
|
||||
str.Send = func(list *pb.KVList) (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
log.Errorw("send error", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for _, kv := range list.Kv {
|
||||
if kv.Key == nil || kv.Value == nil {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(string(kv.Key), "/blocks/") {
|
||||
log.Infow("no blocks prefix", "key", string(kv.Key))
|
||||
continue
|
||||
}
|
||||
|
||||
h, err := base32.RawStdEncoding.DecodeString(string(kv.Key[len("/blocks/"):]))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("decode b32 ds key %x: %w", kv.Key, err)
|
||||
}
|
||||
|
||||
c := cid.NewCidV1(cid.Raw, h)
|
||||
|
||||
b, err := block.NewBlockWithCid(kv.Value, c)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("readblk: %w", err)
|
||||
}
|
||||
|
||||
wlk.Lock()
|
||||
err = consume(c, b)
|
||||
wlk.Unlock()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("consume stream block: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := str.Orchestrate(ctx); err != nil {
|
||||
return xerrors.Errorf("orchestrate stream: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.Infow("write last")
|
||||
|
||||
root, err := carb.finalize()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("carb finalize: %w", err)
|
||||
}
|
||||
|
||||
if err := carb.writeCar(ctx, filepath.Join(out, fmt.Sprintf("chain%d.car", cars)), root); err != nil {
|
||||
return xerrors.Errorf("writeCar: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var errFullCar = errors.New("full")
|
||||
|
||||
const maxlinks = 16
|
||||
|
||||
type rawCarb struct {
|
||||
blockstore.Blockstore
|
||||
|
||||
max, cur uint64
|
||||
|
||||
nodes []*shedgen.CarbNode
|
||||
|
||||
blocks map[cid.Cid]block.Block
|
||||
}
|
||||
|
||||
func (rc *rawCarb) Has(ctx context.Context, c cid.Cid) (bool, error) {
|
||||
_, has := rc.blocks[c]
|
||||
return has, nil
|
||||
}
|
||||
|
||||
func (rc *rawCarb) Get(ctx context.Context, c cid.Cid) (block.Block, error) {
|
||||
b, has := rc.blocks[c]
|
||||
if !has {
|
||||
return nil, blockstore.ErrNotFound
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (rc *rawCarb) GetSize(ctx context.Context, c cid.Cid) (int, error) {
|
||||
b, has := rc.blocks[c]
|
||||
if !has {
|
||||
return 0, blockstore.ErrNotFound
|
||||
}
|
||||
return len(b.RawData()), nil
|
||||
}
|
||||
|
||||
func (rc *rawCarb) checkNodes(maxl int) error {
|
||||
if len(rc.nodes) == 0 {
|
||||
log.Infow("add level", "l", 0)
|
||||
rc.nodes = append(rc.nodes, new(shedgen.CarbNode))
|
||||
}
|
||||
for i := 0; i < len(rc.nodes); i++ {
|
||||
if len(rc.nodes[i].Sub) <= maxl {
|
||||
break
|
||||
}
|
||||
if len(rc.nodes) <= i+1 {
|
||||
log.Infow("add level", "l", i+1)
|
||||
rc.nodes = append(rc.nodes, new(shedgen.CarbNode))
|
||||
}
|
||||
|
||||
var bb bytes.Buffer
|
||||
if err := rc.nodes[i].MarshalCBOR(&bb); err != nil {
|
||||
return err
|
||||
}
|
||||
c, err := cid.Prefix{
|
||||
Version: 1,
|
||||
Codec: cid.DagCBOR,
|
||||
MhType: mh.SHA2_256,
|
||||
MhLength: -1,
|
||||
}.Sum(bb.Bytes())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("gen cid: %w", err)
|
||||
}
|
||||
|
||||
b, err := block.NewBlockWithCid(bb.Bytes(), c)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("new block: %w", err)
|
||||
}
|
||||
|
||||
if i > 1 {
|
||||
log.Infow("compact", "from", i, "to", i+1, "sub", c.String())
|
||||
}
|
||||
|
||||
rc.nodes[i+1].Sub = append(rc.nodes[i+1].Sub, c)
|
||||
rc.blocks[c] = b
|
||||
rc.nodes[i] = new(shedgen.CarbNode)
|
||||
rc.cur += uint64(bb.Len())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rc *rawCarb) consume(c cid.Cid, b block.Block) error {
|
||||
if err := rc.checkNodes(maxlinks); err != nil {
|
||||
return err
|
||||
}
|
||||
if rc.cur+uint64(len(b.RawData())) > rc.max {
|
||||
return errFullCar
|
||||
}
|
||||
|
||||
rc.cur += uint64(len(b.RawData()))
|
||||
|
||||
b, err := block.NewBlockWithCid(b.RawData(), c)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create raw block: %w", err)
|
||||
}
|
||||
|
||||
rc.blocks[c] = b
|
||||
rc.nodes[0].Sub = append(rc.nodes[0].Sub, c)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rc *rawCarb) finalize() (cid.Cid, error) {
|
||||
if len(rc.nodes) == 0 {
|
||||
rc.nodes = append(rc.nodes, new(shedgen.CarbNode))
|
||||
}
|
||||
|
||||
for i := 0; i < len(rc.nodes); i++ {
|
||||
var bb bytes.Buffer
|
||||
if err := rc.nodes[i].MarshalCBOR(&bb); err != nil {
|
||||
return cid.Undef, err
|
||||
}
|
||||
c, err := cid.Prefix{
|
||||
Version: 1,
|
||||
Codec: cid.DagCBOR,
|
||||
MhType: mh.SHA2_256,
|
||||
MhLength: -1,
|
||||
}.Sum(bb.Bytes())
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("gen cid: %w", err)
|
||||
}
|
||||
|
||||
b, err := block.NewBlockWithCid(bb.Bytes(), c)
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("new block: %w", err)
|
||||
}
|
||||
|
||||
log.Infow("fin", "level", i, "cid", c.String())
|
||||
|
||||
rc.blocks[c] = b
|
||||
rc.nodes[i] = new(shedgen.CarbNode)
|
||||
rc.cur += uint64(bb.Len())
|
||||
|
||||
if len(rc.nodes[i].Sub) <= 1 && i == len(rc.nodes)-1 {
|
||||
return c, err
|
||||
}
|
||||
if len(rc.nodes) <= i+1 {
|
||||
rc.nodes = append(rc.nodes, new(shedgen.CarbNode))
|
||||
}
|
||||
rc.nodes[i+1].Sub = append(rc.nodes[i+1].Sub, c)
|
||||
}
|
||||
return cid.Undef, xerrors.Errorf("failed to finalize")
|
||||
}
|
||||
|
||||
func (rc *rawCarb) writeCar(ctx context.Context, path string, root cid.Cid) error {
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("create out car: %w", err)
|
||||
}
|
||||
|
||||
bs := rc
|
||||
ds := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs)))
|
||||
|
||||
log.Infow("write car", "path", path, "root", root.String(), "blocks", len(rc.blocks))
|
||||
|
||||
return car.WriteCar(ctx, ds, []cid.Cid{root}, f)
|
||||
}
|
||||
|
||||
var _ blockstore.Blockstore = &rawCarb{}
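Both `vlog2car` and `export raw` above use the same chunking loop: feed blocks into a `rawCarb` until it reports `errFullCar`, write out the finished chunk, then start a fresh builder. The sketch below pulls that loop into a standalone helper in the same package; `writeChunkedCars` is an illustrative name, and unlike the commands above it also re-consumes the block that overflowed the previous chunk.

```go
func writeChunkedCars(ctx context.Context, blocks []block.Block, outPrefix string, maxSz uint64) error {
	carb := &rawCarb{max: maxSz, blocks: map[cid.Cid]block.Block{}}
	cars := 0

	// flush finalizes the current chunk, writes it as <prefix><n>.car and resets the builder.
	flush := func() error {
		root, err := carb.finalize()
		if err != nil {
			return xerrors.Errorf("carb finalize: %w", err)
		}
		if err := carb.writeCar(ctx, fmt.Sprintf("%s%d.car", outPrefix, cars), root); err != nil {
			return xerrors.Errorf("writeCar: %w", err)
		}
		cars++
		carb = &rawCarb{max: maxSz, blocks: map[cid.Cid]block.Block{}}
		return nil
	}

	for _, b := range blocks {
		switch err := carb.consume(b.Cid(), b); err {
		case nil:
		case errFullCar:
			if err := flush(); err != nil {
				return err
			}
			if err := carb.consume(b.Cid(), b); err != nil { // retry the block in the fresh chunk
				return err
			}
		default:
			return xerrors.Errorf("carb consume: %w", err)
		}
	}

	return flush() // write whatever remains
}
```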
|
||||
|
||||
type badgerLog struct {
|
||||
*zap.SugaredLogger
|
||||
skip2 *zap.SugaredLogger
|
||||
}
|
||||
|
||||
func (b *badgerLog) Warningf(format string, args ...interface{}) {
|
||||
b.skip2.Warnf(format, args...)
|
||||
}
|
||||
|
104
cmd/lotus-shed/itestd.go
Normal file
@ -0,0 +1,104 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/chzyer/readline"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/itests/kit"
|
||||
)
|
||||
|
||||
var itestdCmd = &cli.Command{
|
||||
Name: "itestd",
|
||||
Description: "Integration test debug env",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "listen",
|
||||
Value: "127.0.0.1:5674",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
var nodes []kit.ItestdNotif
|
||||
|
||||
m := http.NewServeMux()
|
||||
m.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
var notif kit.ItestdNotif
|
||||
if err := json.NewDecoder(r.Body).Decode(¬if); err != nil {
|
||||
fmt.Printf("!! Decode itest notif: %s\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("%d @%s '%s=%s'\n", len(nodes), notif.TestName, notif.NodeType, notif.Api)
|
||||
nodes = append(nodes, notif)
|
||||
})
|
||||
l, err := net.Listen("tcp", cctx.String("listen"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("net listen: %w", err)
|
||||
}
|
||||
s := &httptest.Server{
|
||||
Listener: l,
|
||||
Config: &http.Server{Handler: m},
|
||||
}
|
||||
s.Start()
|
||||
fmt.Printf("ITest env:\n\nLOTUS_ITESTD=%s\n\nSay 'sh' to spawn a shell connected to test nodes\n--- waiting for clients\n", s.URL)
|
||||
|
||||
cs := readline.NewCancelableStdin(os.Stdin)
|
||||
go func() {
|
||||
<-cctx.Done()
|
||||
cs.Close() // nolint:errcheck
|
||||
}()
|
||||
|
||||
rl := bufio.NewReader(cs)
|
||||
|
||||
for {
|
||||
cmd, _, err := rl.ReadLine()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("readline: %w", err)
|
||||
}
|
||||
|
||||
switch string(cmd) {
|
||||
case "sh":
|
||||
shell := "/bin/sh"
|
||||
if os.Getenv("SHELL") != "" {
|
||||
shell = os.Getenv("SHELL")
|
||||
}
|
||||
|
||||
p := exec.Command(shell, "-i")
|
||||
p.Env = append(p.Env, os.Environ()...)
|
||||
lastNodes := map[string]string{}
|
||||
for _, node := range nodes {
|
||||
lastNodes[node.NodeType] = node.Api
|
||||
}
|
||||
if _, found := lastNodes["MARKETS_API_INFO"]; !found {
|
||||
lastNodes["MARKETS_API_INFO"] = lastNodes["MINER_API_INFO"]
|
||||
}
|
||||
for typ, api := range lastNodes {
|
||||
p.Env = append(p.Env, fmt.Sprintf("%s=%s", typ, api))
|
||||
}
|
||||
|
||||
p.Stdout = os.Stdout
|
||||
p.Stderr = os.Stderr
|
||||
p.Stdin = os.Stdin
|
||||
if err := p.Start(); err != nil {
|
||||
return xerrors.Errorf("start shell: %w", err)
|
||||
}
|
||||
if err := p.Wait(); err != nil {
|
||||
fmt.Printf("wait for shell: %s\n", err)
|
||||
}
|
||||
fmt.Println("\n--- shell quit")
|
||||
|
||||
default:
|
||||
fmt.Println("!! Unknown command")
|
||||
}
|
||||
}
|
||||
},
|
||||
}
|
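`itestd` simply listens for JSON `kit.ItestdNotif` posts and replays the collected API endpoints into a shell. The sketch below shows the client side, i.e. how a test harness could announce a node to the URL that `itestd` prints as `LOTUS_ITESTD`; the helper name and the extra `bytes`/`net/http` imports are assumptions, while the `ItestdNotif` field names are the ones used by the handler above.

```go
func notifyItestd(itestdURL, testName, nodeType, apiInfo string) error {
	// Field names follow the handler above: TestName, NodeType, Api.
	body, err := json.Marshal(kit.ItestdNotif{
		TestName: testName,
		NodeType: nodeType, // e.g. "FULLNODE_API_INFO", "MINER_API_INFO", "MARKETS_API_INFO"
		Api:      apiInfo,
	})
	if err != nil {
		return err
	}

	resp, err := http.Post(itestdURL, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}
```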
@ -22,6 +22,7 @@ func main() {
|
||||
bitFieldCmd,
|
||||
cronWcCmd,
|
||||
frozenMinersCmd,
|
||||
dealLabelCmd,
|
||||
keyinfoCmd,
|
||||
jwtCmd,
|
||||
noncefix,
|
||||
@ -69,6 +70,7 @@ func main() {
|
||||
terminationsCmd,
|
||||
migrationsCmd,
|
||||
diffCmd,
|
||||
itestdCmd,
|
||||
}
|
||||
|
||||
app := &cli.App{
|
||||
|
@ -8,6 +8,8 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
|
||||
|
||||
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||
|
||||
msig5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/multisig"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
@ -29,6 +31,9 @@ var minerMultisigsCmd = &cli.Command{
|
||||
mmApproveWithdrawBalance,
|
||||
mmProposeChangeOwner,
|
||||
mmApproveChangeOwner,
|
||||
mmProposeChangeWorker,
|
||||
mmConfirmChangeWorker,
|
||||
mmProposeControlSet,
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
@ -368,6 +373,301 @@ var mmApproveChangeOwner = &cli.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var mmProposeChangeWorker = &cli.Command{
|
||||
Name: "propose-change-worker",
|
||||
Usage: "Propose an worker address change",
|
||||
ArgsUsage: "[newWorker]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Args().Present() {
|
||||
return fmt.Errorf("must pass new worker address")
|
||||
}
|
||||
|
||||
api, closer, err := lcli.GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
multisigAddr, sender, minerAddr, err := getInputs(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
na, err := address.NewFromString(cctx.Args().First())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if mi.NewWorker.Empty() {
|
||||
if mi.Worker == newAddr {
|
||||
return fmt.Errorf("worker address already set to %s", na)
|
||||
}
|
||||
} else {
|
||||
if mi.NewWorker == newAddr {
|
||||
fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na)
|
||||
fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch)
|
||||
return fmt.Errorf("change to worker address %s already pending", na)
|
||||
}
|
||||
}
|
||||
|
||||
cwp := &miner2.ChangeWorkerAddressParams{
|
||||
NewWorker: newAddr,
|
||||
NewControlAddrs: mi.ControlAddresses,
|
||||
}
|
||||
|
||||
fmt.Fprintf(cctx.App.Writer, "newAddr: %s\n", newAddr)
|
||||
fmt.Fprintf(cctx.App.Writer, "NewControlAddrs: %s\n", mi.ControlAddresses)
|
||||
|
||||
sp, err := actors.SerializeParams(cwp)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("serializing params: %w", err)
|
||||
}
|
||||
|
||||
pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeWorkerAddress), sp)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("proposing message: %w", err)
|
||||
}
|
||||
|
||||
fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid)
|
||||
|
||||
// wait for it to get mined into a block
|
||||
wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check it executed successfully
|
||||
if wait.Receipt.ExitCode != 0 {
|
||||
fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!")
|
||||
return err
|
||||
}
|
||||
|
||||
var retval msig5.ProposeReturn
|
||||
if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal propose return value: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Transaction ID: %d\n", retval.TxnID)
|
||||
if retval.Applied {
|
||||
fmt.Printf("Transaction was executed during propose\n")
|
||||
fmt.Printf("Exit Code: %d\n", retval.Code)
|
||||
fmt.Printf("Return Value: %x\n", retval.Ret)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var mmConfirmChangeWorker = &cli.Command{
	Name:      "confirm-change-worker",
	Usage:     "Confirm a worker address change",
	ArgsUsage: "[newWorker]",
	Action: func(cctx *cli.Context) error {
		if !cctx.Args().Present() {
			return fmt.Errorf("must pass new worker address")
		}

		api, closer, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

		ctx := lcli.ReqContext(cctx)

		multisigAddr, sender, minerAddr, err := getInputs(cctx)
		if err != nil {
			return err
		}

		na, err := address.NewFromString(cctx.Args().First())
		if err != nil {
			return err
		}

		newAddr, err := api.StateLookupID(ctx, na, types.EmptyTSK)
		if err != nil {
			return err
		}

		mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK)
		if err != nil {
			return err
		}

		if mi.NewWorker.Empty() {
			return xerrors.Errorf("no worker key change proposed")
		} else if mi.NewWorker != newAddr {
			return xerrors.Errorf("worker key %s does not match current worker key proposal %s", newAddr, mi.NewWorker)
		}

		if head, err := api.ChainHead(ctx); err != nil {
			return xerrors.Errorf("failed to get the chain head: %w", err)
		} else if head.Height() < mi.WorkerChangeEpoch {
			return xerrors.Errorf("worker key change cannot be confirmed until %d, current height is %d", mi.WorkerChangeEpoch, head.Height())
		}

		pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.ConfirmUpdateWorkerKey), nil)
		if err != nil {
			return xerrors.Errorf("proposing message: %w", err)
		}

		fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid)

		// wait for it to get mined into a block
		wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence)
		if err != nil {
			return err
		}

		// check it executed successfully
		if wait.Receipt.ExitCode != 0 {
			fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!")
			return err
		}

		var retval msig5.ProposeReturn
		if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
			return fmt.Errorf("failed to unmarshal propose return value: %w", err)
		}

		fmt.Printf("Transaction ID: %d\n", retval.TxnID)
		if retval.Applied {
			fmt.Printf("Transaction was executed during propose\n")
			fmt.Printf("Exit Code: %d\n", retval.Code)
			fmt.Printf("Return Value: %x\n", retval.Ret)
		}
		return nil
	},
}

var mmProposeControlSet = &cli.Command{
	Name:      "propose-control-set",
	Usage:     "Set control address(-es)",
	ArgsUsage: "[...address]",
	Action: func(cctx *cli.Context) error {
		if !cctx.Args().Present() {
			return fmt.Errorf("must pass at least one control address")
		}

		api, closer, err := lcli.GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()

		ctx := lcli.ReqContext(cctx)

		multisigAddr, sender, minerAddr, err := getInputs(cctx)
		if err != nil {
			return err
		}

		mi, err := api.StateMinerInfo(ctx, minerAddr, types.EmptyTSK)
		if err != nil {
			return err
		}

		del := map[address.Address]struct{}{}
		existing := map[address.Address]struct{}{}
		for _, controlAddress := range mi.ControlAddresses {
			ka, err := api.StateAccountKey(ctx, controlAddress, types.EmptyTSK)
			if err != nil {
				return err
			}

			del[ka] = struct{}{}
			existing[ka] = struct{}{}
		}

		var toSet []address.Address

		for i, as := range cctx.Args().Slice() {
			a, err := address.NewFromString(as)
			if err != nil {
				return xerrors.Errorf("parsing address %d: %w", i, err)
			}

			ka, err := api.StateAccountKey(ctx, a, types.EmptyTSK)
			if err != nil {
				return err
			}

			// make sure the address exists on chain
			_, err = api.StateLookupID(ctx, ka, types.EmptyTSK)
			if err != nil {
				return xerrors.Errorf("looking up %s: %w", ka, err)
			}

			delete(del, ka)
			toSet = append(toSet, ka)
		}

		for a := range del {
			fmt.Println("Remove", a)
		}
		for _, a := range toSet {
			if _, exists := existing[a]; !exists {
				fmt.Println("Add", a)
			}
		}

		cwp := &miner2.ChangeWorkerAddressParams{
			NewWorker:       mi.Worker,
			NewControlAddrs: toSet,
		}

		sp, err := actors.SerializeParams(cwp)
		if err != nil {
			return xerrors.Errorf("serializing params: %w", err)
		}

		pcid, err := api.MsigPropose(ctx, multisigAddr, minerAddr, big.Zero(), sender, uint64(miner.Methods.ChangeWorkerAddress), sp)
		if err != nil {
			return xerrors.Errorf("proposing message: %w", err)
		}

		fmt.Fprintln(cctx.App.Writer, "Propose Message CID:", pcid)

		// wait for it to get mined into a block
		wait, err := api.StateWaitMsg(ctx, pcid, build.MessageConfidence)
		if err != nil {
			return err
		}

		// check it executed successfully
		if wait.Receipt.ExitCode != 0 {
			fmt.Fprintln(cctx.App.Writer, "Propose worker change tx failed!")
			return err
		}

		var retval msig5.ProposeReturn
		if err := retval.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil {
			return fmt.Errorf("failed to unmarshal propose return value: %w", err)
		}

		fmt.Printf("Transaction ID: %d\n", retval.TxnID)
		if retval.Applied {
			fmt.Printf("Transaction was executed during propose\n")
			fmt.Printf("Exit Code: %d\n", retval.Code)
			fmt.Printf("Return Value: %x\n", retval.Ret)
		}
		return nil
	},
}

func getInputs(cctx *cli.Context) (address.Address, address.Address, address.Address, error) {
	multisigAddr, err := address.NewFromString(cctx.String("multisig"))
	if err != nil {
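Taken together, the three new subcommands give multisig-owned miners a two-step worker rotation plus control-address management: propose-change-worker proposes a ChangeWorkerAddress message while keeping the current control addresses, confirm-change-worker completes the rotation once the chain reaches WorkerChangeEpoch, and propose-control-set reuses ChangeWorkerAddressParams to replace only the control set. A rough usage sketch follows; it is hypothetical: the lotus-shed subcommand path and the --miner/--from flag names are assumptions (only the --multisig flag is visible above), and every address is a placeholder.

	lotus-shed miner-multisig propose-change-worker --multisig <msigAddr> --miner <minerAddr> --from <signerAddr> <newWorkerAddr>
	lotus-shed miner-multisig confirm-change-worker --multisig <msigAddr> --miner <minerAddr> --from <signerAddr> <newWorkerAddr>
	lotus-shed miner-multisig propose-control-set --multisig <msigAddr> --miner <minerAddr> --from <signerAddr> <ctrlAddr1> <ctrlAddr2>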
cmd/lotus-shed/shedgen/cbor_gen.go (new file, 128 lines)
@ -0,0 +1,128 @@
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.

package shedgen

import (
	"fmt"
	"io"
	"math"
	"sort"

	cid "github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"
	xerrors "golang.org/x/xerrors"
)

var _ = xerrors.Errorf
var _ = cid.Undef
var _ = math.E
var _ = sort.Sort

func (t *CarbNode) MarshalCBOR(w io.Writer) error {
	if t == nil {
		_, err := w.Write(cbg.CborNull)
		return err
	}
	if _, err := w.Write([]byte{161}); err != nil {
		return err
	}

	scratch := make([]byte, 9)

	// t.Sub ([]cid.Cid) (slice)
	if len("Sub") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"Sub\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Sub"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("Sub")); err != nil {
		return err
	}

	if len(t.Sub) > cbg.MaxLength {
		return xerrors.Errorf("Slice value in field t.Sub was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Sub))); err != nil {
		return err
	}
	for _, v := range t.Sub {
		if err := cbg.WriteCidBuf(scratch, w, v); err != nil {
			return xerrors.Errorf("failed writing cid field t.Sub: %w", err)
		}
	}
	return nil
}

func (t *CarbNode) UnmarshalCBOR(r io.Reader) error {
	*t = CarbNode{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	if err != nil {
		return err
	}
	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}

	if extra > cbg.MaxLength {
		return fmt.Errorf("CarbNode: map struct too large (%d)", extra)
	}

	var name string
	n := extra

	for i := uint64(0); i < n; i++ {

		{
			sval, err := cbg.ReadStringBuf(br, scratch)
			if err != nil {
				return err
			}

			name = string(sval)
		}

		switch name {
		// t.Sub ([]cid.Cid) (slice)
		case "Sub":

			maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
			if err != nil {
				return err
			}

			if extra > cbg.MaxLength {
				return fmt.Errorf("t.Sub: array too large (%d)", extra)
			}

			if maj != cbg.MajArray {
				return fmt.Errorf("expected cbor array")
			}

			if extra > 0 {
				t.Sub = make([]cid.Cid, extra)
			}

			for i := 0; i < int(extra); i++ {

				c, err := cbg.ReadCid(br)
				if err != nil {
					return xerrors.Errorf("reading cid field t.Sub failed: %w", err)
				}
				t.Sub[i] = c
			}

		default:
			// Field doesn't exist on this type, so ignore it
			cbg.ScanForLinks(r, func(cid.Cid) {})
		}
	}

	return nil
}
cmd/lotus-shed/shedgen/rawexport.go (new file, 7 lines)
@ -0,0 +1,7 @@
package shedgen

import "github.com/ipfs/go-cid"

type CarbNode struct {
	Sub []cid.Cid
}
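Since shedgen.CarbNode and its generated CBOR methods are both new in this diff, a small round-trip shows how they fit together. This is a sketch, not part of the commit: the import path is inferred from the file locations above, and the CID literal is just the identity-hashed empty block used as a placeholder.

package main

import (
	"bytes"
	"fmt"

	cid "github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/cmd/lotus-shed/shedgen"
)

func main() {
	// Arbitrary placeholder CID (the identity-hashed empty raw block), for illustration only.
	c, err := cid.Decode("bafkqaaa")
	if err != nil {
		panic(err)
	}

	in := &shedgen.CarbNode{Sub: []cid.Cid{c}}

	// Encode the node into CBOR bytes with the generated marshaller.
	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		panic(err)
	}

	// Decode the same bytes back into a fresh CarbNode.
	var out shedgen.CarbNode
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}

	fmt.Printf("round-tripped %d link(s), first: %s\n", len(out.Sub), out.Sub[0])
}

MarshalCBOR writes the struct as a CBOR map keyed by field name ("Sub"), which is why UnmarshalCBOR dispatches on the decoded string key and ignores unknown fields.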
@ -44,7 +44,7 @@ type BlockBuilder struct {

	parentTs *types.TipSet
	parentSt *state.StateTree
-	vm       *vm.VM
+	vm       *vm.LegacyVM
	sm       *stmgr.StateManager

	gasTotal int64

@ -73,9 +73,9 @@ func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.S
		parentSt: parentSt,
	}

-	// Then we construct a VM to execute messages for gas estimation.
+	// Then we construct a LegacyVM to execute messages for gas estimation.
	//
-	// Most parts of this VM are "real" except:
+	// Most parts of this LegacyVM are "real" except:
	// 1. We don't charge a fee.
	// 2. The runtime has "fake" proof logic.
	// 3. We don't actually save any of the results.

@ -92,7 +92,7 @@ func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.S
		BaseFee:       abi.NewTokenAmount(0),
		LookbackState: stmgr.LookbackStateGetterForTipset(sm, parentTs),
	}
-	bb.vm, err = vm.NewVM(bb.ctx, vmopt)
+	bb.vm, err = vm.NewLegacyVM(bb.ctx, vmopt)
	if err != nil {
		return nil, err
	}

@ -190,12 +190,12 @@ func (bb *BlockBuilder) PushMessage(msg *types.Message) (*types.MessageReceipt,
	return &ret.MessageReceipt, nil
}

-// ActorStore returns the VM's current (pending) blockstore.
+// ActorStore returns the LegacyVM's current (pending) blockstore.
func (bb *BlockBuilder) ActorStore() adt.Store {
	return bb.vm.ActorStore(bb.ctx)
}

-// StateTree returns the VM's current (pending) state-tree. This includes any changes made by
+// StateTree returns the LegacyVM's current (pending) state-tree. This includes any changes made by
// successfully pushed messages.
//
// You probably want ParentStateTree
@ -1,3 +1,4 @@
+//stm: #unit
package stages

import (

@ -13,6 +14,7 @@ import (
)

func TestCommitQueue(t *testing.T) {
+	//stm: @CMD_COMMIT_Q_ENQUEUE_COMMIT_001
	var q commitQueue
	addr1, err := address.NewIDAddress(1000)
	require.NoError(t, err)

@ -46,6 +48,7 @@ func TestCommitQueue(t *testing.T) {
		SectorNumber: 6,
	}))

+	//stm: @CMD_COMMIT_Q_ADVANCE_EPOCH_001, @CMD_COMMIT_Q_NEXT_MINER_001
	epoch := abi.ChainEpoch(0)
	q.advanceEpoch(epoch)
	_, _, ok := q.nextMiner()
@ -1,3 +1,4 @@
+//stm: #unit
package main

import (

@ -10,6 +11,7 @@ import (
)

func TestProtocolCodenames(t *testing.T) {
+	//stm: @OTHER_IMPLEMENTATION_EPOCH_CODENAMES_001
	if height := abi.ChainEpoch(100); GetProtocolCodename(height) != "genesis" {
		t.Fatal("expected genesis codename")
	}
@ -1,3 +1,4 @@
+//stm: #chaos
package chaos

import (

@ -15,6 +16,7 @@ import (
)

func TestSingleton(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_BUILDER_001
	receiver := atesting2.NewIDAddr(t, 100)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -29,6 +31,7 @@ func TestSingleton(t *testing.T) {
}

func TestCallerValidationNone(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_CALLER_VALIDATION_001
	receiver := atesting2.NewIDAddr(t, 100)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -40,6 +43,7 @@ func TestCallerValidationNone(t *testing.T) {
}

func TestCallerValidationIs(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_CALLER_VALIDATION_001
	caller := atesting2.NewIDAddr(t, 100)
	receiver := atesting2.NewIDAddr(t, 101)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -69,6 +73,7 @@ func TestCallerValidationIs(t *testing.T) {
}

func TestCallerValidationType(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_CALLER_VALIDATION_001
	caller := atesting2.NewIDAddr(t, 100)
	receiver := atesting2.NewIDAddr(t, 101)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -95,6 +100,7 @@ func TestCallerValidationType(t *testing.T) {
}

func TestCallerValidationInvalidBranch(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_CALLER_VALIDATION_001
	receiver := atesting2.NewIDAddr(t, 100)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -108,6 +114,7 @@ func TestCallerValidationInvalidBranch(t *testing.T) {
}

func TestDeleteActor(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_CREATE_ACTOR_001
	receiver := atesting2.NewIDAddr(t, 100)
	beneficiary := atesting2.NewIDAddr(t, 101)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -122,6 +129,7 @@ func TestDeleteActor(t *testing.T) {
}

func TestMutateStateInTransaction(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_CREATE_STATE_001, @CHAIN_ACTOR_CHAOS_MUTATE_STATE_001
	receiver := atesting2.NewIDAddr(t, 100)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -149,6 +157,7 @@ func TestMutateStateInTransaction(t *testing.T) {
}

func TestMutateStateAfterTransaction(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_CREATE_STATE_001, @CHAIN_ACTOR_CHAOS_MUTATE_STATE_001
	receiver := atesting2.NewIDAddr(t, 100)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -183,6 +192,7 @@ func TestMutateStateAfterTransaction(t *testing.T) {
}

func TestMutateStateReadonly(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_CREATE_STATE_001, @CHAIN_ACTOR_CHAOS_MUTATE_STATE_001
	receiver := atesting2.NewIDAddr(t, 100)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -217,6 +227,7 @@ func TestMutateStateReadonly(t *testing.T) {
}

func TestMutateStateInvalidBranch(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_MUTATE_STATE_001
	receiver := atesting2.NewIDAddr(t, 100)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -231,6 +242,7 @@ func TestMutateStateInvalidBranch(t *testing.T) {
}

func TestAbortWith(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_ABORT_WITH_001
	receiver := atesting2.NewIDAddr(t, 100)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -249,6 +261,7 @@ func TestAbortWith(t *testing.T) {
}

func TestAbortWithUncontrolled(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_ABORT_WITH_001
	receiver := atesting2.NewIDAddr(t, 100)
	builder := mock2.NewBuilder(context.Background(), receiver)

@ -266,6 +279,7 @@ func TestAbortWithUncontrolled(t *testing.T) {
}

func TestInspectRuntime(t *testing.T) {
+	//stm: @CHAIN_ACTOR_CHAOS_INSPECT_RUNTIME_001, @CHAIN_ACTOR_CHAOS_CREATE_STATE_001
	caller := atesting2.NewIDAddr(t, 100)
	receiver := atesting2.NewIDAddr(t, 101)
	builder := mock2.NewBuilder(context.Background(), receiver)
@ -1,3 +1,6 @@
+//stm: ignore
+// This file does not test any behaviors by itself; rather, it runs other test files.
+// Therefore, this file should not be annotated.
package conformance

import (
Some files were not shown because too many files have changed in this diff.