Merge pull request #8612 from filecoin-project/release/v1.15.2
build: release: v1.15.2
This commit is contained in:
commit
518dc962ee
@ -1,12 +1,11 @@
|
||||
version: 2.1
|
||||
orbs:
|
||||
go: gotest/tools@0.0.13
|
||||
aws-cli: circleci/aws-cli@1.3.2
|
||||
|
||||
executors:
|
||||
golang:
|
||||
docker:
|
||||
- image: circleci/golang:1.16.4
|
||||
- image: cimg/go:1.17.9
|
||||
resource_class: 2xlarge
|
||||
ubuntu:
|
||||
docker:
|
||||
@ -25,8 +24,9 @@ executors:
|
||||
commands:
|
||||
install-deps:
|
||||
steps:
|
||||
- go/install-ssh
|
||||
- go/install: {package: git}
|
||||
- run: |
|
||||
sudo apt update
|
||||
sudo apt install python-is-python3
|
||||
prepare:
|
||||
parameters:
|
||||
linux:
|
||||
@ -110,8 +110,12 @@ jobs:
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- go/mod-tidy-check
|
||||
|
||||
- run: go mod tidy -v
|
||||
- run:
|
||||
name: Check git diff
|
||||
command: |
|
||||
git --no-pager diff go.mod go.sum
|
||||
git --no-pager diff --quiet go.mod go.sum
|
||||
build-all:
|
||||
executor: golang
|
||||
steps:
|
||||
@ -188,9 +192,6 @@ jobs:
|
||||
command: make deps lotus
|
||||
no_output_timeout: 30m
|
||||
- download-params
|
||||
- go/install-gotestsum:
|
||||
gobin: $HOME/.local/bin
|
||||
version: 0.5.2
|
||||
- run:
|
||||
name: go test
|
||||
environment:
|
||||
@ -215,8 +216,6 @@ jobs:
|
||||
- when:
|
||||
condition: << parameters.codecov-upload >>
|
||||
steps:
|
||||
- go/install: {package: bash}
|
||||
- go/install: {package: curl}
|
||||
- run:
|
||||
shell: /bin/bash -eo pipefail
|
||||
command: |
|
||||
@ -255,9 +254,6 @@ jobs:
|
||||
cd extern/test-vectors
|
||||
git fetch
|
||||
git checkout origin/<< parameters.vectors-branch >>
|
||||
- go/install-gotestsum:
|
||||
gobin: $HOME/.local/bin
|
||||
version: 0.5.2
|
||||
- run:
|
||||
name: install statediff globally
|
||||
command: |
|
||||
@ -370,8 +366,8 @@ jobs:
|
||||
- run:
|
||||
name: Install go
|
||||
command: |
|
||||
curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \
|
||||
sudo installer -pkg go1.16.4.darwin-amd64.pkg -target /
|
||||
curl -O https://dl.google.com/go/go1.17.9.darwin-amd64.pkg && \
|
||||
sudo installer -pkg go1.17.9.darwin-amd64.pkg -target /
|
||||
- run:
|
||||
name: Install pkg-config
|
||||
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
|
||||
@ -392,7 +388,6 @@ jobs:
|
||||
- restore_cache:
|
||||
name: restore cargo cache
|
||||
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
|
||||
- install-deps
|
||||
- run:
|
||||
command: make build
|
||||
no_output_timeout: 30m
|
||||
@ -512,9 +507,6 @@ jobs:
|
||||
executor:
|
||||
type: executor
|
||||
default: golang
|
||||
golangci-lint-version:
|
||||
type: string
|
||||
default: 1.27.0
|
||||
concurrency:
|
||||
type: string
|
||||
default: '2'
|
||||
@ -533,13 +525,10 @@ jobs:
|
||||
- run:
|
||||
command: make deps
|
||||
no_output_timeout: 30m
|
||||
- go/install-golangci-lint:
|
||||
gobin: $HOME/.local/bin
|
||||
version: << parameters.golangci-lint-version >>
|
||||
- run:
|
||||
name: Lint
|
||||
command: |
|
||||
$HOME/.local/bin/golangci-lint run -v --timeout 2m \
|
||||
golangci-lint run -v --timeout 2m \
|
||||
--concurrency << parameters.concurrency >> << parameters.args >>
|
||||
lint-all:
|
||||
<<: *lint
|
||||
@ -909,6 +898,11 @@ workflows:
|
||||
suite: itest-sector_finalize_early
|
||||
target: "./itests/sector_finalize_early_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-sector_make_cc_avail
|
||||
suite: itest-sector_make_cc_avail
|
||||
target: "./itests/sector_make_cc_avail_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-sector_miner_collateral
|
||||
suite: itest-sector_miner_collateral
|
||||
@ -919,6 +913,16 @@ workflows:
|
||||
suite: itest-sector_pledge
|
||||
target: "./itests/sector_pledge_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-sector_prefer_no_upgrade
|
||||
suite: itest-sector_prefer_no_upgrade
|
||||
target: "./itests/sector_prefer_no_upgrade_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-sector_revert_available
|
||||
suite: itest-sector_revert_available
|
||||
target: "./itests/sector_revert_available_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-sector_terminate
|
||||
suite: itest-sector_terminate
|
||||
@ -949,6 +953,11 @@ workflows:
|
||||
suite: itest-wdpost
|
||||
target: "./itests/wdpost_test.go"
|
||||
|
||||
- test:
|
||||
name: test-itest-worker
|
||||
suite: itest-worker
|
||||
target: "./itests/worker_test.go"
|
||||
|
||||
- test:
|
||||
name: test-unit-cli
|
||||
suite: utest-unit-cli
|
||||
@ -1004,9 +1013,6 @@ workflows:
|
||||
- build-lotus-soup
|
||||
- build-macos:
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
- /.*/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
|
@ -1,12 +1,11 @@
|
||||
version: 2.1
|
||||
orbs:
|
||||
go: gotest/tools@0.0.13
|
||||
aws-cli: circleci/aws-cli@1.3.2
|
||||
|
||||
executors:
|
||||
golang:
|
||||
docker:
|
||||
- image: circleci/golang:1.16.4
|
||||
- image: cimg/go:1.17.9
|
||||
resource_class: 2xlarge
|
||||
ubuntu:
|
||||
docker:
|
||||
@ -25,8 +24,9 @@ executors:
|
||||
commands:
|
||||
install-deps:
|
||||
steps:
|
||||
- go/install-ssh
|
||||
- go/install: {package: git}
|
||||
- run: |
|
||||
sudo apt update
|
||||
sudo apt install python-is-python3
|
||||
prepare:
|
||||
parameters:
|
||||
linux:
|
||||
@ -110,8 +110,12 @@ jobs:
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- go/mod-tidy-check
|
||||
|
||||
- run: go mod tidy -v
|
||||
- run:
|
||||
name: Check git diff
|
||||
command: |
|
||||
git --no-pager diff go.mod go.sum
|
||||
git --no-pager diff --quiet go.mod go.sum
|
||||
build-all:
|
||||
executor: golang
|
||||
steps:
|
||||
@ -188,9 +192,6 @@ jobs:
|
||||
command: make deps lotus
|
||||
no_output_timeout: 30m
|
||||
- download-params
|
||||
- go/install-gotestsum:
|
||||
gobin: $HOME/.local/bin
|
||||
version: 0.5.2
|
||||
- run:
|
||||
name: go test
|
||||
environment:
|
||||
@ -215,8 +216,6 @@ jobs:
|
||||
- when:
|
||||
condition: << parameters.codecov-upload >>
|
||||
steps:
|
||||
- go/install: {package: bash}
|
||||
- go/install: {package: curl}
|
||||
- run:
|
||||
shell: /bin/bash -eo pipefail
|
||||
command: |
|
||||
@ -255,9 +254,6 @@ jobs:
|
||||
cd extern/test-vectors
|
||||
git fetch
|
||||
git checkout origin/<< parameters.vectors-branch >>
|
||||
- go/install-gotestsum:
|
||||
gobin: $HOME/.local/bin
|
||||
version: 0.5.2
|
||||
- run:
|
||||
name: install statediff globally
|
||||
command: |
|
||||
@ -370,8 +366,8 @@ jobs:
|
||||
- run:
|
||||
name: Install go
|
||||
command: |
|
||||
curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \
|
||||
sudo installer -pkg go1.16.4.darwin-amd64.pkg -target /
|
||||
curl -O https://dl.google.com/go/go1.17.9.darwin-amd64.pkg && \
|
||||
sudo installer -pkg go1.17.9.darwin-amd64.pkg -target /
|
||||
- run:
|
||||
name: Install pkg-config
|
||||
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
|
||||
@ -392,7 +388,6 @@ jobs:
|
||||
- restore_cache:
|
||||
name: restore cargo cache
|
||||
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
|
||||
- install-deps
|
||||
- run:
|
||||
command: make build
|
||||
no_output_timeout: 30m
|
||||
@ -512,9 +507,6 @@ jobs:
|
||||
executor:
|
||||
type: executor
|
||||
default: golang
|
||||
golangci-lint-version:
|
||||
type: string
|
||||
default: 1.27.0
|
||||
concurrency:
|
||||
type: string
|
||||
default: '2'
|
||||
@ -533,13 +525,10 @@ jobs:
|
||||
- run:
|
||||
command: make deps
|
||||
no_output_timeout: 30m
|
||||
- go/install-golangci-lint:
|
||||
gobin: $HOME/.local/bin
|
||||
version: << parameters.golangci-lint-version >>
|
||||
- run:
|
||||
name: Lint
|
||||
command: |
|
||||
$HOME/.local/bin/golangci-lint run -v --timeout 2m \
|
||||
golangci-lint run -v --timeout 2m \
|
||||
--concurrency << parameters.concurrency >> << parameters.args >>
|
||||
lint-all:
|
||||
<<: *lint
|
||||
@ -829,9 +818,6 @@ workflows:
|
||||
- build-lotus-soup
|
||||
- build-macos:
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
- /.*/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
|
@ -17,6 +17,7 @@ coverage:
|
||||
status:
|
||||
patch: off
|
||||
project:
|
||||
threshold: 1%
|
||||
tools-and-tests:
|
||||
target: auto
|
||||
threshold: 1%
|
||||
|
2
.github/workflows/codeql-analysis.yml
vendored
2
.github/workflows/codeql-analysis.yml
vendored
@ -37,7 +37,7 @@ jobs:
|
||||
|
||||
- uses: actions/setup-go@v1
|
||||
with:
|
||||
go-version: '1.16.4'
|
||||
go-version: '1.17.9'
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -41,6 +41,7 @@ build/paramfetch.sh
|
||||
/darwin
|
||||
/linux
|
||||
*.snap
|
||||
build/builtin-actors
|
||||
|
||||
*-fuzz.zip
|
||||
/chain/types/work_msg/
|
||||
|
@ -25,7 +25,7 @@ skip-dirs:
|
||||
|
||||
issues:
|
||||
exclude:
|
||||
- "func name will be used as test\\.Test.* by other packages, and that stutters; consider calling this"
|
||||
- "by other packages, and that stutters; consider calling this"
|
||||
- "Potential file inclusion via variable"
|
||||
- "should have( a package)? comment"
|
||||
- "Error return value of `logging.SetLogLevel` is not checked"
|
||||
@ -37,6 +37,9 @@ issues:
|
||||
- "string .* has .* occurrences, make it a constant"
|
||||
- "a blank import should be only in a main or test package, or have a comment justifying it"
|
||||
- "package comment should be of the form"
|
||||
- "Potential hardcoded credentials"
|
||||
- "Use of weak random number generator"
|
||||
- "xerrors.* is deprecated"
|
||||
|
||||
exclude-use-default: false
|
||||
exclude-rules:
|
||||
|
151
CHANGELOG.md
151
CHANGELOG.md
@ -1,5 +1,156 @@
|
||||
# Lotus changelog
|
||||
|
||||
# 1.15.2 / 2022-05-06
|
||||
|
||||
This is a highly recommended feature lotus release v1.15.2. This feature release introduces many new features and for SPs, including PoSt workers, sealing scheduler, snap deal queue and so on.
|
||||
|
||||
Note: You need to be using go v1.17.9&up from this release onwards.
|
||||
|
||||
## Highlights
|
||||
### ❣️❣️❣️ PoSt Workers ❣️❣️❣️
|
||||
‼️️Attention - the long-awaited yet highly requested PoSt workers, they are here! And they come in as a combo: you may setup PoSt workers for both winningPoSt or/and windowPoSt worker. You can also setup any number of PoSt workers as long as you have the hardware resources!
|
||||
For more details and learn how to set it up, see the docs [here](https://lotus.filecoin.io/storage-providers/seal-workers/post-workers/). You can also find early result of the PoSt workers performance by the community [here](https://github.com/filecoin-project/lotus/discussions/8375).
|
||||
- feat: PoSt workers ([filecoin-project/lotus#7971](https://github.com/filecoin-project/lotus/pull/7971))
|
||||
- feat: storage: Parallel proving checks ([filecoin-project/lotus#8391](https://github.com/filecoin-project/lotus/pull/8391))
|
||||
- adjust `ParallelCheckLimit` according to your resource setup to get pre-post checkers run faster!
|
||||
|
||||
In addition, we also added some handy toolings:
|
||||
- feat: miner: API/CLI to compute window-post ([filecoin-project/lotus#8389](https://github.com/filecoin-project/lotus/pull/8389))
|
||||
- run `lotus-miner proving compute window-post` to manually trigger a full windowPoSt computation for a specific deadline for full sanity checks.
|
||||
- feat: miner cli: Separate proving workers command ([filecoin-project/lotus#8379](https://github.com/filecoin-project/lotus/pull/8379))
|
||||
- run `lotus-miner proving workers` to list all the PoSt workers that's attached
|
||||
- feat: miner cli: proving check --faulty, faults in storage list sectors ([filecoin-project/lotus#8349](https://github.com/filecoin-project/lotus/pull/8349))
|
||||
- run `lotus-miner proving check --faulty` to identify the sectors that might be bad.
|
||||
|
||||
### 🔥🔥🔥 Sealing Scheduler Enhancement 🔥🔥🔥
|
||||
|
||||
Have you ever got a couple workers but only a few of them are super packed, while the rest are idling waiting for jobs? Now the task can be distributed more evenly with:
|
||||
- feat: sched: Improve worker assigning logic ([filecoin-project/lotus#8447](https://github.com/filecoin-project/lotus/pull/8447))
|
||||
|
||||
### 🌟🌟🌟 Snap Deal Enhancements 🌟🌟🌟
|
||||
|
||||
The Filecoin Network introduced Snap Deal with the network v15 OhSnap upgrade, and lotus shipped v1.14.0 with basic snappy support for SP to use this feature. Since then, we have received good ux feedbacks and bug reports from the community, and we are introducing a couple enhancement for SPs to better leverage this feature.
|
||||
|
||||
- feat: sealing: Sector upgrade queue ([filecoin-project/lotus#8330](https://github.com/filecoin-project/lotus/pull/8330))
|
||||
- Snap up CCs to be ready for deals automatically, learn more [here](https://lotus.filecoin.io/storage-providers/operate/snap-deals/#snap-deal-queue)
|
||||
- feat: sealing: More SnapDeals config knobs ([filecoin-project/lotus#8343](https://github.com/filecoin-project/lotus/pull/8343))
|
||||
- SPs can now set `PreferNewSectorsForDeals` and `MaxUpgradingSectors` to better manage their sealing/dealing pipeline.
|
||||
- feat: config: Move MakeNewSectorForDeals config into the Sealing sectoin ([filecoin-project/lotus#8378](https://github.com/filecoin-project/lotus/pull/8378))
|
||||
- fix: sealing: Release unsealed sector files after snapdeals abort ([filecoin-project/lotus#8438](https://github.com/filecoin-project/lotus/pull/8438))
|
||||
- fix: sealing: always do cooldown in handleSubmitReplicaUpdateFailed ([filecoin-project/lotus#8353](https://github.com/filecoin-project/lotus/pull/8353))
|
||||
- fix: storagefsm: Fix error loop on bad event ([filecoin-project/lotus#8338](https://github.com/filecoin-project/lotus/pull/8338))
|
||||
- fix: sealing: Remove sector copies from workers after snapdeals ([filecoin-project/lotus#8329](https://github.com/filecoin-project/lotus/pull/8329))
|
||||
|
||||
## New Features
|
||||
- enable rcmgr by default ([filecoin-project/lotus#8470](https://github.com/filecoin-project/lotus/pull/8470))
|
||||
- feat: cli: lotus client list-asks --protocols ([filecoin-project/lotus#8464](https://github.com/filecoin-project/lotus/pull/8464))
|
||||
- feat: shed: Multi-file vlog2car ([filecoin-project/lotus#8426](https://github.com/filecoin-project/lotus/pull/8426))
|
||||
- feat: worker: check fd limit on startup ([filecoin-project/lotus#8390](https://github.com/filecoin-project/lotus/pull/8390))
|
||||
- feat: lotus-shed: add command to compute state over a range of tipsets. ([filecoin-project/lotus#8371](https://github.com/filecoin-project/lotus/pull/8371))
|
||||
- feat: cli/net: implement 'net ping' command ([filecoin-project/lotus#8357](https://github.com/filecoin-project/lotus/pull/8357))
|
||||
- feat: stmgr: call: use a buffered concurrent-access blockstore ([filecoin-project/lotus#8358](https://github.com/filecoin-project/lotus/pull/8358))
|
||||
- feat: infra/ci: add `lotus-test` image as CI build step ([filecoin-project/lotus#7956](https://github.com/filecoin-project/lotus/pull/7956))
|
||||
- feat: multisig: lotus-sheed miner-multisig change-worker command. ([filecoin-project/lotus#8281](https://github.com/filecoin-project/lotus/pull/8281))
|
||||
- feat: Add additional test annotations (#8272) ([filecoin-project/lotus#8272](https://github.com/filecoin-project/lotus/pull/8272))
|
||||
|
||||
## Improvements
|
||||
- Revert appimage removal ([filecoin-project/lotus#8439](https://github.com/filecoin-project/lotus/pull/8439))
|
||||
- sealing: Don't panic in ReleaseUnsealed with no ranges ([filecoin-project/lotus#8461](https://github.com/filecoin-project/lotus/pull/8461))
|
||||
- testkit: give up on waiting for the RPC server to shutdown after 1s ([filecoin-project/lotus#8450](https://github.com/filecoin-project/lotus/pull/8450))
|
||||
- chore: events: implement event observer deregister method ([filecoin-project/lotus#8441](https://github.com/filecoin-project/lotus/pull/8441))
|
||||
- Thread safe piecereader ([filecoin-project/lotus#8397](https://github.com/filecoin-project/lotus/pull/8397))
|
||||
- VM: Refactor pricelist to be based on network versions ([filecoin-project/lotus#8369](https://github.com/filecoin-project/lotus/pull/8369))
|
||||
- --max-piece-size in set-ask is no longer required ([filecoin-project/lotus#8361](https://github.com/filecoin-project/lotus/pull/8361))
|
||||
- refactor: convert RepoType from int to interface ([filecoin-project/lotus#8086](https://github.com/filecoin-project/lotus/pull/8086))
|
||||
- Shed: fix error message ([filecoin-project/lotus#8340](https://github.com/filecoin-project/lotus/pull/8340))
|
||||
|
||||
## Bug Fixes
|
||||
- fix: FVM: add finality check for consensus faults ([filecoin-project/lotus#8452](https://github.com/filecoin-project/lotus/pull/8452))
|
||||
- fix: market: Reuse the market `PubSub` in index provider ([filecoin-project/lotus#8443](https://github.com/filecoin-project/lotus/pull/8443))
|
||||
- fix: market: set all index provider options based on lotus config ([filecoin-project/lotus#8444](https://github.com/filecoin-project/lotus/pull/8444))
|
||||
- release worker tracker lock when call cb func ([filecoin-project/lotus#8206](https://github.com/filecoin-project/lotus/pull/8206))
|
||||
- fix: node: Fix market node startup ([filecoin-project/lotus#8425](https://github.com/filecoin-project/lotus/pull/8425))
|
||||
- fix: sealing: Fix PR1 worker selection ([filecoin-project/lotus#8420](https://github.com/filecoin-project/lotus/pull/8420))
|
||||
- fix: go: make Go 1.18 builds work ([filecoin-project/lotus#8410](https://github.com/filecoin-project/lotus/pull/8410))
|
||||
- fix: sealing: Added error checking ([filecoin-project/lotus#8404](https://github.com/filecoin-project/lotus/pull/8404))
|
||||
- fix: ux: Change Propose-worker msg ([filecoin-project/lotus#8384](https://github.com/filecoin-project/lotus/pull/8384))
|
||||
- fix: miner: dead loop on removing sector ([filecoin-project/lotus#8386](https://github.com/filecoin-project/lotus/pull/8386))
|
||||
- fix: worker: Fix default value handling ([filecoin-project/lotus#8380](https://github.com/filecoin-project/lotus/pull/8380))
|
||||
- Revert "Update params for interopnet for fvm" ([filecoin-project/lotus#8374](https://github.com/filecoin-project/lotus/pull/8374))
|
||||
- fix: cli: Reset miner/ask lists in interactive deal 'miner' step (#8155) ([filecoin-project/lotus#8155](https://github.com/filecoin-project/lotus/pull/8155))
|
||||
- fix:snapup: Rename error message ([filecoin-project/lotus#8370](https://github.com/filecoin-project/lotus/pull/8370))
|
||||
- fix: multisig: Print "waiting for confirmation.." ([filecoin-project/lotus#8368](https://github.com/filecoin-project/lotus/pull/8368))
|
||||
- fix: lotus-wallet: pass correct repo type to repo.Init ([filecoin-project/lotus#8356](https://github.com/filecoin-project/lotus/pull/8356))
|
||||
- fix: avoid racy memstores when estimating gas ([filecoin-project/lotus#8351](https://github.com/filecoin-project/lotus/pull/8351))
|
||||
- fix: itests: Don't hang on exit in MineBlocksMustPost ([filecoin-project/lotus#8345](https://github.com/filecoin-project/lotus/pull/8345))
|
||||
- fix: miner cli: Estimate deal weight in sector list when upgrading ([filecoin-project/lotus#8336](https://github.com/filecoin-project/lotus/pull/8336))
|
||||
- fix: sealing: FinalizeSector doesn't need sealed replica access ([filecoin-project/lotus#8337](https://github.com/filecoin-project/lotus/pull/8337))
|
||||
- fix: cli: add ArgsUsage field to clientGetDealCmd ([filecoin-project/lotus#8241](https://github.com/filecoin-project/lotus/pull/8241))
|
||||
- fix: market: Infer index provider topic from network name by default #8533
|
||||
- fix: deps: Update to FFI with logger bump #8588
|
||||
- fix: sealing: Finalize snap sectors before submitting proofs #8588
|
||||
|
||||
## Dependency Updates
|
||||
- deps: update go-libp2p@v0.19 #8533
|
||||
- deps: ffi: update ffi that includes the log fix #8577
|
||||
- deps: ffi: pull ffi that includes the latest fvm ([filecoin-project/lotus#8424](https://github.com/filecoin-project/lotus/pull/8424))
|
||||
- Update to go-log 2.5.1 ([filecoin-project/lotus#8422](https://github.com/filecoin-project/lotus/pull/8422))
|
||||
- chore(deps): update go-data-transfer with fixes (master edition) ([filecoin-project/lotus#8411](https://github.com/filecoin-project/lotus/pull/8411))
|
||||
- deps: update ffi with actor v7.1.0 and fvm that uses the bundle that includes the new manifest ([filecoin-project/lotus#8402](https://github.com/filecoin-project/lotus/pull/8402))
|
||||
- Update to specs-storage v0.2.2 ([filecoin-project/lotus#8400](https://github.com/filecoin-project/lotus/pull/8400))
|
||||
- chore: ffi: the latest fvm release ([filecoin-project/lotus#8381](https://github.com/filecoin-project/lotus/pull/8381))
|
||||
- Update params for interopnet for fvm ([filecoin-project/lotus#8119](https://github.com/filecoin-project/lotus/pull/8119))
|
||||
- github.com/filecoin-project/specs-storage (v0.2.0 -> v0.2.2):
|
||||
- ci: deps: macos build deps #8588
|
||||
|
||||
## Others
|
||||
- chore: merge releases back to master ([filecoin-project/lotus#8468](https://github.com/filecoin-project/lotus/pull/8468))
|
||||
- Packer publish copy orb ([filecoin-project/lotus#8413](https://github.com/filecoin-project/lotus/pull/8413))
|
||||
- chore: Remove temp gomock reflect file ([filecoin-project/lotus#8372](https://github.com/filecoin-project/lotus/pull/8372))
|
||||
- chore: FVM: log when fvm is used ([filecoin-project/lotus#8363](https://github.com/filecoin-project/lotus/pull/8363))
|
||||
- lib: extract unixfs filestore into lib ([filecoin-project/lotus#8354](https://github.com/filecoin-project/lotus/pull/8354))
|
||||
- test: use `T.TempDir` to create temporary test directory ([filecoin-project/lotus#8295](https://github.com/filecoin-project/lotus/pull/8295))
|
||||
- Update Dockerfile.lotus
|
||||
- chore:sealing:remove endpoint from cli ([filecoin-project/lotus#8215](https://github.com/filecoin-project/lotus/pull/8215))
|
||||
- chore: build: bump the master version to v1.15.2-dev ([filecoin-project/lotus#8322](https://github.com/filecoin-project/lotus/pull/8322))
|
||||
- chore: fix lint issue #8533
|
||||
|
||||
## Contributors
|
||||
|
||||
| Contributor | Commits | Lines ± | Files Changed |
|
||||
|-------------|---------|---------|---------------|
|
||||
| @magik6k | 95 | +5147/-2922 | 401 |
|
||||
| @mz-sirius | 3 | +1789/-546 | 48 |
|
||||
| @nonsense | 11 | +777/-567 | 121 |
|
||||
| @arajasek | 11 | +336/-231 | 28 |
|
||||
| Darko Brdareski | 1 | +463/-13 | 95 |
|
||||
| @coryschwartz | 11 | +147/-217 | 13 |
|
||||
| spark8899 | 2 | +300/-0 | 2 |
|
||||
| @zenground0 | 2 | +6/-193 | 7 |
|
||||
| Eng Zer Jun | 1 | +31/-158 | 11 |
|
||||
| Kevin Li | 2 | +174/-0 | 14 |
|
||||
| @arajasek | 5 | +85/-86 | 18 |
|
||||
| @jennijuju | 1 | +0/-119 | 3 |
|
||||
| @jennijuju | 1 | +0/-98 | 6 |
|
||||
| @raulk | 1 | +60/-1 | 1 |
|
||||
| @frrist | 1 | +56/-0 | 2 |
|
||||
| @vyzo | 3 | +18/-16 | 5 |
|
||||
| @Masih | 3 | +29/-4 | 3 |
|
||||
| @jennijuju | 4 | +18/-11 | 11 |
|
||||
| @hannahhoward | 1 | +13/-10 | 2 |
|
||||
| @dirkmc | 1 | +21/-1 | 1 |
|
||||
| koalacxr | 1 | +10/-11 | 4 |
|
||||
| Aarsh Shah | 1 | +19/-1 | 1 |
|
||||
| @Rjan | 6 | +10/-8 | 7 |
|
||||
| @zl | 1 | +7/-1 | 1 |
|
||||
| KAYUII | 1 | +3/-2 | 1 |
|
||||
| @simlecode | 1 | +4/-0 | 1 |
|
||||
| @dirkmc | 1 | +1/-3 | 1 |
|
||||
| Jerry | 1 | +3/-0 | 1 |
|
||||
| @steblian | 1 | +1/-1 | 1 |
|
||||
| Geoff Stuart | 1 | +1/-0 | 1 |
|
||||
| Florian Ruen | 1 | +0/-1 | 1 |
|
||||
|
||||
# 1.15.1 / 2022-04-07
|
||||
|
||||
This is a *HIGHLY recommended* feature release v1.15.1, especially for node operators and storage providers who want to be a part of the content addressing network of Filecoin and IPFS.
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM golang:1.16.4 AS builder-deps
|
||||
FROM golang:1.17.9-buster AS builder-deps
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
|
||||
@ -61,8 +61,9 @@ COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/
|
||||
|
||||
RUN useradd -r -u 532 -U fc
|
||||
|
||||
RUN useradd -r -u 532 -U fc \
|
||||
&& mkdir -p /etc/OpenCL/vendors \
|
||||
&& echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
|
||||
|
||||
###
|
||||
FROM base AS lotus
|
||||
@ -252,3 +253,4 @@ EXPOSE 1234
|
||||
EXPOSE 2345
|
||||
EXPOSE 3456
|
||||
EXPOSE 1777
|
||||
|
||||
|
4
Makefile
4
Makefile
@ -10,7 +10,7 @@ GOCC?=go
|
||||
GOVERSION:=$(shell $(GOCC) version | tr ' ' '\n' | grep go1 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}')
|
||||
ifeq ($(shell expr $(GOVERSION) \< 1016000), 1)
|
||||
$(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000))
|
||||
$(error Update Golang to version to at least 1.16.0)
|
||||
$(error Update Golang to version to at least 1.17.9)
|
||||
endif
|
||||
|
||||
# git modules that need to be loaded
|
||||
@ -97,7 +97,7 @@ BINS+=lotus-miner
|
||||
|
||||
lotus-worker: $(BUILD_DEPS)
|
||||
rm -f lotus-worker
|
||||
$(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-seal-worker
|
||||
$(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker
|
||||
.PHONY: lotus-worker
|
||||
BINS+=lotus-worker
|
||||
|
||||
|
@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://d
|
||||
|
||||
#### Go
|
||||
|
||||
To build Lotus, you need a working installation of [Go 1.16.4 or higher](https://golang.org/dl/):
|
||||
To build Lotus, you need a working installation of [Go 1.17.9 or higher](https://golang.org/dl/):
|
||||
|
||||
```bash
|
||||
wget -c https://golang.org/dl/go1.16.4.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
|
||||
wget -c https://golang.org/dl/go1.17.9.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
|
||||
```
|
||||
|
||||
**TIP:**
|
||||
|
@ -360,7 +360,7 @@ type FullNode interface {
|
||||
// ClientGetRetrievalUpdates returns status of updated retrieval deals
|
||||
ClientGetRetrievalUpdates(ctx context.Context) (<-chan RetrievalInfo, error) //perm:write
|
||||
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
|
||||
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
|
||||
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*StorageAsk, error) //perm:read
|
||||
// ClientCalcCommP calculates the CommP and data size of the specified CID
|
||||
ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) //perm:read
|
||||
// ClientCalcCommP calculates the CommP for a specified file
|
||||
@ -728,6 +728,12 @@ type FullNode interface {
|
||||
CreateBackup(ctx context.Context, fpath string) error //perm:admin
|
||||
}
|
||||
|
||||
type StorageAsk struct {
|
||||
Response *storagemarket.StorageAsk
|
||||
|
||||
DealProtocols []string
|
||||
}
|
||||
|
||||
type FileRef struct {
|
||||
Path string
|
||||
IsCAR bool
|
||||
|
@ -2,6 +2,7 @@ package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
metrics "github.com/libp2p/go-libp2p-core/metrics"
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
@ -25,6 +26,7 @@ type Net interface {
|
||||
|
||||
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read
|
||||
NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read
|
||||
NetPing(context.Context, peer.ID) (time.Duration, error) //perm:read
|
||||
NetConnect(context.Context, peer.AddrInfo) error //perm:write
|
||||
NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read
|
||||
NetDisconnect(context.Context, peer.ID) error //perm:write
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -24,7 +25,6 @@ import (
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
|
||||
)
|
||||
@ -52,6 +52,8 @@ type StorageMiner interface {
|
||||
|
||||
MiningBase(context.Context) (*types.TipSet, error) //perm:read
|
||||
|
||||
ComputeWindowPoSt(ctx context.Context, dlIdx uint64, tsk types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) //perm:admin
|
||||
|
||||
// Temp api for testing
|
||||
PledgeSector(context.Context) (abi.SectorID, error) //perm:write
|
||||
|
||||
@ -93,6 +95,7 @@ type StorageMiner interface {
|
||||
// SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
|
||||
// be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
|
||||
SectorRemove(context.Context, abi.SectorNumber) error //perm:admin
|
||||
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin
|
||||
// SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
|
||||
// automatically removes it from storage
|
||||
SectorTerminate(context.Context, abi.SectorNumber) error //perm:admin
|
||||
@ -101,7 +104,6 @@ type StorageMiner interface {
|
||||
SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
|
||||
// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
|
||||
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
|
||||
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin
|
||||
// SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
|
||||
// Returns null if message wasn't sent
|
||||
SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin
|
||||
@ -143,21 +145,21 @@ type StorageMiner interface {
|
||||
SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin
|
||||
SealingAbort(ctx context.Context, call storiface.CallID) error //perm:admin
|
||||
|
||||
//stores.SectorIndex
|
||||
StorageAttach(context.Context, stores.StorageInfo, fsutil.FsStat) error //perm:admin
|
||||
StorageInfo(context.Context, stores.ID) (stores.StorageInfo, error) //perm:admin
|
||||
StorageReportHealth(context.Context, stores.ID, stores.HealthReport) error //perm:admin
|
||||
StorageDeclareSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin
|
||||
StorageDropSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin
|
||||
StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) //perm:admin
|
||||
StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin
|
||||
// SectorIndex
|
||||
StorageAttach(context.Context, storiface.StorageInfo, fsutil.FsStat) error //perm:admin
|
||||
StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error) //perm:admin
|
||||
StorageReportHealth(context.Context, storiface.ID, storiface.HealthReport) error //perm:admin
|
||||
StorageDeclareSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin
|
||||
StorageDropSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin
|
||||
StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error) //perm:admin
|
||||
StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]storiface.StorageInfo, error) //perm:admin
|
||||
StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
|
||||
StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
|
||||
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin
|
||||
StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin
|
||||
StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) //perm:admin
|
||||
|
||||
StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin
|
||||
StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin
|
||||
StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
|
||||
StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) //perm:admin
|
||||
|
||||
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
|
||||
MarketListDeals(ctx context.Context) ([]MarketDeal, error) //perm:read
|
||||
@ -266,13 +268,12 @@ type StorageMiner interface {
|
||||
// the path specified when calling CreateBackup is within the base path
|
||||
CreateBackup(ctx context.Context, fpath string) error //perm:admin
|
||||
|
||||
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, update []bool, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
|
||||
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
|
||||
|
||||
ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
|
||||
}
|
||||
|
||||
var _ storiface.WorkerReturn = *new(StorageMiner)
|
||||
var _ stores.SectorIndex = *new(StorageMiner)
|
||||
|
||||
type SealRes struct {
|
||||
Err string
|
||||
@ -309,6 +310,7 @@ type SectorInfo struct {
|
||||
CommitMsg *cid.Cid
|
||||
Retries uint64
|
||||
ToUpgrade bool
|
||||
ReplicaUpdateMessage *cid.Cid
|
||||
|
||||
LastErr string
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package api
|
||||
|
||||
import (
|
||||
@ -26,6 +27,7 @@ func goCmd() string {
|
||||
}
|
||||
|
||||
func TestDoesntDependOnFFI(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_FFI_DEPENDENCE_001
|
||||
deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -38,6 +40,7 @@ func TestDoesntDependOnFFI(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDoesntDependOnBuild(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_FFI_DEPENDENCE_002
|
||||
deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -50,6 +53,7 @@ func TestDoesntDependOnBuild(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReturnTypes(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_001
|
||||
errType := reflect.TypeOf(new(error)).Elem()
|
||||
bareIface := reflect.TypeOf(new(interface{})).Elem()
|
||||
jmarsh := reflect.TypeOf(new(json.Marshaler)).Elem()
|
||||
@ -115,6 +119,7 @@ func TestReturnTypes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPermTags(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_PERM_TAGS_001
|
||||
_ = PermissionedFullAPI(&FullNodeStruct{})
|
||||
_ = PermissionedStorMinerAPI(&StorageMinerStruct{})
|
||||
_ = PermissionedWorkerAPI(&WorkerStruct{})
|
||||
|
@ -7,8 +7,9 @@ import (
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
|
||||
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
)
|
||||
@ -29,7 +30,7 @@ type Worker interface {
|
||||
|
||||
// TaskType -> Weight
|
||||
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) //perm:admin
|
||||
Paths(context.Context) ([]stores.StoragePath, error) //perm:admin
|
||||
Paths(context.Context) ([]storiface.StoragePath, error) //perm:admin
|
||||
Info(context.Context) (storiface.WorkerInfo, error) //perm:admin
|
||||
|
||||
// storiface.WorkerCalls
|
||||
@ -49,6 +50,9 @@ type Worker interface {
|
||||
UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin
|
||||
Fetch(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin
|
||||
|
||||
GenerateWinningPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, randomness abi.PoStRandomness) ([]proof.PoStProof, error) //perm:admin
|
||||
GenerateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness) (storiface.WindowPoStResult, error) //perm:admin
|
||||
|
||||
TaskDisable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
|
||||
TaskEnable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
|
||||
|
||||
|
@ -40,7 +40,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
@ -199,10 +198,10 @@ func init() {
|
||||
},
|
||||
})
|
||||
addExample(api.SectorState(sealing.Proving))
|
||||
addExample(stores.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"))
|
||||
addExample(storiface.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"))
|
||||
addExample(storiface.FTUnsealed)
|
||||
addExample(storiface.PathSealing)
|
||||
addExample(map[stores.ID][]stores.Decl{
|
||||
addExample(map[storiface.ID][]storiface.Decl{
|
||||
"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": {
|
||||
{
|
||||
SectorID: abi.SectorID{Miner: 1000, Number: 100},
|
||||
@ -210,7 +209,7 @@ func init() {
|
||||
},
|
||||
},
|
||||
})
|
||||
addExample(map[stores.ID]string{
|
||||
addExample(map[storiface.ID]string{
|
||||
"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path",
|
||||
})
|
||||
addExample(map[uuid.UUID][]storiface.WorkerJob{
|
||||
|
@ -8,12 +8,12 @@ import (
|
||||
context "context"
|
||||
json "encoding/json"
|
||||
reflect "reflect"
|
||||
time "time"
|
||||
|
||||
address "github.com/filecoin-project/go-address"
|
||||
bitfield "github.com/filecoin-project/go-bitfield"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
auth "github.com/filecoin-project/go-jsonrpc/auth"
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
big "github.com/filecoin-project/go-state-types/big"
|
||||
@ -745,10 +745,10 @@ func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3
|
||||
}
|
||||
|
||||
// ClientQueryAsk mocks base method.
|
||||
func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) {
|
||||
func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*api.StorageAsk, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(*storagemarket.StorageAsk)
|
||||
ret0, _ := ret[0].(*api.StorageAsk)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@ -1856,6 +1856,21 @@ func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0)
|
||||
}
|
||||
|
||||
// NetPing mocks base method.
|
||||
func (m *MockFullNode) NetPing(arg0 context.Context, arg1 peer.ID) (time.Duration, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "NetPing", arg0, arg1)
|
||||
ret0, _ := ret[0].(time.Duration)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// NetPing indicates an expected call of NetPing.
|
||||
func (mr *MockFullNodeMockRecorder) NetPing(arg0, arg1 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPing", reflect.TypeOf((*MockFullNode)(nil).NetPing), arg0, arg1)
|
||||
}
|
||||
|
||||
// NetProtectAdd mocks base method.
|
||||
func (m *MockFullNode) NetProtectAdd(arg0 context.Context, arg1 []peer.ID) error {
|
||||
m.ctrl.T.Helper()
|
||||
|
158
api/proxy_gen.go
158
api/proxy_gen.go
@ -25,12 +25,12 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
|
||||
"github.com/filecoin-project/lotus/journal/alerting"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo/imports"
|
||||
"github.com/filecoin-project/specs-actors/v5/actors/runtime/proof"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
"github.com/google/uuid"
|
||||
"github.com/ipfs/go-cid"
|
||||
@ -190,7 +190,7 @@ type FullNodeStruct struct {
|
||||
|
||||
ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) `perm:"read"`
|
||||
|
||||
ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
|
||||
ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*StorageAsk, error) `perm:"read"`
|
||||
|
||||
ClientRemoveImport func(p0 context.Context, p1 imports.ID) error `perm:"admin"`
|
||||
|
||||
@ -597,6 +597,8 @@ type NetStruct struct {
|
||||
|
||||
NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"`
|
||||
|
||||
NetPing func(p0 context.Context, p1 peer.ID) (time.Duration, error) `perm:"read"`
|
||||
|
||||
NetProtectAdd func(p0 context.Context, p1 []peer.ID) error `perm:"admin"`
|
||||
|
||||
NetProtectList func(p0 context.Context) ([]peer.ID, error) `perm:"read"`
|
||||
@ -635,10 +637,12 @@ type StorageMinerStruct struct {
|
||||
|
||||
ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"`
|
||||
|
||||
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
|
||||
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
|
||||
|
||||
ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"`
|
||||
|
||||
ComputeWindowPoSt func(p0 context.Context, p1 uint64, p2 types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) `perm:"admin"`
|
||||
|
||||
CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
|
||||
|
||||
DagstoreGC func(p0 context.Context) ([]DagstoreShardResult, error) `perm:"admin"`
|
||||
@ -823,29 +827,29 @@ type StorageMinerStruct struct {
|
||||
|
||||
StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`
|
||||
|
||||
StorageAttach func(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error `perm:"admin"`
|
||||
StorageAttach func(p0 context.Context, p1 storiface.StorageInfo, p2 fsutil.FsStat) error `perm:"admin"`
|
||||
|
||||
StorageBestAlloc func(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) `perm:"admin"`
|
||||
StorageBestAlloc func(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) `perm:"admin"`
|
||||
|
||||
StorageDeclareSector func(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error `perm:"admin"`
|
||||
StorageDeclareSector func(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error `perm:"admin"`
|
||||
|
||||
StorageDropSector func(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error `perm:"admin"`
|
||||
StorageDropSector func(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error `perm:"admin"`
|
||||
|
||||
StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) `perm:"admin"`
|
||||
StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) `perm:"admin"`
|
||||
|
||||
StorageGetLocks func(p0 context.Context) (storiface.SectorLocks, error) `perm:"admin"`
|
||||
|
||||
StorageInfo func(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) `perm:"admin"`
|
||||
StorageInfo func(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) `perm:"admin"`
|
||||
|
||||
StorageList func(p0 context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"`
|
||||
StorageList func(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) `perm:"admin"`
|
||||
|
||||
StorageLocal func(p0 context.Context) (map[stores.ID]string, error) `perm:"admin"`
|
||||
StorageLocal func(p0 context.Context) (map[storiface.ID]string, error) `perm:"admin"`
|
||||
|
||||
StorageLock func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error `perm:"admin"`
|
||||
|
||||
StorageReportHealth func(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error `perm:"admin"`
|
||||
StorageReportHealth func(p0 context.Context, p1 storiface.ID, p2 storiface.HealthReport) error `perm:"admin"`
|
||||
|
||||
StorageStat func(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) `perm:"admin"`
|
||||
StorageStat func(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) `perm:"admin"`
|
||||
|
||||
StorageTryLock func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) (bool, error) `perm:"admin"`
|
||||
|
||||
@ -898,11 +902,15 @@ type WorkerStruct struct {
|
||||
|
||||
GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"`
|
||||
|
||||
GenerateWindowPoSt func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 int, p5 abi.PoStRandomness) (storiface.WindowPoStResult, error) `perm:"admin"`
|
||||
|
||||
GenerateWinningPoSt func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 abi.PoStRandomness) ([]proof.PoStProof, error) `perm:"admin"`
|
||||
|
||||
Info func(p0 context.Context) (storiface.WorkerInfo, error) `perm:"admin"`
|
||||
|
||||
MoveStorage func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"`
|
||||
|
||||
Paths func(p0 context.Context) ([]stores.StoragePath, error) `perm:"admin"`
|
||||
Paths func(p0 context.Context) ([]storiface.StoragePath, error) `perm:"admin"`
|
||||
|
||||
ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"`
|
||||
|
||||
@ -1563,14 +1571,14 @@ func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Addr
|
||||
return *new(QueryOffer), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) {
|
||||
func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*StorageAsk, error) {
|
||||
if s.Internal.ClientQueryAsk == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.ClientQueryAsk(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) {
|
||||
func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*StorageAsk, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
@ -3708,6 +3716,17 @@ func (s *NetStub) NetPeers(p0 context.Context) ([]peer.AddrInfo, error) {
|
||||
return *new([]peer.AddrInfo), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *NetStruct) NetPing(p0 context.Context, p1 peer.ID) (time.Duration, error) {
|
||||
if s.Internal.NetPing == nil {
|
||||
return *new(time.Duration), ErrNotSupported
|
||||
}
|
||||
return s.Internal.NetPing(p0, p1)
|
||||
}
|
||||
|
||||
func (s *NetStub) NetPing(p0 context.Context, p1 peer.ID) (time.Duration, error) {
|
||||
return *new(time.Duration), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *NetStruct) NetProtectAdd(p0 context.Context, p1 []peer.ID) error {
|
||||
if s.Internal.NetProtectAdd == nil {
|
||||
return ErrNotSupported
|
||||
@ -3818,14 +3837,14 @@ func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Addres
|
||||
return *new(abi.SectorSize), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) {
|
||||
func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
|
||||
if s.Internal.CheckProvable == nil {
|
||||
return *new(map[abi.SectorNumber]string), ErrNotSupported
|
||||
}
|
||||
return s.Internal.CheckProvable(p0, p1, p2, p3, p4)
|
||||
return s.Internal.CheckProvable(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 []bool, p4 bool) (map[abi.SectorNumber]string, error) {
|
||||
func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
|
||||
return *new(map[abi.SectorNumber]string), ErrNotSupported
|
||||
}
|
||||
|
||||
@ -3840,6 +3859,17 @@ func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.Extende
|
||||
return *new([]builtin.PoStProof), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) ComputeWindowPoSt(p0 context.Context, p1 uint64, p2 types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) {
|
||||
if s.Internal.ComputeWindowPoSt == nil {
|
||||
return *new([]miner.SubmitWindowedPoStParams), ErrNotSupported
|
||||
}
|
||||
return s.Internal.ComputeWindowPoSt(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) ComputeWindowPoSt(p0 context.Context, p1 uint64, p2 types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) {
|
||||
return *new([]miner.SubmitWindowedPoStParams), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) CreateBackup(p0 context.Context, p1 string) error {
|
||||
if s.Internal.CreateBackup == nil {
|
||||
return ErrNotSupported
|
||||
@ -4852,59 +4882,59 @@ func (s *StorageMinerStub) StorageAddLocal(p0 context.Context, p1 string) error
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageAttach(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error {
|
||||
func (s *StorageMinerStruct) StorageAttach(p0 context.Context, p1 storiface.StorageInfo, p2 fsutil.FsStat) error {
|
||||
if s.Internal.StorageAttach == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.StorageAttach(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) StorageAttach(p0 context.Context, p1 stores.StorageInfo, p2 fsutil.FsStat) error {
|
||||
func (s *StorageMinerStub) StorageAttach(p0 context.Context, p1 storiface.StorageInfo, p2 fsutil.FsStat) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) {
|
||||
func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) {
|
||||
if s.Internal.StorageBestAlloc == nil {
|
||||
return *new([]stores.StorageInfo), ErrNotSupported
|
||||
return *new([]storiface.StorageInfo), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StorageBestAlloc(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]stores.StorageInfo, error) {
|
||||
return *new([]stores.StorageInfo), ErrNotSupported
|
||||
func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) {
|
||||
return *new([]storiface.StorageInfo), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageDeclareSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error {
|
||||
func (s *StorageMinerStruct) StorageDeclareSector(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error {
|
||||
if s.Internal.StorageDeclareSector == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.StorageDeclareSector(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) StorageDeclareSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error {
|
||||
func (s *StorageMinerStub) StorageDeclareSector(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageDropSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
|
||||
func (s *StorageMinerStruct) StorageDropSector(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
|
||||
if s.Internal.StorageDropSector == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.StorageDropSector(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) StorageDropSector(p0 context.Context, p1 stores.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
|
||||
func (s *StorageMinerStub) StorageDropSector(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) {
|
||||
func (s *StorageMinerStruct) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) {
|
||||
if s.Internal.StorageFindSector == nil {
|
||||
return *new([]stores.SectorStorageInfo), ErrNotSupported
|
||||
return *new([]storiface.SectorStorageInfo), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StorageFindSector(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) {
|
||||
return *new([]stores.SectorStorageInfo), ErrNotSupported
|
||||
func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) {
|
||||
return *new([]storiface.SectorStorageInfo), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageGetLocks(p0 context.Context) (storiface.SectorLocks, error) {
|
||||
@ -4918,37 +4948,37 @@ func (s *StorageMinerStub) StorageGetLocks(p0 context.Context) (storiface.Sector
|
||||
return *new(storiface.SectorLocks), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) {
|
||||
func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) {
|
||||
if s.Internal.StorageInfo == nil {
|
||||
return *new(stores.StorageInfo), ErrNotSupported
|
||||
return *new(storiface.StorageInfo), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StorageInfo(p0, p1)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) {
|
||||
return *new(stores.StorageInfo), ErrNotSupported
|
||||
func (s *StorageMinerStub) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) {
|
||||
return *new(storiface.StorageInfo), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageList(p0 context.Context) (map[stores.ID][]stores.Decl, error) {
|
||||
func (s *StorageMinerStruct) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
|
||||
if s.Internal.StorageList == nil {
|
||||
return *new(map[stores.ID][]stores.Decl), ErrNotSupported
|
||||
return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StorageList(p0)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) StorageList(p0 context.Context) (map[stores.ID][]stores.Decl, error) {
|
||||
return *new(map[stores.ID][]stores.Decl), ErrNotSupported
|
||||
func (s *StorageMinerStub) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
|
||||
return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageLocal(p0 context.Context) (map[stores.ID]string, error) {
|
||||
func (s *StorageMinerStruct) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
|
||||
if s.Internal.StorageLocal == nil {
|
||||
return *new(map[stores.ID]string), ErrNotSupported
|
||||
return *new(map[storiface.ID]string), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StorageLocal(p0)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) StorageLocal(p0 context.Context) (map[stores.ID]string, error) {
|
||||
return *new(map[stores.ID]string), ErrNotSupported
|
||||
func (s *StorageMinerStub) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
|
||||
return *new(map[storiface.ID]string), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageLock(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error {
|
||||
@ -4962,25 +4992,25 @@ func (s *StorageMinerStub) StorageLock(p0 context.Context, p1 abi.SectorID, p2 s
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageReportHealth(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error {
|
||||
func (s *StorageMinerStruct) StorageReportHealth(p0 context.Context, p1 storiface.ID, p2 storiface.HealthReport) error {
|
||||
if s.Internal.StorageReportHealth == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.StorageReportHealth(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) StorageReportHealth(p0 context.Context, p1 stores.ID, p2 stores.HealthReport) error {
|
||||
func (s *StorageMinerStub) StorageReportHealth(p0 context.Context, p1 storiface.ID, p2 storiface.HealthReport) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) StorageStat(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) {
|
||||
func (s *StorageMinerStruct) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) {
|
||||
if s.Internal.StorageStat == nil {
|
||||
return *new(fsutil.FsStat), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StorageStat(p0, p1)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) StorageStat(p0 context.Context, p1 stores.ID) (fsutil.FsStat, error) {
|
||||
func (s *StorageMinerStub) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) {
|
||||
return *new(fsutil.FsStat), ErrNotSupported
|
||||
}
|
||||
|
||||
@ -5171,6 +5201,28 @@ func (s *WorkerStub) GenerateSectorKeyFromData(p0 context.Context, p1 storage.Se
|
||||
return *new(storiface.CallID), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *WorkerStruct) GenerateWindowPoSt(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 int, p5 abi.PoStRandomness) (storiface.WindowPoStResult, error) {
|
||||
if s.Internal.GenerateWindowPoSt == nil {
|
||||
return *new(storiface.WindowPoStResult), ErrNotSupported
|
||||
}
|
||||
return s.Internal.GenerateWindowPoSt(p0, p1, p2, p3, p4, p5)
|
||||
}
|
||||
|
||||
func (s *WorkerStub) GenerateWindowPoSt(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 int, p5 abi.PoStRandomness) (storiface.WindowPoStResult, error) {
|
||||
return *new(storiface.WindowPoStResult), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *WorkerStruct) GenerateWinningPoSt(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 abi.PoStRandomness) ([]proof.PoStProof, error) {
|
||||
if s.Internal.GenerateWinningPoSt == nil {
|
||||
return *new([]proof.PoStProof), ErrNotSupported
|
||||
}
|
||||
return s.Internal.GenerateWinningPoSt(p0, p1, p2, p3, p4)
|
||||
}
|
||||
|
||||
func (s *WorkerStub) GenerateWinningPoSt(p0 context.Context, p1 abi.RegisteredPoStProof, p2 abi.ActorID, p3 []storiface.PostSectorChallenge, p4 abi.PoStRandomness) ([]proof.PoStProof, error) {
|
||||
return *new([]proof.PoStProof), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *WorkerStruct) Info(p0 context.Context) (storiface.WorkerInfo, error) {
|
||||
if s.Internal.Info == nil {
|
||||
return *new(storiface.WorkerInfo), ErrNotSupported
|
||||
@ -5193,15 +5245,15 @@ func (s *WorkerStub) MoveStorage(p0 context.Context, p1 storage.SectorRef, p2 st
|
||||
return *new(storiface.CallID), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *WorkerStruct) Paths(p0 context.Context) ([]stores.StoragePath, error) {
|
||||
func (s *WorkerStruct) Paths(p0 context.Context) ([]storiface.StoragePath, error) {
|
||||
if s.Internal.Paths == nil {
|
||||
return *new([]stores.StoragePath), ErrNotSupported
|
||||
return *new([]storiface.StoragePath), ErrNotSupported
|
||||
}
|
||||
return s.Internal.Paths(p0)
|
||||
}
|
||||
|
||||
func (s *WorkerStub) Paths(p0 context.Context) ([]stores.StoragePath, error) {
|
||||
return *new([]stores.StoragePath), ErrNotSupported
|
||||
func (s *WorkerStub) Paths(p0 context.Context) ([]storiface.StoragePath, error) {
|
||||
return *new([]storiface.StoragePath), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *WorkerStruct) ProcessSession(p0 context.Context) (uuid.UUID, error) {
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package api
|
||||
|
||||
import (
|
||||
@ -29,6 +30,7 @@ type StrC struct {
|
||||
}
|
||||
|
||||
func TestGetInternalStructs(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_API_STRUCTS_001
|
||||
var proxy StrA
|
||||
|
||||
sts := GetInternalStructs(&proxy)
|
||||
@ -44,6 +46,7 @@ func TestGetInternalStructs(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNestedInternalStructs(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_API_STRUCTS_001
|
||||
var proxy StrC
|
||||
|
||||
// check that only the top-level internal struct gets picked up
|
||||
|
@ -7,6 +7,7 @@ package v0mocks
import (
context "context"
reflect "reflect"
time "time"

address "github.com/filecoin-project/go-address"
bitfield "github.com/filecoin-project/go-bitfield"
@ -1769,6 +1770,21 @@ func (mr *MockFullNodeMockRecorder) NetPeers(arg0 interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPeers", reflect.TypeOf((*MockFullNode)(nil).NetPeers), arg0)
}

// NetPing mocks base method.
func (m *MockFullNode) NetPing(arg0 context.Context, arg1 peer.ID) (time.Duration, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NetPing", arg0, arg1)
ret0, _ := ret[0].(time.Duration)
ret1, _ := ret[1].(error)
return ret0, ret1
}

// NetPing indicates an expected call of NetPing.
func (mr *MockFullNodeMockRecorder) NetPing(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPing", reflect.TypeOf((*MockFullNode)(nil).NetPing), arg0, arg1)
}

// NetProtectAdd mocks base method.
func (m *MockFullNode) NetProtectAdd(arg0 context.Context, arg1 []peer.ID) error {
m.ctrl.T.Helper()

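The NetPing methods above are mockgen output for the new FullNode.NetPing API. As a rough, illustrative sketch only, a test could drive the generated mock roughly as below; the controller setup, matchers, return values, test name, and the v0mocks import path are assumptions on my part, not part of this change:

// Hypothetical usage sketch for the generated mock; not part of this diff.
package v0mocks_test

import (
	"context"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	"github.com/libp2p/go-libp2p-core/peer"
	"github.com/stretchr/testify/require"

	v0mocks "github.com/filecoin-project/lotus/api/v0api/v0mocks"
)

func TestNetPingMockSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	node := v0mocks.NewMockFullNode(ctrl)
	// Expect a single NetPing call with any context and peer, returning a fake RTT.
	node.EXPECT().NetPing(gomock.Any(), gomock.Any()).Return(10*time.Millisecond, nil)

	rtt, err := node.NetPing(context.Background(), peer.ID("example-peer"))
	require.NoError(t, err)
	require.Equal(t, 10*time.Millisecond, rtt)
}
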
@ -3,6 +3,9 @@ package v0api
import (
"context"

"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/libp2p/go-libp2p-core/peer"

"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/crypto"
@ -341,4 +344,12 @@ func (w *WrapperV1Full) PaychGet(ctx context.Context, from, to address.Address,
return w.FullNode.PaychFund(ctx, from, to, amt)
}

func (w *WrapperV1Full) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) {
a, err := w.FullNode.ClientQueryAsk(ctx, p, miner)
if err != nil {
return nil, err
}
return a.Response, nil
}

var _ FullNode = &WrapperV1Full{}

@ -1,10 +1,10 @@
|
||||
//stm: #unit
|
||||
package badgerbs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@ -20,6 +20,8 @@ import (
|
||||
)
|
||||
|
||||
func TestBadgerBlockstore(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
(&Suite{
|
||||
NewBlockstore: newBlockstore(DefaultOptions),
|
||||
OpenBlockstore: openBlockstore(DefaultOptions),
|
||||
@ -38,6 +40,8 @@ func TestBadgerBlockstore(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStorageKey(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_STORAGE_KEY_001
|
||||
bs, _ := newBlockstore(DefaultOptions)(t)
|
||||
bbs := bs.(*Blockstore)
|
||||
defer bbs.Close() //nolint:errcheck
|
||||
@ -73,20 +77,13 @@ func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) (
return func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) {
tb.Helper()

path, err := ioutil.TempDir("", "")
if err != nil {
tb.Fatal(err)
}
path = tb.TempDir()

db, err := Open(optsSupplier(path))
if err != nil {
tb.Fatal(err)
}

tb.Cleanup(func() {
_ = os.RemoveAll(path)
})

return db, path
}
}

@ -100,17 +97,10 @@ func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB,
|
||||
|
||||
func testMove(t *testing.T, optsF func(string) Options) {
|
||||
ctx := context.Background()
|
||||
basePath, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
basePath := t.TempDir()
|
||||
|
||||
dbPath := filepath.Join(basePath, "db")
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(basePath)
|
||||
})
|
||||
|
||||
db, err := Open(optsF(dbPath))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -265,10 +255,16 @@ func testMove(t *testing.T, optsF func(string) Options) {
|
||||
}
|
||||
|
||||
func TestMoveNoPrefix(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_COLLECT_GARBAGE_001
|
||||
testMove(t, DefaultOptions)
|
||||
}
|
||||
|
||||
func TestMoveWithPrefix(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_COLLECT_GARBAGE_001
|
||||
testMove(t, func(path string) Options {
|
||||
opts := DefaultOptions(path)
|
||||
opts.Prefix = "/prefixed/"
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package badgerbs
|
||||
|
||||
import (
|
||||
@ -44,6 +45,8 @@ func (s *Suite) RunTests(t *testing.T, prefix string) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -57,6 +60,8 @@ func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -68,6 +73,9 @@ func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestPutThenGetBlock(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -85,6 +93,8 @@ func (s *Suite) TestPutThenGetBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestHas(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_HAS_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -106,6 +116,9 @@ func (s *Suite) TestHas(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestCidv0v1(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -123,6 +136,9 @@ func (s *Suite) TestCidv0v1(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_SIZE_001
|
||||
ctx := context.Background()
|
||||
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
@ -154,6 +170,8 @@ func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestAllKeysSimple(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
defer func() { require.NoError(t, c.Close()) }()
|
||||
@ -170,6 +188,9 @@ func (s *Suite) TestAllKeysSimple(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestAllKeysRespectsContext(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
defer func() { require.NoError(t, c.Close()) }()
|
||||
@ -200,6 +221,7 @@ func (s *Suite) TestAllKeysRespectsContext(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestDoubleClose(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
c, ok := bs.(io.Closer)
|
||||
if !ok {
|
||||
@ -210,6 +232,9 @@ func (s *Suite) TestDoubleClose(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestReopenPutGet(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001
|
||||
ctx := context.Background()
|
||||
bs, path := s.NewBlockstore(t)
|
||||
c, ok := bs.(io.Closer)
|
||||
@ -236,6 +261,10 @@ func (s *Suite) TestReopenPutGet(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestPutMany(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
|
||||
//stm: @SPLITSTORE_BADGER_HAS_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_GET_001, @SPLITSTORE_BADGER_PUT_MANY_001
|
||||
//stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
@ -268,6 +297,11 @@ func (s *Suite) TestPutMany(t *testing.T) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestDelete(t *testing.T) {
|
||||
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
|
||||
//stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_POOLED_STORAGE_HAS_001
|
||||
//stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001, @SPLITSTORE_BADGER_HAS_001
|
||||
//stm: @SPLITSTORE_BADGER_PUT_MANY_001
|
||||
|
||||
ctx := context.Background()
|
||||
bs, _ := s.NewBlockstore(t)
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
|
@ -1,8 +1,6 @@
|
||||
package splitstore
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
@ -11,14 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestCheckpoint(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "checkpoint.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(dir)
|
||||
})
|
||||
dir := t.TempDir()
|
||||
|
||||
path := filepath.Join(dir, "checkpoint")
|
||||
|
||||
|
@ -2,8 +2,6 @@ package splitstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
@ -12,14 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestColdSet(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "coldset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(dir)
|
||||
})
|
||||
dir := t.TempDir()
|
||||
|
||||
path := filepath.Join(dir, "coldset")
|
||||
|
||||
|
@ -1,8 +1,7 @@
|
||||
//stm: #unit
|
||||
package splitstore
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
cid "github.com/ipfs/go-cid"
|
||||
@ -10,6 +9,8 @@ import (
|
||||
)
|
||||
|
||||
func TestMapMarkSet(t *testing.T) {
|
||||
//stm: @SPLITSTORE_MARKSET_CREATE_001, @SPLITSTORE_MARKSET_HAS_001, @@SPLITSTORE_MARKSET_MARK_001
|
||||
//stm: @SPLITSTORE_MARKSET_CLOSE_001, @SPLITSTORE_MARKSET_CREATE_VISITOR_001
|
||||
testMarkSet(t, "map")
|
||||
testMarkSetRecovery(t, "map")
|
||||
testMarkSetMarkMany(t, "map")
|
||||
@ -18,6 +19,8 @@ func TestMapMarkSet(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestBadgerMarkSet(t *testing.T) {
|
||||
//stm: @SPLITSTORE_MARKSET_CREATE_001, @SPLITSTORE_MARKSET_HAS_001, @@SPLITSTORE_MARKSET_MARK_001
|
||||
//stm: @SPLITSTORE_MARKSET_CLOSE_001, @SPLITSTORE_MARKSET_CREATE_VISITOR_001
|
||||
bs := badgerMarkSetBatchSize
|
||||
badgerMarkSetBatchSize = 1
|
||||
t.Cleanup(func() {
|
||||
@ -31,14 +34,7 @@ func TestBadgerMarkSet(t *testing.T) {
|
||||
}
|
||||
|
||||
func testMarkSet(t *testing.T, lsType string) {
|
||||
path, err := ioutil.TempDir("", "markset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
env, err := OpenMarkSetEnv(path, lsType)
|
||||
if err != nil {
|
||||
@ -46,6 +42,7 @@ func testMarkSet(t *testing.T, lsType string) {
|
||||
}
|
||||
defer env.Close() //nolint:errcheck
|
||||
|
||||
// stm: @SPLITSTORE_MARKSET_CREATE_001
|
||||
hotSet, err := env.New("hot", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -65,6 +62,7 @@ func testMarkSet(t *testing.T, lsType string) {
|
||||
return cid.NewCidV1(cid.Raw, h)
|
||||
}
|
||||
|
||||
// stm: @SPLITSTORE_MARKSET_HAS_001
|
||||
mustHave := func(s MarkSet, cid cid.Cid) {
|
||||
t.Helper()
|
||||
has, err := s.Has(cid)
|
||||
@ -94,6 +92,7 @@ func testMarkSet(t *testing.T, lsType string) {
|
||||
k3 := makeCid("c")
|
||||
k4 := makeCid("d")
|
||||
|
||||
// stm: @SPLITSTORE_MARKSET_MARK_001
|
||||
hotSet.Mark(k1) //nolint
|
||||
hotSet.Mark(k2) //nolint
|
||||
coldSet.Mark(k3) //nolint
|
||||
@ -144,6 +143,7 @@ func testMarkSet(t *testing.T, lsType string) {
|
||||
mustNotHave(coldSet, k3)
|
||||
mustNotHave(coldSet, k4)
|
||||
|
||||
//stm: @SPLITSTORE_MARKSET_CLOSE_001
|
||||
err = hotSet.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -156,14 +156,7 @@ func testMarkSet(t *testing.T, lsType string) {
|
||||
}
|
||||
|
||||
func testMarkSetVisitor(t *testing.T, lsType string) {
|
||||
path, err := ioutil.TempDir("", "markset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
env, err := OpenMarkSetEnv(path, lsType)
|
||||
if err != nil {
|
||||
@ -171,6 +164,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
|
||||
}
|
||||
defer env.Close() //nolint:errcheck
|
||||
|
||||
//stm: @SPLITSTORE_MARKSET_CREATE_VISITOR_001
|
||||
visitor, err := env.New("test", 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@ -225,14 +219,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) {
|
||||
}
|
||||
|
||||
func testMarkSetVisitorRecovery(t *testing.T, lsType string) {
|
||||
path, err := ioutil.TempDir("", "markset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
env, err := OpenMarkSetEnv(path, lsType)
|
||||
if err != nil {
|
||||
@ -324,14 +311,7 @@ func testMarkSetVisitorRecovery(t *testing.T, lsType string) {
|
||||
}
|
||||
|
||||
func testMarkSetRecovery(t *testing.T, lsType string) {
|
||||
path, err := ioutil.TempDir("", "markset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
env, err := OpenMarkSetEnv(path, lsType)
|
||||
if err != nil {
|
||||
@ -437,14 +417,7 @@ func testMarkSetRecovery(t *testing.T, lsType string) {
|
||||
}
|
||||
|
||||
func testMarkSetMarkMany(t *testing.T, lsType string) {
|
||||
path, err := ioutil.TempDir("", "markset.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
env, err := OpenMarkSetEnv(path, lsType)
|
||||
if err != nil {
|
||||
|
@ -1,12 +1,11 @@
|
||||
//stm: #unit
|
||||
package splitstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
@ -85,14 +84,7 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
path, err := ioutil.TempDir("", "splitstore.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
// open the splitstore
|
||||
ss, err := Open(path, ds, hot, cold, cfg)
|
||||
@ -228,10 +220,16 @@ func testSplitStore(t *testing.T, cfg *Config) {
|
||||
}
|
||||
|
||||
func TestSplitStoreCompaction(t *testing.T) {
|
||||
//stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
testSplitStore(t, &Config{MarkSetType: "map"})
|
||||
}
|
||||
|
||||
func TestSplitStoreCompactionWithBadger(t *testing.T) {
|
||||
//stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
bs := badgerMarkSetBatchSize
|
||||
badgerMarkSetBatchSize = 1
|
||||
t.Cleanup(func() {
|
||||
@ -241,6 +239,9 @@ func TestSplitStoreCompactionWithBadger(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
|
||||
//stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
ctx := context.Background()
|
||||
chain := &mockChain{t: t}
|
||||
|
||||
@ -277,14 +278,7 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
path, err := ioutil.TempDir("", "splitstore.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
// open the splitstore
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
@ -424,14 +418,7 @@ func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore.
|
||||
}
|
||||
}
|
||||
|
||||
path, err := ioutil.TempDir("", "splitstore.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
if err != nil {
|
||||
@ -531,14 +518,7 @@ func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blocks
|
||||
}
|
||||
}
|
||||
|
||||
path, err := ioutil.TempDir("", "splitstore.*")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = os.RemoveAll(path)
|
||||
})
|
||||
path := t.TempDir()
|
||||
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
if err != nil {
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
@ -13,6 +14,9 @@ import (
|
||||
)
|
||||
|
||||
func TestTimedCacheBlockstoreSimple(t *testing.T) {
|
||||
//stm: @SPLITSTORE_TIMED_BLOCKSTORE_START_001
|
||||
//stm: @SPLITSTORE_TIMED_BLOCKSTORE_PUT_001, @SPLITSTORE_TIMED_BLOCKSTORE_HAS_001, @SPLITSTORE_TIMED_BLOCKSTORE_GET_001
|
||||
//stm: @SPLITSTORE_TIMED_BLOCKSTORE_ALL_KEYS_CHAN_001
|
||||
tc := NewTimedCacheBlockstore(10 * time.Millisecond)
|
||||
mClock := clock.NewMock()
|
||||
mClock.Set(time.Now())
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
@ -15,6 +16,7 @@ var (
|
||||
)
|
||||
|
||||
func TestUnionBlockstore_Get(t *testing.T) {
|
||||
//stm: @SPLITSTORE_UNION_BLOCKSTORE_GET_001
|
||||
ctx := context.Background()
|
||||
m1 := NewMemory()
|
||||
m2 := NewMemory()
|
||||
@ -34,6 +36,9 @@ func TestUnionBlockstore_Get(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUnionBlockstore_Put_PutMany_Delete_AllKeysChan(t *testing.T) {
|
||||
//stm: @SPLITSTORE_UNION_BLOCKSTORE_PUT_001, @SPLITSTORE_UNION_BLOCKSTORE_HAS_001
|
||||
//stm: @SPLITSTORE_UNION_BLOCKSTORE_PUT_MANY_001, @SPLITSTORE_UNION_BLOCKSTORE_DELETE_001
|
||||
//stm: @SPLITSTORE_UNION_BLOCKSTORE_ALL_KEYS_CHAN_001
|
||||
ctx := context.Background()
|
||||
m1 := NewMemory()
|
||||
m2 := NewMemory()
|
||||
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -1,3 +1,4 @@
//stm: #unit
package build

import (
@ -7,6 +8,7 @@ import (
)

func TestOpenRPCDiscoverJSON_Version(t *testing.T) {
//stm: @OTHER_IMPLEMENTATION_OPENRPC_VERSION_001
// openRPCDocVersion is the current OpenRPC version of the API docs.
openRPCDocVersion := "1.2.6"

@ -37,7 +37,7 @@ func BuildTypeString() string {
}

// BuildVersion is the local build version
const BuildVersion = "1.15.1"
const BuildVersion = "1.15.2"

func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package adt
|
||||
|
||||
import (
|
||||
@ -44,6 +45,7 @@ func TestDiffAdtArray(t *testing.T) {
|
||||
|
||||
changes := new(TestDiffArray)
|
||||
|
||||
//stm: @CHAIN_ADT_ARRAY_DIFF_001
|
||||
assert.NoError(t, DiffAdtArray(arrA, arrB, changes))
|
||||
assert.NotNil(t, changes)
|
||||
|
||||
@ -98,6 +100,7 @@ func TestDiffAdtMap(t *testing.T) {
|
||||
|
||||
changes := new(TestDiffMap)
|
||||
|
||||
//stm: @CHAIN_ADT_MAP_DIFF_001
|
||||
assert.NoError(t, DiffAdtMap(mapA, mapB, changes))
|
||||
assert.NotNil(t, changes)
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package aerrors_test
|
||||
|
||||
import (
|
||||
@ -11,6 +12,7 @@ import (
|
||||
)
|
||||
|
||||
func TestFatalError(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_ACTOR_ERRORS_001
|
||||
e1 := xerrors.New("out of disk space")
|
||||
e2 := xerrors.Errorf("could not put node: %w", e1)
|
||||
e3 := xerrors.Errorf("could not save head: %w", e2)
|
||||
@ -24,6 +26,7 @@ func TestFatalError(t *testing.T) {
|
||||
assert.True(t, IsFatal(aw4), "should be fatal")
|
||||
}
|
||||
func TestAbsorbeError(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_ACTOR_ERRORS_001
|
||||
e1 := xerrors.New("EOF")
|
||||
e2 := xerrors.Errorf("could not decode: %w", e1)
|
||||
ae := Absorb(e2, 35, "failed to decode CBOR")
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package policy
|
||||
|
||||
import (
|
||||
@ -22,6 +23,7 @@ func TestSupportedProofTypes(t *testing.T) {
|
||||
for t := range miner0.SupportedProofTypes {
|
||||
oldTypes = append(oldTypes, t)
|
||||
}
|
||||
//stm: @BLOCKCHAIN_POLICY_SET_MAX_SUPPORTED_PROOF_TYPES_001
|
||||
t.Cleanup(func() {
|
||||
SetSupportedProofTypes(oldTypes...)
|
||||
})
|
||||
@ -33,6 +35,7 @@ func TestSupportedProofTypes(t *testing.T) {
|
||||
abi.RegisteredSealProof_StackedDrg2KiBV1: {},
|
||||
},
|
||||
)
|
||||
//stm: @BLOCKCHAIN_POLICY_ADD_MAX_SUPPORTED_PROOF_TYPES_001
|
||||
AddSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1)
|
||||
require.EqualValues(t,
|
||||
miner0.SupportedProofTypes,
|
||||
@ -45,6 +48,7 @@ func TestSupportedProofTypes(t *testing.T) {
|
||||
|
||||
// Tests assumptions about policies being the same between actor versions.
|
||||
func TestAssumptions(t *testing.T) {
|
||||
//stm: @BLOCKCHAIN_POLICY_ASSUMPTIONS_001
|
||||
require.EqualValues(t, miner0.SupportedProofTypes, miner2.PreCommitSealProofTypesV0)
|
||||
require.Equal(t, miner0.PreCommitChallengeDelay, miner2.PreCommitChallengeDelay)
|
||||
require.Equal(t, miner0.MaxSectorExpirationExtension, miner2.MaxSectorExpirationExtension)
|
||||
@ -58,6 +62,7 @@ func TestAssumptions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPartitionSizes(t *testing.T) {
|
||||
//stm: @CHAIN_ACTOR_PARTITION_SIZES_001
|
||||
for _, p := range abi.SealProofInfos {
|
||||
sizeNew, err := builtin2.PoStProofWindowPoStPartitionSectors(p.WindowPoStProof)
|
||||
require.NoError(t, err)
|
||||
@ -71,6 +76,7 @@ func TestPartitionSizes(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPoStSize(t *testing.T) {
|
||||
//stm: @BLOCKCHAIN_POLICY_GET_MAX_POST_PARTITIONS_001
|
||||
v12PoStSize, err := GetMaxPoStPartitions(network.Version12, abi.RegisteredPoStProof_StackedDrgWindow64GiBV1)
|
||||
require.Equal(t, 4, v12PoStSize)
|
||||
require.NoError(t, err)
|
||||
|
@ -1,3 +1,5 @@
//stm: ignore
//Only tests external library behavior, therefore it should not be annotated
package drand

import (

@ -467,7 +467,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl
}

nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height)
pl := vm.PricelistByEpoch(b.Header.Height)
pl := vm.PricelistByEpochAndNetworkVersion(b.Header.Height, nv)
var sumGasLimit int64
checkMsg := func(msg types.ChainMsg) error {
m := msg.VMMessage()

@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package events
|
||||
|
||||
import (
|
||||
@ -358,6 +359,7 @@ func (fcs *fakeCS) advance(rev, app, drop int, msgs map[int]cid.Cid, nulls ...in
|
||||
var _ EventAPI = &fakeCS{}
|
||||
|
||||
func TestAt(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
require.NoError(t, err)
|
||||
@ -418,6 +420,7 @@ func TestAt(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtNullTrigger(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
require.NoError(t, err)
|
||||
@ -447,6 +450,7 @@ func TestAtNullTrigger(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtNullConf(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001, @EVENTS_HEIGHT_REVERT_001
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
@ -485,6 +489,7 @@ func TestAtNullConf(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtStart(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -515,6 +520,7 @@ func TestAtStart(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtStartConfidence(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -541,6 +547,7 @@ func TestAtStartConfidence(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtChained(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -571,6 +578,7 @@ func TestAtChained(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtChainedConfidence(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -601,6 +609,7 @@ func TestAtChainedConfidence(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAtChainedConfidenceNull(t *testing.T) {
|
||||
//stm: @EVENTS_HEIGHT_CHAIN_AT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -632,6 +641,7 @@ func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Messag
|
||||
}
|
||||
|
||||
func TestCalled(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -837,6 +847,7 @@ func TestCalled(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCalledTimeout(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -897,6 +908,7 @@ func TestCalledTimeout(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCalledOrder(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -953,6 +965,7 @@ func TestCalledOrder(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCalledNull(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -1011,6 +1024,7 @@ func TestCalledNull(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveTriggersOnMessage(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -1094,6 +1108,7 @@ type testStateChange struct {
|
||||
}
|
||||
|
||||
func TestStateChanged(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -1179,6 +1194,7 @@ func TestStateChanged(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStateChangedRevert(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -1255,6 +1271,7 @@ func TestStateChangedRevert(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStateChangedTimeout(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
timeoutHeight := abi.ChainEpoch(20)
|
||||
confidence := 3
|
||||
|
||||
@ -1332,6 +1349,7 @@ func TestStateChangedTimeout(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCalledMultiplePerEpoch(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
events, err := NewEvents(context.Background(), fcs)
|
||||
@ -1384,6 +1402,7 @@ func TestCalledMultiplePerEpoch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCachedSameBlock(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
fcs := newFakeCS(t)
|
||||
|
||||
_, err := NewEvents(context.Background(), fcs)
|
||||
@ -1418,6 +1437,7 @@ func (t *testObserver) Revert(_ context.Context, from, to *types.TipSet) error {
|
||||
}
|
||||
|
||||
func TestReconnect(t *testing.T) {
|
||||
//stm: @EVENTS_EVENTS_CALLED_001, @EVENTS_HEIGHT_REVERT_001
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
@ -1449,3 +1469,35 @@ func TestReconnect(t *testing.T) {
|
||||
fcs.advance(0, 5, 2, nil, 0, 1, 3)
|
||||
require.True(t, fcs.callNumber["ChainGetPath"] == 4)
|
||||
}
|
||||
|
||||
func TestUnregister(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

fcs := newFakeCS(t)

events, err := NewEvents(ctx, fcs)
require.NoError(t, err)

tsObs := &testObserver{t: t}
events.Observe(tsObs)

// observer receives heads as the chain advances
fcs.advance(0, 1, 0, nil)
headBeforeDeregister := events.lastTs
require.Equal(t, tsObs.head, headBeforeDeregister)

// observer unregistered successfully
found := events.Unregister(tsObs)
require.True(t, found)

// observer stops receiving heads as the chain advances
fcs.advance(0, 1, 0, nil)
require.Equal(t, tsObs.head, headBeforeDeregister)
require.NotEqual(t, tsObs.head, events.lastTs)

// unregistering an invalid observer returns false
dneObs := &testObserver{t: t}
found = events.Unregister(dneObs)
require.False(t, found)
}

@ -253,3 +253,27 @@ func (o *observer) Observe(obs TipSetObserver) *types.TipSet {
o.observers = append(o.observers, obs)
return o.head
}

// Unregister unregisters an observer. Returns true if we successfully removed the observer.
//
// NOTE: The observer _may_ be called after being removed. Observers MUST handle this case
// internally.
func (o *observer) Unregister(obs TipSetObserver) (found bool) {
o.lk.Lock()
defer o.lk.Unlock()
// We _copy_ the observers list because we may be concurrently reading it from a headChange
// handler.
//
// This should happen infrequently, so it's fine if we spend a bit of time here.
newObservers := make([]TipSetObserver, 0, len(o.observers))
for _, existingObs := range o.observers {
if existingObs == obs {
found = true
continue
}
newObservers = append(newObservers, existingObs)
}

o.observers = newObservers
return found
}

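The Unregister doc comment above spells out the design choice: removal rebuilds the observer slice under the lock instead of mutating it in place, so a head-change handler that grabbed the old slice keeps iterating safely. A minimal, self-contained sketch of that copy-on-remove pattern outside of lotus follows; the Registry type and its method names are hypothetical, for illustration only:

package observers

import "sync"

type Observer interface{ Notify(head string) }

// Registry removes observers by building a fresh slice, so a concurrent
// reader holding the previous slice never sees it mutated underneath it.
type Registry struct {
	mu        sync.Mutex
	observers []Observer
}

func (r *Registry) Add(o Observer) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.observers = append(r.observers, o)
}

func (r *Registry) Remove(o Observer) (found bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	kept := make([]Observer, 0, len(r.observers))
	for _, existing := range r.observers {
		if existing == o {
			found = true
			continue // drop the match, keep everything else
		}
		kept = append(kept, existing)
	}
	r.observers = kept
	return found
}
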
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package state
|
||||
|
||||
import (
|
||||
@ -35,6 +36,12 @@ func init() {
|
||||
}
|
||||
|
||||
func TestMarketPredicates(t *testing.T) {
|
||||
//stm: @EVENTS_PREDICATES_ON_ACTOR_STATE_CHANGED_001, @EVENTS_PREDICATES_DEAL_STATE_CHANGED_001
|
||||
//stm: @EVENTS_PREDICATES_DEAL_CHANGED_FOR_IDS
|
||||
|
||||
//stm: @EVENTS_PREDICATES_ON_BALANCE_CHANGED_001, @EVENTS_PREDICATES_BALANCE_CHANGED_FOR_ADDRESS_001
|
||||
//stm: @EVENTS_PREDICATES_ON_DEAL_PROPOSAL_CHANGED_001, @EVENTS_PREDICATES_PROPOSAL_AMT_CHANGED_001
|
||||
//stm: @EVENTS_PREDICATES_DEAL_STATE_CHANGED_001, @EVENTS_PREDICATES_DEAL_AMT_CHANGED_001
|
||||
ctx := context.Background()
|
||||
bs := bstore.NewMemorySync()
|
||||
store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs))
|
||||
@ -333,6 +340,8 @@ func TestMarketPredicates(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMinerSectorChange(t *testing.T) {
|
||||
//stm: @EVENTS_PREDICATES_ON_ACTOR_STATE_CHANGED_001, @EVENTS_PREDICATES_MINER_ACTOR_CHANGE_001
|
||||
//stm: @EVENTS_PREDICATES_MINER_SECTOR_CHANGE_001
|
||||
ctx := context.Background()
|
||||
bs := bstore.NewMemorySync()
|
||||
store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs))
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package events
|
||||
|
||||
import (
|
||||
@ -92,6 +93,7 @@ func (h *cacheHarness) skip(n abi.ChainEpoch) {
|
||||
}
|
||||
|
||||
func TestTsCache(t *testing.T) {
|
||||
//stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001, @EVENTS_CACHE_GET_001, @EVENTS_CACHE_ADD_001
|
||||
h := newCacheharness(t)
|
||||
|
||||
for i := 0; i < 9000; i++ {
|
||||
@ -104,6 +106,8 @@ func TestTsCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTsCacheNulls(t *testing.T) {
|
||||
//stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001, @EVENTS_CACHE_GET_CHAIN_TIPSET_BEFORE_001, @EVENTS_CACHE_GET_CHAIN_TIPSET_AFTER_001
|
||||
//stm: @EVENTS_CACHE_GET_001, @EVENTS_CACHE_ADD_001
|
||||
ctx := context.Background()
|
||||
h := newCacheharness(t)
|
||||
|
||||
@ -182,6 +186,7 @@ func (tc *tsCacheAPIStorageCallCounter) ChainGetTipSet(ctx context.Context, tsk
|
||||
}
|
||||
|
||||
func TestTsCacheEmpty(t *testing.T) {
|
||||
//stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001
|
||||
// Calling best on an empty cache should just call out to the chain API
|
||||
callCounter := &tsCacheAPIStorageCallCounter{t: t}
|
||||
tsc := newTSCache(callCounter, 50)
|
||||
@ -191,6 +196,7 @@ func TestTsCacheEmpty(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTsCacheSkip(t *testing.T) {
|
||||
//stm: @EVENTS_CACHE_GET_CHAIN_HEAD_001, @EVENTS_CACHE_GET_001, @EVENTS_CACHE_ADD_001
|
||||
h := newCacheharness(t)
|
||||
|
||||
ts, err := types.NewTipSet([]*types.BlockHeader{{
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package gen
|
||||
|
||||
import (
|
||||
@ -34,6 +35,7 @@ func testGeneration(t testing.TB, n int, msgs int, sectors int) {
|
||||
}
|
||||
|
||||
func TestChainGeneration(t *testing.T) {
|
||||
//stm: @CHAIN_GEN_NEW_GEN_WITH_SECTORS_001, @CHAIN_GEN_NEXT_TIPSET_001
|
||||
t.Run("10-20-1", func(t *testing.T) { testGeneration(t, 10, 20, 1) })
|
||||
t.Run("10-20-25", func(t *testing.T) { testGeneration(t, 10, 20, 25) })
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package market
|
||||
|
||||
import (
|
||||
@ -22,6 +23,7 @@ import (
|
||||
|
||||
// TestFundManagerBasic verifies that the basic fund manager operations work
|
||||
func TestFundManagerBasic(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -106,6 +108,7 @@ func TestFundManagerBasic(t *testing.T) {
|
||||
|
||||
// TestFundManagerParallel verifies that operations can be run in parallel
|
||||
func TestFundManagerParallel(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -197,6 +200,7 @@ func TestFundManagerParallel(t *testing.T) {
|
||||
|
||||
// TestFundManagerReserveByWallet verifies that reserve requests are grouped by wallet
|
||||
func TestFundManagerReserveByWallet(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -290,6 +294,7 @@ func TestFundManagerReserveByWallet(t *testing.T) {
|
||||
// TestFundManagerWithdrawal verifies that as many withdraw operations as
|
||||
// possible are processed
|
||||
func TestFundManagerWithdrawalLimit(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -384,6 +389,7 @@ func TestFundManagerWithdrawalLimit(t *testing.T) {
|
||||
|
||||
// TestFundManagerWithdrawByWallet verifies that withdraw requests are grouped by wallet
|
||||
func TestFundManagerWithdrawByWallet(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001, @MARKET_WITHDRAW_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -493,6 +499,7 @@ func TestFundManagerWithdrawByWallet(t *testing.T) {
|
||||
// TestFundManagerRestart verifies that waiting for incomplete requests resumes
|
||||
// on restart
|
||||
func TestFundManagerRestart(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
@ -559,6 +566,7 @@ func TestFundManagerRestart(t *testing.T) {
|
||||
// 3. Deal B completes, reducing addr1 by 7: reserved 12 available 12 -> 5
|
||||
// 4. Deal A releases 5 from addr1: reserved 12 -> 7 available 5
|
||||
func TestFundManagerReleaseAfterPublish(t *testing.T) {
|
||||
//stm: @MARKET_RESERVE_FUNDS_001, @MARKET_RELEASE_FUNDS_001
|
||||
s := setup(t)
|
||||
defer s.fm.Stop()
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package messagepool
|
||||
|
||||
import (
|
||||
@ -8,6 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestBlockProbability(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_BLOCK_PROB_001
|
||||
mp := &MessagePool{}
|
||||
bp := mp.blockProbabilities(1 - 0.15)
|
||||
t.Logf("%+v\n", bp)
|
||||
@ -20,6 +22,7 @@ func TestBlockProbability(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestWinnerProba(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_BLOCK_PROB_002
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
const N = 1000000
|
||||
winnerProba := noWinnersProb()
|
||||
|
@ -281,12 +281,11 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message,
// gas checks

// 4. Min Gas
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
minGas := vm.PricelistByEpochAndNetworkVersion(epoch, nv).OnChainMessage(m.ChainLength())

check = api.MessageCheckStatus{
Cid: m.Cid(),
CheckStatus: api.CheckStatus{
Code: api.CheckStatusMessageMinGas,
CheckStatus: api.CheckStatus{Code: api.CheckStatusMessageMinGas,
Hint: map[string]interface{}{
"minGas": minGas,
},

@ -629,7 +629,11 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err
// a (soft) validation error.
func (mp *MessagePool) verifyMsgBeforeAdd(m *types.SignedMessage, curTs *types.TipSet, local bool) (bool, error) {
epoch := curTs.Height() + 1
minGas := vm.PricelistByEpoch(epoch).OnChainMessage(m.ChainLength())
nv, err := mp.getNtwkVersion(epoch)
if err != nil {
return false, xerrors.Errorf("getting network version: %w", err)
}
minGas := vm.PricelistByEpochAndNetworkVersion(epoch, nv).OnChainMessage(m.ChainLength())

if err := m.VMMessage().ValidForBlockInclusion(minGas.Total(), build.NewestNetworkVersion); err != nil {
return false, xerrors.Errorf("message will not be included in a block: %w", err)

@ -854,7 +854,6 @@ func TestMessageValueTooHigh(t *testing.T) {
Message: *msg,
Signature: *sig,
}

err = mp.Add(context.TODO(), sm)
assert.Error(t, err)
}
@ -901,8 +900,7 @@ func TestMessageSignatureInvalid(t *testing.T) {
}
err = mp.Add(context.TODO(), sm)
assert.Error(t, err)
// assert.Contains(t, err.Error(), "invalid signature length")
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid signature length")
}
}

@ -926,14 +924,29 @@ func TestAddMessageTwice(t *testing.T) {
to := mock.Address(1001)

{
// create a valid messages
sm := makeTestMessage(w, from, to, 0, 50_000_000, minimumBaseFee.Uint64())
msg := &types.Message{
To: to,
From: from,
Value: types.NewInt(1),
Nonce: 0,
GasLimit: 50000000,
GasFeeCap: types.NewInt(minimumBaseFee.Uint64()),
GasPremium: types.NewInt(1),
Params: make([]byte, 32<<10),
}

sig, err := w.WalletSign(context.TODO(), from, msg.Cid().Bytes(), api.MsgMeta{})
if err != nil {
panic(err)
}
sm := &types.SignedMessage{
Message: *msg,
Signature: *sig,
}
mustAdd(t, mp, sm)

// try to add it twice
err = mp.Add(context.TODO(), sm)
// assert.Contains(t, err.Error(), "with nonce 0 already in mpool")
assert.Error(t, err)
assert.Contains(t, err.Error(), "with nonce 0 already in mpool")
}
}

@ -963,8 +976,7 @@ func TestAddMessageTwiceNonceGap(t *testing.T) {

// then try to add message again
err = mp.Add(context.TODO(), sm)
// assert.Contains(t, err.Error(), "unfulfilled nonce gap")
assert.Error(t, err)
assert.Contains(t, err.Error(), "unfulfilled nonce gap")
}
}

@ -781,6 +781,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
// cannot exceed the block limit; drop all messages that exceed the limit
// - the total gasReward cannot exceed the actor's balance; drop all messages that exceed
// the balance

a, err := mp.api.GetActorAfter(actor, ts)
if err != nil {
log.Errorf("failed to load actor state, not building chain for %s: %v", actor, err)
@ -793,6 +794,12 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
skip := 0
i := 0
rewards := make([]*big.Int, 0, len(msgs))

nv, err := mp.getNtwkVersion(ts.Height())
if err != nil {
log.Errorf("getting network version: %v", err)
return nil
}
for i = 0; i < len(msgs); i++ {
m := msgs[i]

@ -808,7 +815,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6
}
curNonce++

minGas := vm.PricelistByEpoch(ts.Height()).OnChainMessage(m.ChainLength()).Total()
minGas := vm.PricelistByEpochAndNetworkVersion(ts.Height(), nv).OnChainMessage(m.ChainLength()).Total()
if m.Message.GasLimit < minGas {
break
}

@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package state
|
||||
|
||||
import (
|
||||
@ -18,6 +19,7 @@ import (
|
||||
)
|
||||
|
||||
func BenchmarkStateTreeSet(b *testing.B) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
st, err := NewStateTree(cst, types.StateTreeVersion1)
|
||||
if err != nil {
|
||||
@ -45,6 +47,7 @@ func BenchmarkStateTreeSet(b *testing.B) {
|
||||
}
|
||||
|
||||
func BenchmarkStateTreeSetFlush(b *testing.B) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
sv, err := VersionForNetwork(build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
@ -80,6 +83,8 @@ func BenchmarkStateTreeSetFlush(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestResolveCache(t *testing.T) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001
|
||||
//stm: @CHAIN_STATETREE_SNAPSHOT_001, @CHAIN_STATETREE_SNAPSHOT_CLEAR_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
sv, err := VersionForNetwork(build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
@ -182,6 +187,8 @@ func TestResolveCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func BenchmarkStateTree10kGetActor(b *testing.B) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001
|
||||
//stm: @CHAIN_STATETREE_FLUSH_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
sv, err := VersionForNetwork(build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
@ -229,6 +236,7 @@ func BenchmarkStateTree10kGetActor(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestSetCache(t *testing.T) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
sv, err := VersionForNetwork(build.NewestNetworkVersion)
|
||||
if err != nil {
|
||||
@ -270,6 +278,8 @@ func TestSetCache(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSnapshots(t *testing.T) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001
|
||||
//stm: @CHAIN_STATETREE_FLUSH_001, @CHAIN_STATETREE_SNAPSHOT_REVERT_001, CHAIN_STATETREE_SNAPSHOT_CLEAR_001
|
||||
ctx := context.Background()
|
||||
cst := cbor.NewMemCborStore()
|
||||
|
||||
@ -360,6 +370,7 @@ func assertNotHas(t *testing.T, st *StateTree, addr address.Address) {
|
||||
}
|
||||
|
||||
func TestStateTreeConsistency(t *testing.T) {
|
||||
//stm: @CHAIN_STATETREE_SET_ACTOR_001, @CHAIN_STATETREE_VERSION_FOR_NETWORK_001, @CHAIN_STATETREE_FLUSH_001
|
||||
cst := cbor.NewMemCborStore()
|
||||
|
||||
// TODO: ActorUpgrade: this test tests pre actors v2
|
||||
|
@ -8,6 +8,8 @@ import (
"sync"
"time"

"github.com/filecoin-project/lotus/build"

"github.com/filecoin-project/specs-actors/v7/actors/migration/nv15"

"github.com/ipfs/go-cid"
@ -161,7 +163,8 @@ func (us UpgradeSchedule) GetNtwkVersion(e abi.ChainEpoch) (network.Version, err
return u.Network, nil
}
}
return network.Version0, xerrors.Errorf("Epoch %d has no defined network version", e)

return build.GenesisNetworkVersion, nil
}

func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, height abi.ChainEpoch, cb ExecMonitor, ts *types.TipSet) (cid.Cid, error) {

@ -1,3 +1,4 @@
//stm: #integration
package stmgr_test

import (
@ -106,6 +107,9 @@ func (ta *testActor) TestMethod(rt rt2.Runtime, params *abi.EmptyValue) *abi.Emp
}

func TestForkHeightTriggers(t *testing.T) {
//stm: @CHAIN_STATETREE_GET_ACTOR_001, @CHAIN_STATETREE_FLUSH_001, @TOKEN_WALLET_SIGN_001
//stm: @CHAIN_GEN_NEXT_TIPSET_001
//stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001
logging.SetAllLoggers(logging.LevelInfo)

ctx := context.TODO()
@ -241,6 +245,8 @@ func TestForkHeightTriggers(t *testing.T) {
}

func TestForkRefuseCall(t *testing.T) {
//stm: @CHAIN_GEN_NEXT_TIPSET_001, @CHAIN_GEN_NEXT_TIPSET_FROM_MINERS_001
//stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001, @CHAIN_STATE_CALL_001
logging.SetAllLoggers(logging.LevelInfo)

for after := 0; after < 3; after++ {
@ -360,6 +366,8 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
}

func TestForkPreMigration(t *testing.T) {
//stm: @CHAIN_GEN_NEXT_TIPSET_001,
//stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001
logging.SetAllLoggers(logging.LevelInfo)

cg, err := gen.NewGenerator()

@ -1,3 +1,4 @@
//stm: #unit
package stmgr_test

import (
@ -12,6 +13,8 @@ import (
)

func TestSearchForMessageReplacements(t *testing.T) {
//stm: @CHAIN_GEN_NEXT_TIPSET_001
//stm: @CHAIN_STATE_SEARCH_MSG_001
ctx := context.Background()
cg, err := gen.NewGenerator()
if err != nil {

@ -1,3 +1,5 @@
//stm: #unit

package store

import (
@ -10,6 +12,7 @@ import (
)

func TestBaseFee(t *testing.T) {
//stm: @CHAIN_STORE_COMPUTE_NEXT_BASE_FEE_001
tests := []struct {
basefee uint64
limitUsed int64

@ -1,3 +1,4 @@
//stm: #unit
package store_test

import (
@ -10,6 +11,9 @@ import (
)

func TestChainCheckpoint(t *testing.T) {
//stm: @CHAIN_GEN_NEXT_TIPSET_FROM_MINERS_001
//stm: @CHAIN_STORE_GET_TIPSET_FROM_KEY_001, @CHAIN_STORE_SET_HEAD_001, @CHAIN_STORE_GET_HEAVIEST_TIPSET_001
//stm: @CHAIN_STORE_SET_CHECKPOINT_001, @CHAIN_STORE_MAYBE_TAKE_HEAVIER_TIPSET_001, @CHAIN_STORE_REMOVE_CHECKPOINT_001
ctx := context.Background()

cg, err := gen.NewGenerator()

@ -1,3 +1,4 @@
//stm: #unit
package store

import (
@ -9,6 +10,7 @@ import (
)

func TestHeadChangeCoalescer(t *testing.T) {
//stm: @CHAIN_STORE_COALESCE_HEAD_CHANGE_001
notif := make(chan headChange, 1)
c := NewHeadChangeCoalescer(func(revert, apply []*types.TipSet) error {
notif <- headChange{apply: apply, revert: revert}

@ -1,3 +1,4 @@
//stm: #unit
package store_test

import (
@ -17,6 +18,9 @@ import (
)

func TestIndexSeeks(t *testing.T) {
//stm: @CHAIN_STORE_IMPORT_001
//stm: @CHAIN_STORE_GET_TIPSET_BY_HEIGHT_001, @CHAIN_STORE_PUT_TIPSET_001, @CHAIN_STORE_SET_GENESIS_BLOCK_001
//stm: @CHAIN_STORE_CLOSE_001
cg, err := gen.NewGenerator()
if err != nil {
t.Fatal(err)

@ -1,3 +1,4 @@
//stm: #unit
package store_test

import (
@ -28,6 +29,8 @@ func init() {
}

func BenchmarkGetRandomness(b *testing.B) {
//stm: @CHAIN_GEN_NEXT_TIPSET_001
//stm: @CHAIN_STATE_GET_RANDOMNESS_FROM_TICKETS_001
cg, err := gen.NewGenerator()
if err != nil {
b.Fatal(err)
@ -85,6 +88,8 @@ func BenchmarkGetRandomness(b *testing.B) {
}

func TestChainExportImport(t *testing.T) {
//stm: @CHAIN_GEN_NEXT_TIPSET_001
//stm: @CHAIN_STORE_IMPORT_001
cg, err := gen.NewGenerator()
if err != nil {
t.Fatal(err)
@ -120,6 +125,9 @@ func TestChainExportImport(t *testing.T) {
}

func TestChainExportImportFull(t *testing.T) {
//stm: @CHAIN_GEN_NEXT_TIPSET_001
//stm: @CHAIN_STORE_IMPORT_001, @CHAIN_STORE_EXPORT_001, @CHAIN_STORE_SET_HEAD_001
//stm: @CHAIN_STORE_GET_TIPSET_BY_HEIGHT_001
cg, err := gen.NewGenerator()
if err != nil {
t.Fatal(err)

@ -1,3 +1,4 @@
//stm: #unit
package chain

import (
@ -78,6 +79,7 @@ func assertGetSyncOp(t *testing.T, c chan *syncOp, ts *types.TipSet) {
}

func TestSyncManagerEdgeCase(t *testing.T) {
//stm: @CHAIN_SYNCER_SET_PEER_HEAD_001
ctx := context.Background()

a := mock.TipSet(mock.MkBlock(genTs, 1, 1))
@ -161,6 +163,7 @@ func TestSyncManagerEdgeCase(t *testing.T) {
}

func TestSyncManager(t *testing.T) {
//stm: @CHAIN_SYNCER_SET_PEER_HEAD_001
ctx := context.Background()

a := mock.TipSet(mock.MkBlock(genTs, 1, 1))

@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -14,6 +15,7 @@ import (
|
||||
)
|
||||
|
||||
func TestBigIntSerializationRoundTrip(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_PARSE_BIGINT_001
|
||||
testValues := []string{
|
||||
"0", "1", "10", "-10", "9999", "12345678901234567891234567890123456789012345678901234567890",
|
||||
}
|
||||
@ -42,6 +44,7 @@ func TestBigIntSerializationRoundTrip(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFilRoundTrip(t *testing.T) {
|
||||
//stm: @TYPES_FIL_PARSE_001
|
||||
testValues := []string{
|
||||
"0 FIL", "1 FIL", "1.001 FIL", "100.10001 FIL", "101100 FIL", "5000.01 FIL", "5000 FIL",
|
||||
}
|
||||
@ -59,6 +62,7 @@ func TestFilRoundTrip(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSizeStr(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SIZE_BIGINT_001
|
||||
cases := []struct {
|
||||
in uint64
|
||||
out string
|
||||
@ -79,6 +83,7 @@ func TestSizeStr(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSizeStrUnitsSymmetry(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SIZE_BIGINT_001
|
||||
s := rand.NewSource(time.Now().UnixNano())
|
||||
r := rand.New(s)
|
||||
|
||||
@ -95,6 +100,7 @@ func TestSizeStrUnitsSymmetry(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSizeStrBig(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SIZE_BIGINT_001
|
||||
ZiB := big.NewInt(50000)
|
||||
ZiB = ZiB.Lsh(ZiB, 70)
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -51,6 +52,7 @@ func testBlockHeader(t testing.TB) *BlockHeader {
|
||||
}
|
||||
|
||||
func TestBlockHeaderSerialization(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_BLOCK_HEADER_FROM_CBOR_001, @CHAIN_TYPES_BLOCK_HEADER_TO_CBOR_001
|
||||
bh := testBlockHeader(t)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
@ -71,6 +73,7 @@ func TestBlockHeaderSerialization(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestInteropBH(t *testing.T) {
|
||||
//stm: @OTHER_IMPLEMENTATION_BLOCK_INTEROP_001
|
||||
newAddr, err := address.NewSecp256k1Address([]byte("address0"))
|
||||
|
||||
if err != nil {
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -11,6 +12,7 @@ import (
|
||||
)
|
||||
|
||||
func TestPoissonFunction(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_POISSON_001
|
||||
tests := []struct {
|
||||
lambdaBase uint64
|
||||
lambdaShift uint
|
||||
@ -47,6 +49,7 @@ func TestPoissonFunction(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLambdaFunction(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_LAMBDA_001
|
||||
tests := []struct {
|
||||
power string
|
||||
totalPower string
|
||||
@ -72,6 +75,7 @@ func TestLambdaFunction(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExpFunction(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_NEGATIVE_EXP_001
|
||||
const N = 256
|
||||
|
||||
step := big.NewInt(5)
|
||||
@ -100,6 +104,7 @@ func q256ToF(x *big.Int) float64 {
|
||||
}
|
||||
|
||||
func TestElectionLam(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_LAMBDA_001
|
||||
p := big.NewInt(64)
|
||||
tot := big.NewInt(128)
|
||||
lam := lambda(p, tot)
|
||||
@ -128,6 +133,7 @@ func BenchmarkWinCounts(b *testing.B) {
|
||||
}
|
||||
|
||||
func TestWinCounts(t *testing.T) {
|
||||
//stm: @TYPES_ELECTION_PROOF_COMPUTE_WIN_COUNT_001
|
||||
totalPower := NewInt(100)
|
||||
power := NewInt(20)
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -7,6 +8,7 @@ import (
|
||||
)
|
||||
|
||||
func TestFilShort(t *testing.T) {
|
||||
//stm: @TYPES_FIL_PARSE_001
|
||||
for _, s := range []struct {
|
||||
fil string
|
||||
expect string
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -71,6 +72,7 @@ func TestEqualCall(t *testing.T) {
|
||||
Params: []byte("hai"),
|
||||
}
|
||||
|
||||
//stm: @TYPES_MESSAGE_EQUAL_CALL_001
|
||||
require.True(t, m1.EqualCall(m2))
|
||||
require.True(t, m1.EqualCall(m3))
|
||||
require.False(t, m1.EqualCall(m4))
|
||||
@ -97,11 +99,13 @@ func TestMessageJson(t *testing.T) {
|
||||
exp := []byte("{\"Version\":0,\"To\":\"f04\",\"From\":\"f00\",\"Nonce\":34,\"Value\":\"0\",\"GasLimit\":123,\"GasFeeCap\":\"234\",\"GasPremium\":\"234\",\"Method\":6,\"Params\":\"aGFp\",\"CID\":{\"/\":\"bafy2bzaced5rdpz57e64sc7mdwjn3blicglhpialnrph2dlbufhf6iha63dmc\"}}")
|
||||
fmt.Println(string(b))
|
||||
|
||||
//stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_001
|
||||
require.Equal(t, exp, b)
|
||||
|
||||
var um Message
|
||||
require.NoError(t, json.Unmarshal(b, &um))
|
||||
|
||||
//stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_002
|
||||
require.EqualValues(t, *m, um)
|
||||
}
|
||||
|
||||
@ -131,10 +135,12 @@ func TestSignedMessageJson(t *testing.T) {
|
||||
exp := []byte("{\"Message\":{\"Version\":0,\"To\":\"f04\",\"From\":\"f00\",\"Nonce\":34,\"Value\":\"0\",\"GasLimit\":123,\"GasFeeCap\":\"234\",\"GasPremium\":\"234\",\"Method\":6,\"Params\":\"aGFp\",\"CID\":{\"/\":\"bafy2bzaced5rdpz57e64sc7mdwjn3blicglhpialnrph2dlbufhf6iha63dmc\"}},\"Signature\":{\"Type\":0,\"Data\":null},\"CID\":{\"/\":\"bafy2bzacea5ainifngxj3rygaw2hppnyz2cw72x5pysqty2x6dxmjs5qg2uus\"}}")
|
||||
fmt.Println(string(b))
|
||||
|
||||
//stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_001
|
||||
require.Equal(t, exp, b)
|
||||
|
||||
var um SignedMessage
|
||||
require.NoError(t, json.Unmarshal(b, &um))
|
||||
|
||||
//stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_002
|
||||
require.EqualValues(t, *sm, um)
|
||||
}
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -8,6 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestSignatureSerializeRoundTrip(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SIGNATURE_SERIALIZATION_001
|
||||
s := &crypto.Signature{
|
||||
Data: []byte("foo bar cat dog"),
|
||||
Type: crypto.SigTypeBLS,
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
@ -12,6 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func TestTipSetKey(t *testing.T) {
|
||||
//stm: @TYPES_TIPSETKEY_FROM_BYTES_001, @TYPES_TIPSETKEY_NEW_001
|
||||
cb := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.BLAKE2B_MIN + 31}
|
||||
c1, _ := cb.Sum([]byte("a"))
|
||||
c2, _ := cb.Sum([]byte("b"))
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package types
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package chain
|
||||
|
||||
import (
|
||||
@ -12,6 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func TestSignedMessageJsonRoundtrip(t *testing.T) {
|
||||
//stm: @TYPES_MESSAGE_JSON_EQUAL_CALL_002
|
||||
to, _ := address.NewIDAddress(5234623)
|
||||
from, _ := address.NewIDAddress(603911192)
|
||||
smsg := &types.SignedMessage{
|
||||
@ -40,6 +42,7 @@ func TestSignedMessageJsonRoundtrip(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAddressType(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_ADDRESS_PREFIX_001
|
||||
build.SetAddressNetwork(address.Testnet)
|
||||
addr, err := makeRandomAddress()
|
||||
if err != nil {
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package vectors
|
||||
|
||||
import (
|
||||
@ -26,6 +27,7 @@ func LoadVector(t *testing.T, f string, out interface{}) {
|
||||
}
|
||||
|
||||
func TestBlockHeaderVectors(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SERIALIZATION_BLOCK_001
|
||||
var headers []HeaderVector
|
||||
LoadVector(t, "block_headers.json", &headers)
|
||||
|
||||
@ -46,6 +48,7 @@ func TestBlockHeaderVectors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMessageSigningVectors(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SERIALIZATION_SIGNED_MESSAGE_001
|
||||
var msvs []MessageSigningVector
|
||||
LoadVector(t, "message_signing.json", &msvs)
|
||||
|
||||
@ -64,6 +67,7 @@ func TestMessageSigningVectors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUnsignedMessageVectors(t *testing.T) {
|
||||
//stm: @CHAIN_TYPES_SERIALIZATION_MESSAGE_001
|
||||
var msvs []UnsignedMessageVector
|
||||
LoadVector(t, "unsigned_messages.json", &msvs)
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package vm
|
||||
|
||||
import (
|
||||
@ -9,6 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestGasBurn(t *testing.T) {
|
||||
//stm: @BURN_ESTIMATE_GAS_OVERESTIMATION_BURN_001
|
||||
tests := []struct {
|
||||
used int64
|
||||
limit int64
|
||||
@ -40,6 +42,7 @@ func TestGasBurn(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGasOutputs(t *testing.T) {
|
||||
//stm: @BURN_ESTIMATE_GAS_OUTPUTS_001
|
||||
baseFee := types.NewInt(10)
|
||||
tests := []struct {
|
||||
used int64
|
||||
|
@ -5,6 +5,9 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
@ -15,7 +18,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
"github.com/filecoin-project/lotus/lib/sigs"
|
||||
@ -30,7 +32,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/ipfs/go-cid"
|
||||
)
|
||||
|
||||
var _ Interface = (*FVM)(nil)
|
||||
@ -40,6 +41,7 @@ type FvmExtern struct {
|
||||
Rand
|
||||
blockstore.Blockstore
|
||||
epoch abi.ChainEpoch
|
||||
nv network.Version
|
||||
lbState LookbackStateGetter
|
||||
base cid.Cid
|
||||
}
|
||||
@ -181,7 +183,7 @@ func (x *FvmExtern) workerKeyAtLookback(ctx context.Context, minerId address.Add
|
||||
}
|
||||
|
||||
cstWithoutGas := cbor.NewCborStore(x.Blockstore)
|
||||
cbb := &gasChargingBlocks{gasAdder, PricelistByEpoch(x.epoch), x.Blockstore}
|
||||
cbb := &gasChargingBlocks{gasAdder, PricelistByEpochAndNetworkVersion(x.epoch, x.nv), x.Blockstore}
|
||||
cstWithGas := cbor.NewCborStore(cbb)
|
||||
|
||||
lbState, err := x.lbState(ctx, height)
|
||||
@ -241,7 +243,7 @@ func NewFVM(ctx context.Context, opts *VMOpts) (*FVM, error) {
|
||||
|
||||
fvmOpts := ffi.FVMOpts{
|
||||
FVMVersion: 0,
|
||||
Externs: &FvmExtern{Rand: opts.Rand, Blockstore: opts.Bstore, lbState: opts.LookbackState, base: opts.StateBase, epoch: opts.Epoch},
|
||||
Externs: &FvmExtern{Rand: opts.Rand, Blockstore: opts.Bstore, lbState: opts.LookbackState, base: opts.StateBase, epoch: opts.Epoch, nv: opts.NetworkVersion},
|
||||
Epoch: opts.Epoch,
|
||||
BaseFee: opts.BaseFee,
|
||||
BaseCircSupply: circToReport,
|
||||
|
@ -3,6 +3,8 @@ package vm
import (
"fmt"

"github.com/filecoin-project/go-state-types/network"

vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime"
proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof"

@ -82,10 +84,7 @@ type Pricelist interface {
OnVerifyConsensusFault() GasCharge
}

// Prices are the price lists per starting epoch. Public for testing purposes
// (concretely to allow the test vector runner to rebase prices).
var Prices = map[abi.ChainEpoch]Pricelist{
abi.ChainEpoch(0): &pricelistV0{
var priceListGenesis = pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1000,

@ -133,8 +132,9 @@ var Prices = map[abi.ChainEpoch]Pricelist{
},
verifyPostDiscount: true,
verifyConsensusFault: 495422,
},
abi.ChainEpoch(build.UpgradeCalicoHeight): &pricelistV0{
}

var priceListCalico = pricelistV0{
computeGasMulti: 1,
storageGasMulti: 1300,

@ -211,16 +211,26 @@ var Prices = map[abi.ChainEpoch]Pricelist{
verifyConsensusFault: 495422,

verifyReplicaUpdate: 36316136,
},
}

// PricelistByEpoch finds the latest prices for the given epoch
func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist {
// Prices are the price lists per starting epoch.
// For network v8 and onwards, this is disregarded; the pricelist is selected by network version.
var pricesByEpoch = map[abi.ChainEpoch]Pricelist{
abi.ChainEpoch(0): &priceListGenesis,
abi.ChainEpoch(build.UpgradeCalicoHeight): &priceListCalico,
}

// PricelistByEpochAndNetworkVersion finds the latest prices for the given epoch
func PricelistByEpochAndNetworkVersion(epoch abi.ChainEpoch, nv network.Version) Pricelist {
if nv >= network.Version8 {
return &priceListCalico
}

// since we are storing the prices as map or epoch to price
// we need to get the price with the highest epoch that is lower or equal to the `epoch` arg
bestEpoch := abi.ChainEpoch(0)
bestPrice := Prices[bestEpoch]
for e, pl := range Prices {
bestPrice := pricesByEpoch[bestEpoch]
for e, pl := range pricesByEpoch {
// if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch`
if e > bestEpoch && e <= epoch {
bestEpoch = e

@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package vm
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package vm
|
||||
|
||||
import (
|
||||
@ -106,6 +107,7 @@ func (*basicRtMessage) ValueReceived() abi.TokenAmount {
|
||||
}
|
||||
|
||||
func TestInvokerBasic(t *testing.T) {
|
||||
//stm: @INVOKER_TRANSFORM_001
|
||||
inv := ActorRegistry{}
|
||||
code, err := inv.transform(basicContract{})
|
||||
assert.NoError(t, err)
|
||||
|
@ -51,7 +51,7 @@ var EmptyObjectCid cid.Cid
|
||||
|
||||
// TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses.
|
||||
func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, address.Address, aerrors.ActorError) {
|
||||
if err := rt.chargeGasSafe(PricelistByEpoch(rt.height).OnCreateActor()); err != nil {
|
||||
if err := rt.chargeGasSafe(PricelistByEpochAndNetworkVersion(rt.height, rt.NetworkVersion()).OnCreateActor()); err != nil {
|
||||
return nil, address.Undef, err
|
||||
}
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package vm
|
||||
|
||||
import (
|
||||
@ -22,6 +23,7 @@ func (*NotAVeryGoodMarshaler) MarshalCBOR(writer io.Writer) error {
|
||||
var _ cbg.CBORMarshaler = &NotAVeryGoodMarshaler{}
|
||||
|
||||
func TestRuntimePutErrors(t *testing.T) {
|
||||
//stm: @CHAIN_VM_STORE_PUT_002
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err == nil {
|
||||
|
@ -42,7 +42,7 @@ const MaxCallDepth = 4096
|
||||
|
||||
var (
|
||||
log = logging.Logger("vm")
|
||||
actorLog = logging.Logger("actors")
|
||||
actorLog = logging.WithSkip(logging.Logger("actors"), 1)
|
||||
gasOnActorExec = newGasCharge("OnActorExec", 0, 0)
|
||||
)
|
||||
|
||||
@ -135,7 +135,7 @@ func (vm *LegacyVM) makeRuntime(ctx context.Context, msg *types.Message, parent
|
||||
gasAvailable: msg.GasLimit,
|
||||
depth: 0,
|
||||
numActorsCreated: 0,
|
||||
pricelist: PricelistByEpoch(vm.blockHeight),
|
||||
pricelist: PricelistByEpochAndNetworkVersion(vm.blockHeight, vm.networkVersion),
|
||||
allowInternal: true,
|
||||
callerValidated: false,
|
||||
executionTrace: types.ExecutionTrace{Msg: msg},
|
||||
@ -431,7 +431,7 @@ func (vm *LegacyVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*App
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pl := PricelistByEpoch(vm.blockHeight)
|
||||
pl := PricelistByEpochAndNetworkVersion(vm.blockHeight, vm.networkVersion)
|
||||
|
||||
msgGas := pl.OnChainMessage(cmsg.ChainLength())
|
||||
msgGasCost := msgGas.Total()
|
||||
|
@ -9,7 +9,6 @@ import (
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
@ -128,7 +127,7 @@ var AuthApiInfoToken = &cli.Command{
|
||||
|
||||
// TODO: Log in audit log when it is implemented
|
||||
|
||||
currentEnv, _, _ := cliutil.EnvsForAPIInfos(t)
|
||||
currentEnv, _, _ := t.APIInfoEnvVars()
|
||||
fmt.Printf("%s=%s:%s\n", currentEnv, string(token), ainfo.Addr)
|
||||
return nil
|
||||
},
|
||||
|
@ -1,4 +1,4 @@
|
||||
//stm: #cli
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -515,7 +515,7 @@ The minimum value is 518400 (6 months).`,
|
||||
}
|
||||
|
||||
func interactiveDeal(cctx *cli.Context) error {
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
api, closer, err := GetFullNodeAPIV1(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -873,8 +873,7 @@ uiLoop:
|
||||
continue uiLoop
|
||||
}
|
||||
|
||||
ask = append(ask, *a)
|
||||
|
||||
ask = append(ask, *a.Response)
|
||||
}
|
||||
|
||||
// TODO: run more validation
|
||||
@ -1404,9 +1403,13 @@ var clientListAsksCmd = &cli.Command{
|
||||
Value: "text",
|
||||
Usage: "Either 'text' or 'csv'",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "protocols",
|
||||
Usage: "Output supported deal protocols",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
api, closer, err := GetFullNodeAPI(cctx)
|
||||
api, closer, err := GetFullNodeAPIV1(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1423,21 +1426,27 @@ var clientListAsksCmd = &cli.Command{
|
||||
return asks[i].Ping < asks[j].Ping
|
||||
})
|
||||
}
|
||||
pfmt := "%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch ping:%s\n"
|
||||
pfmt := "%s: min:%s max:%s price:%s/GiB/Epoch verifiedPrice:%s/GiB/Epoch ping:%s protos:%s\n"
|
||||
if cctx.String("output-format") == "csv" {
|
||||
fmt.Printf("Miner,Min,Max,Price,VerifiedPrice,Ping\n")
|
||||
pfmt = "%s,%s,%s,%s,%s,%s\n"
|
||||
fmt.Printf("Miner,Min,Max,Price,VerifiedPrice,Ping,Protocols")
|
||||
pfmt = "%s,%s,%s,%s,%s,%s,%s\n"
|
||||
}
|
||||
|
||||
for _, a := range asks {
|
||||
ask := a.Ask
|
||||
|
||||
protos := ""
|
||||
if cctx.Bool("protocols") {
|
||||
protos = "[" + strings.Join(a.DealProtocols, ",") + "]"
|
||||
}
|
||||
|
||||
fmt.Printf(pfmt, ask.Miner,
|
||||
types.SizeStr(types.NewInt(uint64(ask.MinPieceSize))),
|
||||
types.SizeStr(types.NewInt(uint64(ask.MaxPieceSize))),
|
||||
types.FIL(ask.Price),
|
||||
types.FIL(ask.VerifiedPrice),
|
||||
a.Ping,
|
||||
protos,
|
||||
)
|
||||
}
|
||||
|
||||
@ -1447,10 +1456,12 @@ var clientListAsksCmd = &cli.Command{
|
||||
|
||||
type QueriedAsk struct {
|
||||
Ask *storagemarket.StorageAsk
|
||||
DealProtocols []string
|
||||
|
||||
Ping time.Duration
|
||||
}
|
||||
|
||||
func GetAsks(ctx context.Context, api v0api.FullNode) ([]QueriedAsk, error) {
|
||||
func GetAsks(ctx context.Context, api lapi.FullNode) ([]QueriedAsk, error) {
|
||||
isTTY := true
|
||||
if fileInfo, _ := os.Stdout.Stat(); (fileInfo.Mode() & os.ModeCharDevice) == 0 {
|
||||
isTTY = false
|
||||
@ -1561,7 +1572,9 @@ loop:
|
||||
atomic.AddInt64(&got, 1)
|
||||
lk.Lock()
|
||||
asks = append(asks, QueriedAsk{
|
||||
Ask: ask,
|
||||
Ask: ask.Response,
|
||||
DealProtocols: ask.DealProtocols,
|
||||
|
||||
Ping: pingDuration,
|
||||
})
|
||||
lk.Unlock()
|
||||
@ -1915,6 +1928,7 @@ type deal struct {
|
||||
var clientGetDealCmd = &cli.Command{
|
||||
Name: "get-deal",
|
||||
Usage: "Print detailed deal information",
|
||||
ArgsUsage: "[proposalCID]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Args().Present() {
|
||||
return cli.ShowCommandHelp(cctx, cctx.Command.Name)
|
||||
|
@ -1,4 +1,4 @@
|
||||
//stm: #cli
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -161,6 +161,7 @@ var msigCreateCmd = &cli.Command{
|
||||
msgCid := sm.Cid()
|
||||
|
||||
fmt.Println("sent create in message: ", msgCid)
|
||||
fmt.Println("waiting for confirmation..")
|
||||
|
||||
// wait for it to get mined into a block
|
||||
wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true)
|
||||
|
cli/net.go
@ -1,12 +1,14 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/urfave/cli/v2"
|
||||
@ -28,6 +30,7 @@ var NetCmd = &cli.Command{
|
||||
Usage: "Manage P2P Network",
|
||||
Subcommands: []*cli.Command{
|
||||
NetPeers,
|
||||
NetPing,
|
||||
NetConnect,
|
||||
NetListen,
|
||||
NetId,
|
||||
@ -117,6 +120,82 @@ var NetPeers = &cli.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var NetPing = &cli.Command{
|
||||
Name: "ping",
|
||||
Usage: "Ping peers",
|
||||
Flags: []cli.Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "count",
|
||||
Value: 10,
|
||||
Aliases: []string{"c"},
|
||||
Usage: "specify the number of times it should ping",
|
||||
},
|
||||
&cli.DurationFlag{
|
||||
Name: "interval",
|
||||
Value: time.Second,
|
||||
Aliases: []string{"i"},
|
||||
Usage: "minimum time between pings",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.Args().Len() != 1 {
|
||||
return xerrors.Errorf("please provide a peerID")
|
||||
}
|
||||
|
||||
api, closer, err := GetAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closer()
|
||||
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
pis, err := addrInfoFromArg(ctx, cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
count := cctx.Int("count")
|
||||
interval := cctx.Duration("interval")
|
||||
|
||||
for _, pi := range pis {
|
||||
err := api.NetConnect(ctx, pi)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("connect: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("PING %s\n", pi.ID)
|
||||
var avg time.Duration
|
||||
var successful int
|
||||
|
||||
for i := 0; i < count && ctx.Err() == nil; i++ {
|
||||
start := time.Now()
|
||||
|
||||
rtt, err := api.NetPing(ctx, pi.ID)
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
break
|
||||
}
|
||||
log.Errorf("Ping failed: error=%v", err)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Pong received: time=%v\n", rtt)
|
||||
avg = avg + rtt
|
||||
successful++
|
||||
|
||||
wctx, cancel := context.WithTimeout(ctx, time.Until(start.Add(interval)))
|
||||
<-wctx.Done()
|
||||
cancel()
|
||||
}
|
||||
|
||||
if successful > 0 {
|
||||
fmt.Printf("Average latency: %v\n", avg/time.Duration(successful))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var NetScores = &cli.Command{
|
||||
Name: "scores",
|
||||
Usage: "Print peers' pubsub scores",
|
||||
@ -192,26 +271,46 @@ var NetConnect = &cli.Command{
|
||||
defer closer()
|
||||
ctx := ReqContext(cctx)
|
||||
|
||||
pis, err := addrInfoFromArg(ctx, cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, pi := range pis {
|
||||
fmt.Printf("connect %s: ", pi.ID.Pretty())
|
||||
err := api.NetConnect(ctx, pi)
|
||||
if err != nil {
|
||||
fmt.Println("failure")
|
||||
return err
|
||||
}
|
||||
fmt.Println("success")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func addrInfoFromArg(ctx context.Context, cctx *cli.Context) ([]peer.AddrInfo, error) {
|
||||
pis, err := addrutil.ParseAddresses(ctx, cctx.Args().Slice())
|
||||
if err != nil {
|
||||
a, perr := address.NewFromString(cctx.Args().First())
|
||||
if perr != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
na, fc, err := GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
defer fc()
|
||||
|
||||
mi, err := na.StateMinerInfo(ctx, a, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting miner info: %w", err)
|
||||
return nil, xerrors.Errorf("getting miner info: %w", err)
|
||||
}
|
||||
|
||||
if mi.PeerId == nil {
|
||||
return xerrors.Errorf("no PeerID for miner")
|
||||
return nil, xerrors.Errorf("no PeerID for miner")
|
||||
}
|
||||
multiaddrs := make([]multiaddr.Multiaddr, 0, len(mi.Multiaddrs))
|
||||
for i, a := range mi.Multiaddrs {
|
||||
@ -233,18 +332,7 @@ var NetConnect = &cli.Command{
|
||||
pis = append(pis, pi)
|
||||
}
|
||||
|
||||
for _, pi := range pis {
|
||||
fmt.Printf("connect %s: ", pi.ID.Pretty())
|
||||
err := api.NetConnect(ctx, pi)
|
||||
if err != nil {
|
||||
fmt.Println("failure")
|
||||
return err
|
||||
}
|
||||
fmt.Println("success")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
return pis, err
|
||||
}
|
||||
|
||||
var NetId = &cli.Command{
|
||||
|
@ -1,3 +1,5 @@
|
||||
//stm: ignore
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,5 @@
|
||||
//stm: ignore
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -28,63 +28,6 @@ const (
|
||||
metadataTraceContext = "traceContext"
|
||||
)
|
||||
|
||||
// flagsForAPI returns flags passed on the command line with the listen address
|
||||
// of the API server (only used by the tests), in the order of precedence they
|
||||
// should be applied for the requested kind of node.
|
||||
func flagsForAPI(t repo.RepoType) []string {
|
||||
switch t {
|
||||
case repo.FullNode:
|
||||
return []string{"api-url"}
|
||||
case repo.StorageMiner:
|
||||
return []string{"miner-api-url"}
|
||||
case repo.Worker:
|
||||
return []string{"worker-api-url"}
|
||||
case repo.Markets:
|
||||
// support split markets-miner and monolith deployments.
|
||||
return []string{"markets-api-url", "miner-api-url"}
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
||||
}
|
||||
}
|
||||
|
||||
func flagsForRepo(t repo.RepoType) []string {
|
||||
switch t {
|
||||
case repo.FullNode:
|
||||
return []string{"repo"}
|
||||
case repo.StorageMiner:
|
||||
return []string{"miner-repo"}
|
||||
case repo.Worker:
|
||||
return []string{"worker-repo"}
|
||||
case repo.Markets:
|
||||
// support split markets-miner and monolith deployments.
|
||||
return []string{"markets-repo", "miner-repo"}
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
||||
}
|
||||
}
|
||||
|
||||
// EnvsForAPIInfos returns the environment variables to use in order of precedence
|
||||
// to determine the API endpoint of the specified node type.
|
||||
//
|
||||
// It returns the current variables and deprecated ones separately, so that
|
||||
// the user can log a warning when deprecated ones are found to be in use.
|
||||
func EnvsForAPIInfos(t repo.RepoType) (primary string, fallbacks []string, deprecated []string) {
|
||||
switch t {
|
||||
case repo.FullNode:
|
||||
return "FULLNODE_API_INFO", nil, nil
|
||||
case repo.StorageMiner:
|
||||
// TODO remove deprecated deprecation period
|
||||
return "MINER_API_INFO", nil, []string{"STORAGE_API_INFO"}
|
||||
case repo.Worker:
|
||||
return "WORKER_API_INFO", nil, nil
|
||||
case repo.Markets:
|
||||
// support split markets-miner and monolith deployments.
|
||||
return "MARKETS_API_INFO", []string{"MINER_API_INFO"}, nil
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown repo type: %v", t))
|
||||
}
|
||||
}
|
||||
|
||||
// GetAPIInfo returns the API endpoint to use for the specified kind of repo.
|
||||
//
|
||||
// The order of precedence is as follows:
|
||||
@ -96,8 +39,7 @@ func EnvsForAPIInfos(t repo.RepoType) (primary string, fallbacks []string, depre
|
||||
func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
// Check if there was a flag passed with the listen address of the API
|
||||
// server (only used by the tests)
|
||||
apiFlags := flagsForAPI(t)
|
||||
for _, f := range apiFlags {
|
||||
for _, f := range t.APIFlags() {
|
||||
if !ctx.IsSet(f) {
|
||||
continue
|
||||
}
|
||||
@ -111,7 +53,7 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
// Note: it is not correct/intuitive to prefer environment variables over
|
||||
// CLI flags (repo flags below).
|
||||
//
|
||||
primaryEnv, fallbacksEnvs, deprecatedEnvs := EnvsForAPIInfos(t)
|
||||
primaryEnv, fallbacksEnvs, deprecatedEnvs := t.APIInfoEnvVars()
|
||||
env, ok := os.LookupEnv(primaryEnv)
|
||||
if ok {
|
||||
return ParseApiInfo(env), nil
|
||||
@ -125,8 +67,7 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
}
|
||||
}
|
||||
|
||||
repoFlags := flagsForRepo(t)
|
||||
for _, f := range repoFlags {
|
||||
for _, f := range t.RepoFlags() {
|
||||
// cannot use ctx.IsSet because it ignores default values
|
||||
path := ctx.String(f)
|
||||
if path == "" {
|
||||
@ -175,13 +116,13 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
}
|
||||
}
|
||||
|
||||
return APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t)
|
||||
return APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t.Type())
|
||||
}
|
||||
|
||||
func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) {
|
||||
ainfo, err := GetAPIInfo(ctx, t)
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("could not get API info for %s: %w", t, err)
|
||||
return "", nil, xerrors.Errorf("could not get API info for %s: %w", t.Type(), err)
|
||||
}
|
||||
|
||||
addr, err := ainfo.DialArgs(version)
|
||||
|
@ -1,4 +1,4 @@
|
||||
//stm: #cli
|
||||
//stm: #unit
|
||||
package cli
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package main
|
||||
|
||||
import (
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -8,6 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func TestRateLimit(t *testing.T) {
|
||||
//stm: @CMD_LIMITER_GET_IP_LIMITER_001, @CMD_LIMITER_GET_WALLET_LIMITER_001
|
||||
limiter := NewLimiter(LimiterConfig{
|
||||
TotalRate: time.Second,
|
||||
TotalBurst: 20,
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -9,6 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestAppendCIDsToWindow(t *testing.T) {
|
||||
//stm: @CMD_HEALTH_APPEND_CIDS_001
|
||||
assert := assert.New(t)
|
||||
var window CidWindow
|
||||
threshold := 3
|
||||
@ -27,6 +29,7 @@ func TestAppendCIDsToWindow(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckWindow(t *testing.T) {
|
||||
//stm: @CMD_HEALTH_APPEND_CIDS_001, @CMD_HEALTH_CHECK_WINDOW_001
|
||||
assert := assert.New(t)
|
||||
threshold := 3
|
||||
|
||||
|
@ -916,8 +916,8 @@ var actorProposeChangeWorker = &cli.Command{
|
||||
return fmt.Errorf("Proposed worker address change not reflected on chain: expected '%s', found '%s'", na, mi.NewWorker)
|
||||
}
|
||||
|
||||
fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully proposed.\n", na)
|
||||
fmt.Fprintf(cctx.App.Writer, "Call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch)
|
||||
fmt.Fprintf(cctx.App.Writer, "Worker key change to %s successfully sent, change happens at height %d.\n", na, mi.WorkerChangeEpoch)
|
||||
fmt.Fprintf(cctx.App.Writer, "If you have no active deadlines, call 'confirm-change-worker' at or after height %d to complete.\n", mi.WorkerChangeEpoch)
|
||||
|
||||
return nil
|
||||
},
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #unit
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -23,6 +24,7 @@ import (
|
||||
)
|
||||
|
||||
func TestWorkerKeyChange(t *testing.T) {
|
||||
//stm: @OTHER_WORKER_KEY_CHANGE_001
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode")
|
||||
}
|
||||
@ -70,7 +72,7 @@ func TestWorkerKeyChange(t *testing.T) {
|
||||
result := output.String()
|
||||
output.Reset()
|
||||
|
||||
require.Contains(t, result, fmt.Sprintf("Worker key change to %s successfully proposed.", newKey))
|
||||
require.Contains(t, result, fmt.Sprintf("Worker key change to %s successfully sent", newKey))
|
||||
|
||||
epochRe := regexp.MustCompile("at or after height (?P<epoch>[0-9]+) to complete")
|
||||
matches := epochRe.FindStringSubmatch(result)
|
||||
|
@ -1,3 +1,4 @@
|
||||
//stm: #integration
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -49,6 +50,7 @@ func TestMinerAllInfo(t *testing.T) {
|
||||
|
||||
t.Run("pre-info-all", run)
|
||||
|
||||
//stm: @CLIENT_DATA_IMPORT_001, @CLIENT_STORAGE_DEALS_GET_001
|
||||
dh := kit.NewDealHarness(t, client, miner, miner)
|
||||
deal, res, inPath := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{Rseed: 6})
|
||||
outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
|
||||
|
@ -35,6 +35,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||
"github.com/filecoin-project/lotus/journal/alerting"
|
||||
)
|
||||
|
||||
@ -343,6 +344,41 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.Full
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
fmt.Println()
|
||||
|
||||
ws, err := nodeApi.WorkerStats(ctx)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting worker stats: %w", err)
|
||||
}
|
||||
|
||||
workersByType := map[string]int{
|
||||
sealtasks.WorkerSealing: 0,
|
||||
sealtasks.WorkerWindowPoSt: 0,
|
||||
sealtasks.WorkerWinningPoSt: 0,
|
||||
}
|
||||
|
||||
wloop:
|
||||
for _, st := range ws {
|
||||
if !st.Enabled {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, task := range st.Tasks {
|
||||
if task.WorkerType() != sealtasks.WorkerSealing {
|
||||
workersByType[task.WorkerType()]++
|
||||
continue wloop
|
||||
}
|
||||
}
|
||||
workersByType[sealtasks.WorkerSealing]++
|
||||
}
|
||||
|
||||
fmt.Printf("Workers: Seal(%d) WdPoSt(%d) WinPoSt(%d)\n",
|
||||
workersByType[sealtasks.WorkerSealing],
|
||||
workersByType[sealtasks.WorkerWindowPoSt],
|
||||
workersByType[sealtasks.WorkerWinningPoSt])
|
||||
}
|
||||
|
||||
if cctx.IsSet("blocks") {
|
||||
fmt.Println("Produced newest blocks:")
|
||||
err = producedBlocks(ctx, cctx.Int("blocks"), maddr, fullapi)
|
||||
@ -350,9 +386,6 @@ func handleMiningInfo(ctx context.Context, cctx *cli.Context, fullapi v0api.Full
|
||||
return err
|
||||
}
|
||||
}
|
||||
// TODO: grab actr state / info
|
||||
// * Sealed sectors (count / bytes)
|
||||
// * Power
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -50,8 +50,13 @@ var infoAllCmd = &cli.Command{
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
fmt.Println("\n#: Worker List")
|
||||
if err := sealingWorkersCmd.Action(cctx); err != nil {
|
||||
fmt.Println("\n#: Sealing Worker List")
|
||||
if err := workersCmd(true).Action(cctx); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
fmt.Println("\n#: Proving Worker List")
|
||||
if err := workersCmd(false).Action(cctx); err != nil {
|
||||
fmt.Println("ERROR: ", err)
|
||||
}
|
||||
|
||||
|
@ -34,6 +34,7 @@ import (
|
||||
sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
|
||||
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
|
||||
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||
@ -231,7 +232,7 @@ var initCmd = &cli.Command{
|
||||
|
||||
if !cctx.Bool("no-local-storage") {
|
||||
b, err := json.MarshalIndent(&stores.LocalStorageMeta{
|
||||
ID: stores.ID(uuid.New().String()),
|
||||
ID: storiface.ID(uuid.New().String()),
|
||||
Weight: 10,
|
||||
CanSeal: true,
|
||||
CanStore: true,
|
||||
@ -466,7 +467,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
|
||||
}
|
||||
stor := stores.NewRemote(lstor, si, http.Header(sa), 10, &stores.DefaultPartialFileHandler{})
|
||||
|
||||
smgr, err := sectorstorage.New(ctx, lstor, stor, lr, si, sectorstorage.SealerConfig{
|
||||
smgr, err := sectorstorage.New(ctx, lstor, stor, lr, si, sectorstorage.Config{
|
||||
ParallelFetchLimit: 10,
|
||||
AllowAddPiece: true,
|
||||
AllowPreCommit1: true,
|
||||
|
@ -212,6 +212,7 @@ var setAskCmd = &cli.Command{
|
||||
Name: "max-piece-size",
|
||||
Usage: "Set maximum piece size (w/bit-padding, in bytes) in ask to `SIZE`",
|
||||
DefaultText: "miner sector size",
|
||||
Value: "0",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
|
@ -1,10 +1,12 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/urfave/cli/v2"
|
||||
@ -17,7 +19,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
)
|
||||
|
||||
@ -30,6 +32,8 @@ var provingCmd = &cli.Command{
|
||||
provingDeadlineInfoCmd,
|
||||
provingFaultsCmd,
|
||||
provingCheckProvableCmd,
|
||||
workersCmd(false),
|
||||
provingComputeCmd,
|
||||
},
|
||||
}
|
||||
|
||||
@ -365,6 +369,10 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
Name: "storage-id",
|
||||
Usage: "filter sectors by storage path (path id)",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "faulty",
|
||||
Usage: "only check faulty sectors",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.Args().Len() != 1 {
|
||||
@ -376,7 +384,7 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
return xerrors.Errorf("could not parse deadline index: %w", err)
|
||||
}
|
||||
|
||||
api, closer, err := lcli.GetFullNodeAPI(cctx)
|
||||
api, closer, err := lcli.GetFullNodeAPIV1(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -420,7 +428,7 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
decls := sl[stores.ID(cctx.String("storage-id"))]
|
||||
decls := sl[storiface.ID(cctx.String("storage-id"))]
|
||||
|
||||
filter = map[abi.SectorID]struct{}{}
|
||||
for _, decl := range decls {
|
||||
@ -428,6 +436,38 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
}
|
||||
}
|
||||
|
||||
if cctx.Bool("faulty") {
|
||||
parts, err := getAllPartitions(ctx, addr, api)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("getting partitions: %w", err)
|
||||
}
|
||||
|
||||
if filter != nil {
|
||||
for k := range filter {
|
||||
set, err := parts.FaultySectors.IsSet(uint64(k.Number))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !set {
|
||||
delete(filter, k)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filter = map[abi.SectorID]struct{}{}
|
||||
|
||||
err = parts.FaultySectors.ForEach(func(s uint64) error {
|
||||
filter[abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: abi.SectorNumber(s),
|
||||
}] = struct{}{}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for parIdx, par := range partitions {
|
||||
sectors := make(map[abi.SectorNumber]struct{})
|
||||
|
||||
@ -437,7 +477,6 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
}
|
||||
|
||||
var tocheck []storage.SectorRef
|
||||
var update []bool
|
||||
for _, info := range sectorInfos {
|
||||
si := abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
@ -455,10 +494,9 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
ProofType: info.SealProof,
|
||||
ID: si,
|
||||
})
|
||||
update = append(update, info.SectorKeyCID != nil)
|
||||
}
|
||||
|
||||
bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, update, cctx.Bool("slow"))
|
||||
bad, err := sapi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -475,3 +513,50 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
return tw.Flush()
|
||||
},
|
||||
}
|
||||
|
||||
var provingComputeCmd = &cli.Command{
|
||||
Name: "compute",
|
||||
Subcommands: []*cli.Command{
|
||||
provingComputeWindowPoStCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var provingComputeWindowPoStCmd = &cli.Command{
|
||||
Name: "window-post",
|
||||
Usage: "Compute WindowPoSt for a specific deadline",
|
||||
Description: `Note: This command is intended to be used to verify PoSt compute performance.
|
||||
It will not send any messages to the chain.`,
|
||||
ArgsUsage: "[deadline index]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.Args().Len() != 1 {
|
||||
return xerrors.Errorf("must pass deadline index")
|
||||
}
|
||||
|
||||
dlIdx, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("could not parse deadline index: %w", err)
|
||||
}
|
||||
|
||||
sapi, scloser, err := lcli.GetStorageMinerAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer scloser()
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
start := time.Now()
|
||||
res, err := sapi.ComputeWindowPoSt(ctx, dlIdx, types.EmptyTSK)
|
||||
fmt.Printf("Took %s\n", time.Now().Sub(start))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
jr, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(string(jr))
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user