Merge pull request #9273 from filecoin-project/release/v1.17.1
build: release: v1.17.1
commit 8db6a939c1
@@ -46,6 +46,35 @@ commands:
     steps:
       - run: sudo apt-get update
       - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
+      - when:
+          condition: <<parameters.darwin>>
+          steps:
+            - run:
+                name: Install Go
+                command: |
+                  curl https://dl.google.com/go/go1.17.9.darwin-amd64.pkg -o /tmp/go.pkg && \
+                  sudo installer -pkg /tmp/go.pkg -target /
+            - run:
+                name: Export Go
+                command: |
+                  echo 'export GOPATH="${HOME}/go"' >> $BASH_ENV
+            - run: go version
+            - run:
+                name: Install pkg-config, goreleaser, and sha512sum
+                command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config goreleaser/tap/goreleaser coreutils
+            - run:
+                name: Install Rust
+                command: |
+                  curl https://sh.rustup.rs -sSf | sh -s -- -y
+            - run:
+                name: Install hwloc
+                command: |
+                  mkdir ~/hwloc
+                  curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
+                  cd ~/hwloc
+                  tar -xvzpf hwloc-2.4.1.tar.gz
+                  cd hwloc-2.4.1
+                  ./configure && make && sudo make install
       - run: git submodule sync
       - run: git submodule update --init
   download-params:
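Note: the new `when: <<parameters.darwin>>` branch bootstraps the full macOS toolchain inside the shared `prepare` command: Go 1.17.9 from the official pkg, goreleaser and coreutils (for `sha512sum`) via Homebrew, Rust via rustup, and hwloc 2.4.1 built from source. A quick post-install sanity check (a sketch; these checks are not part of the config):

```bash
go version                     # Go 1.17.9, installed from the .pkg
goreleaser --version           # from the goreleaser/tap/goreleaser tap
sha512sum --version            # provided by coreutils on macOS
rustc --version                # installed by rustup
pkg-config --exists hwloc && echo "hwloc found"   # built into /usr/local
```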
@@ -77,6 +106,16 @@ commands:
           tar -xf go-ipfs_v0.12.2_linux-amd64.tar.gz
           mv go-ipfs/ipfs /usr/local/bin/ipfs
           chmod +x /usr/local/bin/ipfs
+  install_ipfs_macos:
+    steps:
+      - run: |
+          curl -O https://dist.ipfs.io/kubo/v0.14.0/kubo_v0.14.0_darwin-amd64.tar.gz
+          tar -xvzf kubo_v0.14.0_darwin-amd64.tar.gz
+          pushd kubo
+          sudo bash install.sh
+          popd
+          rm -rf kubo/
+          rm kubo_v0.14.0_darwin-amd64.tar.gz
   git_fetch_all_tags:
     steps:
       - run:
@@ -173,16 +212,6 @@ jobs:
         type: string
         default: standard-verbose
        description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
-      coverage:
-        type: string
-        default: -coverprofile=coverage.txt -coverpkg=github.com/filecoin-project/lotus/...
-        description: Coverage flag. Set to the empty string to disable.
-      codecov-upload:
-        type: boolean
-        default: true
-        description: |
-          Upload coverage report to https://codecov.io/. Requires the codecov API token to be
-          set as an environment variable for private projects.
     executor: << parameters.executor >>
     steps:
       - install-deps
@@ -205,7 +234,6 @@ jobs:
             --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
             --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
             -- \
-            << parameters.coverage >> \
             << parameters.go-test-flags >> \
             << parameters.target >>
           no_output_timeout: 30m
@@ -213,13 +241,6 @@ jobs:
           path: /tmp/test-reports
       - store_artifacts:
           path: /tmp/test-artifacts/<< parameters.suite >>.json
-      - when:
-          condition: << parameters.codecov-upload >>
-          steps:
-            - run:
-                shell: /bin/bash -eo pipefail
-                command: |
-                  bash <(curl -s https://codecov.io/bash)
 
   test-conformance:
     description: |
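Note: with the `coverage` and `codecov-upload` parameters removed (codecov reporting was dropped in #9062, per the changelog below), the assembled test command reduces to gotestsum plus the suite's flags. For example, a sketch assembled from the parameters visible in the diff:

```bash
# What the test job now runs for one suite, with the coverage flags gone
# (the real command is templated by CircleCI from the job parameters).
gotestsum \
  --format standard-verbose \
  --junitfile /tmp/test-reports/itest-deals/junit.xml \
  --jsonfile /tmp/test-artifacts/itest-deals.json \
  -- \
  ./itests/deals_test.go
```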
@@ -353,58 +374,42 @@ jobs:
       - run:
           name: "trigger payment channel stress testplan on taas"
           command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
 
   build-macos:
     description: build darwin lotus binary
+    parameters:
+      publish:
+        default: false
+        description: publish github release and homebrew?
+        type: boolean
     macos:
-      xcode: "12.5.0"
+      xcode: "13.4.1"
     working_directory: ~/go/src/github.com/filecoin-project/lotus
     steps:
       - prepare:
           linux: false
           darwin: true
-      - run:
-          name: Install go
-          command: |
-            curl -O https://dl.google.com/go/go1.17.9.darwin-amd64.pkg && \
-            sudo installer -pkg go1.17.9.darwin-amd64.pkg -target /
-      - run:
-          name: Install pkg-config
-          command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
-      - run: go version
-      - run:
-          name: Install Rust
-          command: |
-            curl https://sh.rustup.rs -sSf | sh -s -- -y
-      - run:
-          name: Install hwloc
-          command: |
-            mkdir ~/hwloc
-            curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
-            cd ~/hwloc
-            tar -xvzpf hwloc-2.4.1.tar.gz
-            cd hwloc-2.4.1
-            ./configure && make && sudo make install
+      - install_ipfs_macos
       - restore_cache:
           name: restore cargo cache
           key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
-      - run:
-          command: make build
-          no_output_timeout: 30m
-      - run:
-          name: check tag and version output match
-          command: ./scripts/version-check.sh ./lotus
+      - when:
+          condition: << parameters.publish >>
+          steps:
+            - run: goreleaser release --rm-dist
+            - run: ./scripts/generate-checksums.sh
+            - run: ./scripts/publish-checksums.sh
+      - when:
+          condition:
+            not: << parameters.publish >>
+          steps:
+            - run: goreleaser release --rm-dist --snapshot
+            - run: ./scripts/generate-checksums.sh
       - store_artifacts:
-          path: lotus
-      - store_artifacts:
-          path: lotus-miner
-      - store_artifacts:
-          path: lotus-worker
-      - run: mkdir darwin && mv lotus lotus-miner lotus-worker darwin/
+          path: dist
       - persist_to_workspace:
           root: "."
           paths:
-            - darwin
+            - dist
       - save_cache:
           name: save cargo cache
           key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
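Note: `make build` plus the per-binary artifact steps are gone; the job now delegates to goreleaser and publishes only when the new `publish` parameter is true. The two branches, flattened to shell (`PUBLISH` stands in for the boolean job parameter; the script names come straight from the diff):

```bash
if [ "$PUBLISH" = "true" ]; then
  goreleaser release --rm-dist            # builds and uploads a GitHub release
  ./scripts/generate-checksums.sh
  ./scripts/publish-checksums.sh
else
  goreleaser release --rm-dist --snapshot # local-only build, no tag required
  ./scripts/generate-checksums.sh
fi
```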
@@ -541,10 +546,6 @@ jobs:
         default: false
         description: publish linux binaries?
         type: boolean
-      darwin:
-        default: false
-        description: publish darwin binaries?
-        type: boolean
       appimage:
         default: false
         description: publish appimage binaries?
@@ -564,11 +565,6 @@ jobs:
     steps:
       - run: ./scripts/build-arch-bundle.sh linux
       - run: ./scripts/publish-arch-release.sh linux
-      - when:
-          condition: << parameters.darwin>>
-          steps:
-            - run: ./scripts/build-arch-bundle.sh darwin
-            - run: ./scripts/publish-arch-release.sh darwin
       - when:
           condition: << parameters.appimage >>
           steps:
@@ -585,17 +581,29 @@ jobs:
         type: string
         default: "edge"
         description: snapcraft channel
+      snap-name:
+        type: string
+        default: 'lotus-filecoin'
+        description: name of snap in snap store
     steps:
       - checkout
       - run:
-          name: install snapcraft
+          name: Install snapcraft
           command: sudo snap install snapcraft --classic
       - run:
-          name: build snap
-          command: snapcraft --use-lxd
+          name: Build << parameters.snap-name >> snap
+          command: |
+            if [ "<< parameters.snap-name >>" != 'lotus-filecoin' ]; then
+              cat snap/snapcraft.yaml | sed 's/lotus-filecoin/lotus/' > edited-snapcraft.yaml
+              mv edited-snapcraft.yaml snap/snapcraft.yaml
+            fi
+
+            snapcraft --use-lxd --debug
       - run:
-          name: publish snap
-          command: snapcraft push *.snap --release << parameters.channel >>
+          name: Publish snap to << parameters.channel >> channel
+          shell: /bin/bash -o pipefail
+          command: |
+            snapcraft upload *.snap --release << parameters.channel >>
 
   build-and-push-image:
     description: build and push docker images to public AWS ECR registry
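Note: the build step is now parameterized over the snap name: for the `lotus` snap it rewrites `snap/snapcraft.yaml` in place before building, and publishing uses `snapcraft upload` (the successor of the deprecated `snapcraft push`). Flattened to plain shell (`SNAP_NAME` and `CHANNEL` stand in for the job parameters):

```bash
if [ "$SNAP_NAME" != 'lotus-filecoin' ]; then
  # the lotus snap reuses the lotus-filecoin snapcraft.yaml with the name swapped
  sed 's/lotus-filecoin/lotus/' snap/snapcraft.yaml > edited-snapcraft.yaml
  mv edited-snapcraft.yaml snap/snapcraft.yaml
fi
snapcraft --use-lxd --debug
snapcraft upload *.snap --release "$CHANNEL"
```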
@@ -747,26 +755,32 @@ jobs:
           name: docker build
           command: |
             docker build --target lotus -t filecoin/lotus:<< parameters.tag >> -f Dockerfile.lotus .
+            docker build --target lotus-gateway -t filecoin/lotus-gateway:<< parameters.tag >> -f Dockerfile.lotus .
             docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:<< parameters.tag >> -f Dockerfile.lotus .
             if [[ ! -z $CIRCLE_SHA1 ]]; then
               docker build --target lotus -t filecoin/lotus:$CIRCLE_SHA1 -f Dockerfile.lotus .
+              docker build --target lotus-gateway -t filecoin/lotus-gateway:$CIRCLE_SHA1 -f Dockerfile.lotus .
               docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_SHA1 -f Dockerfile.lotus .
             fi
             if [[ ! -z $CIRCLE_TAG ]]; then
               docker build --target lotus -t filecoin/lotus:$CIRCLE_TAG -f Dockerfile.lotus .
+              docker build --target lotus-gateway -t filecoin/lotus-gateway:$CIRCLE_TAG -f Dockerfile.lotus .
               docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_TAG -f Dockerfile.lotus .
             fi
       - run:
           name: docker push
           command: |
             docker push filecoin/lotus:<< parameters.tag >>
+            docker push filecoin/lotus-gateway:<< parameters.tag >>
             docker push filecoin/lotus-all-in-one:<< parameters.tag >>
             if [[ ! -z $CIRCLE_SHA1 ]]; then
               docker push filecoin/lotus:$CIRCLE_SHA1
+              docker push filecoin/lotus-gateway:$CIRCLE_SHA1
               docker push filecoin/lotus-all-in-one:$CIRCLE_SHA1
             fi
             if [[ ! -z $CIRCLE_TAG ]]; then
               docker push filecoin/lotus:$CIRCLE_TAG
+              docker push filecoin/lotus-gateway:$CIRCLE_TAG
               docker push filecoin/lotus-all-in-one:$CIRCLE_TAG
             fi
 
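Note: the build and push steps above now cover three image targets with the same tag set. An equivalent loop formulation (a sketch, not the project's script; `PARAM_TAG` stands in for `<< parameters.tag >>`):

```bash
for target in lotus lotus-gateway lotus-all-in-one; do
  for tag in "$PARAM_TAG" "$CIRCLE_SHA1" "$CIRCLE_TAG"; do
    [ -z "$tag" ] && continue   # the diff guards each optional tag the same way
    docker build --target "$target" -t "filecoin/$target:$tag" -f Dockerfile.lotus .
    docker push "filecoin/$target:$tag"
  done
done
```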
@@ -875,6 +889,11 @@ workflows:
           suite: itest-deals
           target: "./itests/deals_test.go"
 
+      - test:
+          name: test-itest-gas_estimation
+          suite: itest-gas_estimation
+          target: "./itests/gas_estimation_test.go"
+
       - test:
           name: test-itest-gateway
           suite: itest-gateway
@@ -905,11 +924,26 @@ workflows:
           suite: itest-multisig
           target: "./itests/multisig_test.go"
 
+      - test:
+          name: test-itest-net
+          suite: itest-net
+          target: "./itests/net_test.go"
+
       - test:
           name: test-itest-nonce
           suite: itest-nonce
           target: "./itests/nonce_test.go"
 
+      - test:
+          name: test-itest-path_detach_redeclare
+          suite: itest-path_detach_redeclare
+          target: "./itests/path_detach_redeclare_test.go"
+
+      - test:
+          name: test-itest-path_type_filters
+          suite: itest-path_type_filters
+          target: "./itests/path_type_filters_test.go"
+
       - test:
           name: test-itest-paych_api
           suite: itest-paych_api
@@ -965,6 +999,11 @@ workflows:
           suite: itest-self_sent_txn
           target: "./itests/self_sent_txn_test.go"
 
+      - test:
+          name: test-itest-splitstore
+          suite: itest-splitstore
+          target: "./itests/splitstore_test.go"
+
       - test:
           name: test-itest-tape
           suite: itest-tape
@@ -985,6 +1024,11 @@ workflows:
           suite: itest-wdpost_dispute
           target: "./itests/wdpost_dispute_test.go"
 
+      - test:
+          name: test-itest-wdpost_no_miner_storage
+          suite: itest-wdpost_no_miner_storage
+          target: "./itests/wdpost_no_miner_storage_test.go"
+
       - test:
           name: test-itest-wdpost
           suite: itest-wdpost
@@ -1023,11 +1067,9 @@ workflows:
           proofs-log-test: "1"
       - test-conformance:
           suite: conformance
-          codecov-upload: false
           target: "./conformance"
       - test-conformance:
           name: test-conformance-bleeding-edge
-          codecov-upload: false
           suite: conformance-bleeding-edge
           target: "./conformance"
           vectors-branch: specs-actors-v7
@@ -1054,10 +1096,20 @@ workflows:
             - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - build-lotus-soup
       - build-macos:
+          name: publish-macos
+          publish: true
           filters:
+            branches:
+              ignore:
+                - /.*/
             tags:
               only:
                 - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-macos:
+          filters:
+            branches:
+              only:
+                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
       - build-appimage:
           filters:
             branches:
@@ -1066,18 +1118,6 @@ workflows:
           tags:
             only:
               - /^v\d+\.\d+\.\d+(-rc\d+)?$/
-      - publish:
-          name: publish-macos
-          darwin: true
-          requires:
-            - build-macos
-          filters:
-            branches:
-              ignore:
-                - /.*/
-            tags:
-              only:
-                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - publish:
           name: publish-linux
           linux: true
@@ -1103,27 +1143,63 @@ workflows:
             only:
               - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - build-and-push-image:
+          name: build-and-push/lotus-all-in-one
           dockerfile: Dockerfile.lotus
           path: .
           repo: lotus-dev
           tag: '${CIRCLE_SHA1:0:8}'
           target: lotus-all-in-one
       - build-and-push-image:
+          name: build-and-push/lotus-test
           dockerfile: Dockerfile.lotus
           path: .
           repo: lotus-test
           tag: '${CIRCLE_SHA1:0:8}'
           target: lotus-test
       - publish-snapcraft:
-          name: publish-snapcraft-stable
+          name: "Publish Snapcraft (lotus-filecoin / candidate)"
           channel: stable
+          snap-name: lotus-filecoin
           filters:
             branches:
               ignore:
                 - /.*/
             tags:
               only:
-                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+                - /^v\d+\.\d+\.\d+$/
+      - publish-snapcraft:
+          name: "Publish Snapcraft (lotus-filecoin / candidate)"
+          channel: candidate
+          snap-name: lotus-filecoin
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+-rc\d+$/
+      - publish-snapcraft:
+          name: "Publish Snapcraft (lotus / stable)"
+          channel: stable
+          snap-name: lotus
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+$/
+      - publish-snapcraft:
+          name: "Publish Snapcraft (lotus / candidate)"
+          channel: candidate
+          snap-name: lotus
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+-rc\d+$/
       - publish-dockerhub:
           name: publish-dockerhub
           tag: stable
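Note: the new tag filters split final releases from release candidates: bare `vX.Y.Z` tags publish to the stable channel, while `vX.Y.Z-rcN` tags go to candidate. A demonstration of the two regexes (translated from CircleCI's `\d` syntax to POSIX classes for `grep -E`):

```bash
# Stable: matches v1.17.1 but not v1.17.1-rc2
echo "v1.17.1" | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$'
# Candidate: matches only release-candidate tags
echo "v1.17.1-rc2" | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+-rc[0-9]+$'
```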
@@ -1145,8 +1221,13 @@ workflows:
             - master
     jobs:
       - publish-snapcraft:
-          name: publish-snapcraft-nightly
+          name: "Publish Snapcraft Nightly (lotus-filecoin / edge)"
           channel: edge
+          snap-name: lotus-filecoin
+      - publish-snapcraft:
+          name: "Publish Snapcraft Nightly (lotus / edge)"
+          channel: edge
+          snap-name: lotus
       - publish-dockerhub:
           name: publish-dockerhub-nightly
           tag: nightly
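Note: the hunks that follow apply the same changes to the CircleCI template the generated config comes from (presumably the source consumed by the `circleci` target in the Makefile's `gen` rule — an assumption, since the extraction dropped the file names). In the template, a literal `[[ ... ]]` shell test has to be emitted through the template's own `[[" ... "]]` quoting, which is why the docker conditionals below look different; they render to plain bash:

```bash
# Template form (as it appears in the hunks below):
#   if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then
# Rendered form, as it lands in the generated config:
if [[ ! -z "$CIRCLE_SHA1" ]]; then
  echo "tagging images with commit $CIRCLE_SHA1"
fi
```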
@@ -46,6 +46,35 @@ commands:
     steps:
       - run: sudo apt-get update
       - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
+      - when:
+          condition: <<parameters.darwin>>
+          steps:
+            - run:
+                name: Install Go
+                command: |
+                  curl https://dl.google.com/go/go1.17.9.darwin-amd64.pkg -o /tmp/go.pkg && \
+                  sudo installer -pkg /tmp/go.pkg -target /
+            - run:
+                name: Export Go
+                command: |
+                  echo 'export GOPATH="${HOME}/go"' >> $BASH_ENV
+            - run: go version
+            - run:
+                name: Install pkg-config, goreleaser, and sha512sum
+                command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config goreleaser/tap/goreleaser coreutils
+            - run:
+                name: Install Rust
+                command: |
+                  curl https://sh.rustup.rs -sSf | sh -s -- -y
+            - run:
+                name: Install hwloc
+                command: |
+                  mkdir ~/hwloc
+                  curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
+                  cd ~/hwloc
+                  tar -xvzpf hwloc-2.4.1.tar.gz
+                  cd hwloc-2.4.1
+                  ./configure && make && sudo make install
       - run: git submodule sync
       - run: git submodule update --init
   download-params:
@@ -77,6 +106,16 @@ commands:
           tar -xf go-ipfs_v0.12.2_linux-amd64.tar.gz
           mv go-ipfs/ipfs /usr/local/bin/ipfs
           chmod +x /usr/local/bin/ipfs
+  install_ipfs_macos:
+    steps:
+      - run: |
+          curl -O https://dist.ipfs.io/kubo/v0.14.0/kubo_v0.14.0_darwin-amd64.tar.gz
+          tar -xvzf kubo_v0.14.0_darwin-amd64.tar.gz
+          pushd kubo
+          sudo bash install.sh
+          popd
+          rm -rf kubo/
+          rm kubo_v0.14.0_darwin-amd64.tar.gz
   git_fetch_all_tags:
     steps:
       - run:
@@ -173,16 +212,6 @@ jobs:
         type: string
         default: standard-verbose
         description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
-      coverage:
-        type: string
-        default: -coverprofile=coverage.txt -coverpkg=github.com/filecoin-project/lotus/...
-        description: Coverage flag. Set to the empty string to disable.
-      codecov-upload:
-        type: boolean
-        default: true
-        description: |
-          Upload coverage report to https://codecov.io/. Requires the codecov API token to be
-          set as an environment variable for private projects.
     executor: << parameters.executor >>
     steps:
       - install-deps
@@ -205,7 +234,6 @@ jobs:
             --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
             --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
             -- \
-            << parameters.coverage >> \
             << parameters.go-test-flags >> \
             << parameters.target >>
           no_output_timeout: 30m
@@ -213,13 +241,6 @@ jobs:
           path: /tmp/test-reports
       - store_artifacts:
           path: /tmp/test-artifacts/<< parameters.suite >>.json
-      - when:
-          condition: << parameters.codecov-upload >>
-          steps:
-            - run:
-                shell: /bin/bash -eo pipefail
-                command: |
-                  bash <(curl -s https://codecov.io/bash)
 
   test-conformance:
     description: |
@@ -353,58 +374,42 @@ jobs:
       - run:
           name: "trigger payment channel stress testplan on taas"
           command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
 
   build-macos:
     description: build darwin lotus binary
+    parameters:
+      publish:
+        default: false
+        description: publish github release and homebrew?
+        type: boolean
     macos:
-      xcode: "12.5.0"
+      xcode: "13.4.1"
     working_directory: ~/go/src/github.com/filecoin-project/lotus
     steps:
       - prepare:
           linux: false
           darwin: true
-      - run:
-          name: Install go
-          command: |
-            curl -O https://dl.google.com/go/go1.17.9.darwin-amd64.pkg && \
-            sudo installer -pkg go1.17.9.darwin-amd64.pkg -target /
-      - run:
-          name: Install pkg-config
-          command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
-      - run: go version
-      - run:
-          name: Install Rust
-          command: |
-            curl https://sh.rustup.rs -sSf | sh -s -- -y
-      - run:
-          name: Install hwloc
-          command: |
-            mkdir ~/hwloc
-            curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
-            cd ~/hwloc
-            tar -xvzpf hwloc-2.4.1.tar.gz
-            cd hwloc-2.4.1
-            ./configure && make && sudo make install
+      - install_ipfs_macos
      - restore_cache:
           name: restore cargo cache
           key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
-      - run:
-          command: make build
-          no_output_timeout: 30m
-      - run:
-          name: check tag and version output match
-          command: ./scripts/version-check.sh ./lotus
+      - when:
+          condition: << parameters.publish >>
+          steps:
+            - run: goreleaser release --rm-dist
+            - run: ./scripts/generate-checksums.sh
+            - run: ./scripts/publish-checksums.sh
+      - when:
+          condition:
+            not: << parameters.publish >>
+          steps:
+            - run: goreleaser release --rm-dist --snapshot
+            - run: ./scripts/generate-checksums.sh
       - store_artifacts:
-          path: lotus
-      - store_artifacts:
-          path: lotus-miner
-      - store_artifacts:
-          path: lotus-worker
-      - run: mkdir darwin && mv lotus lotus-miner lotus-worker darwin/
+          path: dist
       - persist_to_workspace:
           root: "."
           paths:
-            - darwin
+            - dist
       - save_cache:
           name: save cargo cache
           key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
@@ -541,10 +546,6 @@ jobs:
         default: false
         description: publish linux binaries?
         type: boolean
-      darwin:
-        default: false
-        description: publish darwin binaries?
-        type: boolean
       appimage:
         default: false
         description: publish appimage binaries?
@@ -564,11 +565,6 @@ jobs:
     steps:
       - run: ./scripts/build-arch-bundle.sh linux
       - run: ./scripts/publish-arch-release.sh linux
-      - when:
-          condition: << parameters.darwin>>
-          steps:
-            - run: ./scripts/build-arch-bundle.sh darwin
-            - run: ./scripts/publish-arch-release.sh darwin
       - when:
           condition: << parameters.appimage >>
           steps:
@@ -585,17 +581,29 @@ jobs:
         type: string
         default: "edge"
         description: snapcraft channel
+      snap-name:
+        type: string
+        default: 'lotus-filecoin'
+        description: name of snap in snap store
     steps:
       - checkout
       - run:
-          name: install snapcraft
+          name: Install snapcraft
           command: sudo snap install snapcraft --classic
       - run:
-          name: build snap
-          command: snapcraft --use-lxd
+          name: Build << parameters.snap-name >> snap
+          command: |
+            if [ "<< parameters.snap-name >>" != 'lotus-filecoin' ]; then
+              cat snap/snapcraft.yaml | sed 's/lotus-filecoin/lotus/' > edited-snapcraft.yaml
+              mv edited-snapcraft.yaml snap/snapcraft.yaml
+            fi
+
+            snapcraft --use-lxd --debug
       - run:
-          name: publish snap
-          command: snapcraft push *.snap --release << parameters.channel >>
+          name: Publish snap to << parameters.channel >> channel
+          shell: /bin/bash -o pipefail
+          command: |
+            snapcraft upload *.snap --release << parameters.channel >>
 
   build-and-push-image:
     description: build and push docker images to public AWS ECR registry
@@ -747,26 +755,32 @@ jobs:
           name: docker build
           command: |
             docker build --target lotus -t filecoin/lotus:<< parameters.tag >> -f Dockerfile.lotus .
+            docker build --target lotus-gateway -t filecoin/lotus-gateway:<< parameters.tag >> -f Dockerfile.lotus .
             docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:<< parameters.tag >> -f Dockerfile.lotus .
             if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then
               docker build --target lotus -t filecoin/lotus:$CIRCLE_SHA1 -f Dockerfile.lotus .
+              docker build --target lotus-gateway -t filecoin/lotus-gateway:$CIRCLE_SHA1 -f Dockerfile.lotus .
               docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_SHA1 -f Dockerfile.lotus .
             fi
             if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
               docker build --target lotus -t filecoin/lotus:$CIRCLE_TAG -f Dockerfile.lotus .
+              docker build --target lotus-gateway -t filecoin/lotus-gateway:$CIRCLE_TAG -f Dockerfile.lotus .
               docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_TAG -f Dockerfile.lotus .
             fi
       - run:
           name: docker push
           command: |
             docker push filecoin/lotus:<< parameters.tag >>
+            docker push filecoin/lotus-gateway:<< parameters.tag >>
             docker push filecoin/lotus-all-in-one:<< parameters.tag >>
             if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then
               docker push filecoin/lotus:$CIRCLE_SHA1
+              docker push filecoin/lotus-gateway:$CIRCLE_SHA1
               docker push filecoin/lotus-all-in-one:$CIRCLE_SHA1
             fi
             if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
               docker push filecoin/lotus:$CIRCLE_TAG
+              docker push filecoin/lotus-gateway:$CIRCLE_TAG
               docker push filecoin/lotus-all-in-one:$CIRCLE_TAG
             fi
 
@@ -803,11 +817,9 @@ workflows:
           proofs-log-test: "1"
       - test-conformance:
           suite: conformance
-          codecov-upload: false
           target: "./conformance"
       - test-conformance:
           name: test-conformance-bleeding-edge
-          codecov-upload: false
           suite: conformance-bleeding-edge
           target: "./conformance"
           vectors-branch: specs-actors-v7
@@ -834,10 +846,20 @@ workflows:
             - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - build-lotus-soup
       - build-macos:
+          name: publish-macos
+          publish: true
           filters:
+            branches:
+              ignore:
+                - /.*/
             tags:
               only:
                 - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-macos:
+          filters:
+            branches:
+              only:
+                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
       - build-appimage:
           filters:
             branches:
@@ -846,18 +868,6 @@ workflows:
           tags:
             only:
               - /^v\d+\.\d+\.\d+(-rc\d+)?$/
-      - publish:
-          name: publish-macos
-          darwin: true
-          requires:
-            - build-macos
-          filters:
-            branches:
-              ignore:
-                - /.*/
-            tags:
-              only:
-                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - publish:
           name: publish-linux
           linux: true
@@ -883,27 +893,63 @@ workflows:
             only:
               - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - build-and-push-image:
+          name: build-and-push/lotus-all-in-one
           dockerfile: Dockerfile.lotus
           path: .
           repo: lotus-dev
           tag: '${CIRCLE_SHA1:0:8}'
           target: lotus-all-in-one
       - build-and-push-image:
+          name: build-and-push/lotus-test
           dockerfile: Dockerfile.lotus
           path: .
           repo: lotus-test
           tag: '${CIRCLE_SHA1:0:8}'
           target: lotus-test
       - publish-snapcraft:
-          name: publish-snapcraft-stable
+          name: "Publish Snapcraft (lotus-filecoin / candidate)"
           channel: stable
+          snap-name: lotus-filecoin
           filters:
             branches:
               ignore:
                 - /.*/
             tags:
               only:
-                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+                - /^v\d+\.\d+\.\d+$/
+      - publish-snapcraft:
+          name: "Publish Snapcraft (lotus-filecoin / candidate)"
+          channel: candidate
+          snap-name: lotus-filecoin
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+-rc\d+$/
+      - publish-snapcraft:
+          name: "Publish Snapcraft (lotus / stable)"
+          channel: stable
+          snap-name: lotus
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+$/
+      - publish-snapcraft:
+          name: "Publish Snapcraft (lotus / candidate)"
+          channel: candidate
+          snap-name: lotus
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+-rc\d+$/
       - publish-dockerhub:
           name: publish-dockerhub
           tag: stable
@@ -925,8 +971,13 @@ workflows:
             - master
     jobs:
       - publish-snapcraft:
-          name: publish-snapcraft-nightly
+          name: "Publish Snapcraft Nightly (lotus-filecoin / edge)"
           channel: edge
+          snap-name: lotus-filecoin
+      - publish-snapcraft:
+          name: "Publish Snapcraft Nightly (lotus / edge)"
+          channel: edge
+          snap-name: lotus
       - publish-dockerhub:
           name: publish-dockerhub-nightly
           tag: nightly
.codecov.yml (deleted)
@@ -1,74 +0,0 @@
-ignore:
-  # Auto generated
-  - "^.*_gen.go$"
-  - "^.*/mock_full.go$"
-  # Old actors.
-  - "^chain/actors/builtin/[^/]*/(message|state|v)[0-4]\\.go$" # We test the latest version only.
-  # Tests
-  - "api/test/**"
-  - "conformance/**"
-  # Generators
-  - "gen/**"
-  - "chain/actors/agen/**"
-  # Non-critical utilities
-  - "api/docgen/**"
-  - "api/docgen-openrpc/**"
-coverage:
-  status:
-    patch: off
-    project:
-      threshold: 1%
-      tools-and-tests:
-        target: auto
-        threshold: 1%
-        informational: true
-        paths:
-          - "testplans"
-          - "tools"
-          - "system"
-          - "snap"
-          - "lotuspond"
-          - "conformance"
-          - "scripts"
-          - "gen"
-          - "build"
-      markets:
-        target: auto
-        threshold: 1%
-        informational: true
-        paths:
-          - "markets"
-          - "paychmgr"
-      miner:
-        target: auto
-        threshold: 1.5%
-        informational: true
-        paths:
-          - "miner"
-          - "storage"
-      chain:
-        target: auto
-        threshold: 1%
-        informational: true
-        paths:
-          - "chain"
-      node:
-        target: auto
-        threshold: 1%
-        informational: true
-        paths:
-          - "node"
-          - "blockstore"
-          - "metrics"
-          - "lib"
-          - "genesis"
-          - "gateway"
-          - "api"
-          - "journal"
-      cli:
-        target: auto
-        threshold: 1%
-        informational: true
-        paths:
-          - "cli"
-          - "cmd"
.gitignore (vendored)
@@ -51,3 +51,5 @@ scratchpad
 
 build/builtin-actors/v*
 build/builtin-actors/*.car
+
+dist/
.goreleaser.yaml (new file)
@@ -0,0 +1,165 @@
+project_name: lotus
+
+before:
+  hooks:
+    - go mod tidy
+    - make deps
+
+universal_binaries:
+  - id: lotus
+    replace: true
+    name_template: lotus
+    ids:
+      - lotus_darwin_amd64
+      - lotus_darwin_arm64
+  - id: lotus-miner
+    replace: true
+    name_template: lotus-miner
+    ids:
+      - lotus-miner_darwin_amd64
+      - lotus-miner_darwin_arm64
+  - id: lotus-worker
+    replace: true
+    name_template: lotus-worker
+    ids:
+      - lotus-worker_darwin_amd64
+      - lotus-worker_darwin_arm64
+
+builds:
+  - id: lotus_darwin_amd64
+    main: ./cmd/lotus
+    binary: lotus
+    goos:
+      - darwin
+    goarch:
+      - amd64
+    env:
+      - CGO_ENABLED=1
+      - FFI_BUILD_FROM_SOURCE=1
+    ldflags:
+      - -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
+  - id: lotus-miner_darwin_amd64
+    main: ./cmd/lotus-miner
+    binary: lotus-miner
+    goos:
+      - darwin
+    goarch:
+      - amd64
+    env:
+      - CGO_ENABLED=1
+      - FFI_BUILD_FROM_SOURCE=1
+    ldflags:
+      - -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
+  - id: lotus-worker_darwin_amd64
+    main: ./cmd/lotus-worker
+    binary: lotus-worker
+    goos:
+      - darwin
+    goarch:
+      - amd64
+    env:
+      - CGO_ENABLED=1
+      - FFI_BUILD_FROM_SOURCE=1
+    ldflags:
+      - -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
+  - id: lotus_darwin_arm64
+    main: ./cmd/lotus
+    binary: lotus
+    goos:
+      - darwin
+    goarch:
+      - arm64
+    env:
+      - CGO_ENABLED=1
+      - FFI_BUILD_FROM_SOURCE=1
+      - CPATH=/opt/homebrew/include
+      - LIBRARY_PATH=/opt/homebrew/lib
+    ldflags:
+      - -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
+  - id: lotus-miner_darwin_arm64
+    main: ./cmd/lotus-miner
+    binary: lotus-miner
+    goos:
+      - darwin
+    goarch:
+      - arm64
+    env:
+      - CGO_ENABLED=1
+      - FFI_BUILD_FROM_SOURCE=1
+      - CPATH=/opt/homebrew/include
+      - LIBRARY_PATH=/opt/homebrew/lib
+    ldflags:
+      - -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
+  - id: lotus-worker_darwin_arm64
+    main: ./cmd/lotus-worker
+    binary: lotus-worker
+    goos:
+      - darwin
+    goarch:
+      - arm64
+    env:
+      - CGO_ENABLED=1
+      - FFI_BUILD_FROM_SOURCE=1
+      - CPATH=/opt/homebrew/include
+      - LIBRARY_PATH=/opt/homebrew/lib
+    ldflags:
+      - -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
+  # - id: linux
+  #   main: ./cmd/lotus
+  #   binary: lotus
+  #   goos:
+  #     - linux
+  #   goarch:
+  #     - amd64
+  #   env:
+  #     - CGO_ENABLED=1
+  #   ldflags:
+  #     - -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
+
+archives:
+  - id: primary
+    format: tar.gz
+    wrap_in_directory: true
+    files:
+      # this is a dumb but required hack so it doesn't include the default files
+      # https://github.com/goreleaser/goreleaser/issues/602
+      - _n_o_n_e_*
+
+release:
+  github:
+    owner: filecoin-project
+    name: lotus
+  prerelease: auto
+  name_template: "Release v{{.Version}}"
+
+brews:
+  - tap:
+      owner: filecoin-project
+      name: homebrew-lotus
+      branch: master
+    ids:
+      - primary
+    install: |
+      bin.install "lotus"
+      bin.install "lotus-miner"
+      bin.install "lotus-worker"
+    test: |
+      system "#{bin}/lotus --version"
+      system "#{bin}/lotus-miner --version"
+      system "#{bin}/lotus-worker --version"
+    folder: Formula
+    homepage: "https://filecoin.io"
+    description: "A homebrew cask for installing filecoin-project/lotus on MacOS"
+    license: MIT
+    dependencies:
+      - name: pkg-config
+      - name: jq
+      - name: bzr
+      - name: hwloc
+
+# produced manually so we can include cid checksums
+checksum:
+  disable: true
+
+snapshot:
+  name_template: "{{ .Tag }}"
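Note: with checksums disabled in the config (they are produced manually so CID checksums can be included), a local dry run of this goreleaser setup might look like the following — a sketch assuming goreleaser and the Rust/hwloc build dependencies are installed:

```bash
goreleaser release --rm-dist --snapshot   # snapshot mode: no tag or GitHub token needed
ls dist/   # per-arch binaries plus the universal lotus, lotus-miner, lotus-worker
```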
103
CHANGELOG.md
103
CHANGELOG.md
@ -1,5 +1,108 @@
|
|||||||
# Lotus changelog
|
# Lotus changelog
|
||||||
|
|
||||||
|
# v1.17.1 / 2022-09-06
|
||||||
|
|
||||||
|
This is an optional release of Lotus. This release introduces the [Splitstore v2 - beta](https://github.com/filecoin-project/lotus/blob/master/blockstore/splitstore/README.md)(beta). Splitstore aims to reduce the node performance impact that's caused by the Filecoin's very large, and continuously growing datastore. Splitstore v2 introduces the coldstore auto prune/GC feature & some improvements for the hotstore. We welcome all lotus users to join the early testers and try the new Splitstore out, you can leave any feedback or report issues in [this discussion](https://github.com/filecoin-project/lotus/discussions/9179) or create an issue. As always, multiple small bug fixes, new features & improvements are also included in this release.
|
||||||
|
|
||||||
|
|
||||||
|
## New features
|
||||||
|
|
||||||
|
- feat:chain:splitstore auto prune ([filecoin-project/lotus#9123](https://github.com/filecoin-project/lotus/pull/9123))
|
||||||
|
- Trigger SplitStore chain prune on head events. [Link to the documentation](https://lotus.filecoin.io/lotus/manage/chain-management/#cold-store-garbage-collection)
|
||||||
|
- feat:chain:splitstore chain prune ([filecoin-project/lotus#9056](https://github.com/filecoin-project/lotus/pull/9056))
|
||||||
|
- Adds `chain prune` command to trigger manual garbage collection. [Link to the documentation](https://lotus.filecoin.io/lotus/manage/chain-management/#cold-store-garbage-collection)
|
||||||
|
- feat: storage: Path type filters ([filecoin-project/lotus#9013](https://github.com/filecoin-project/lotus/pull/9013))
|
||||||
|
- Adds new fields to `sectorstore.json` to allow file type filtering. [Link to the documentation](https://lotus.filecoin.io/storage-providers/operate/custom-storage-layout/#filter-sector-types-1)
|
||||||
|
- feat: sealing: storage redeclare/detach ([filecoin-project/lotus#9032](https://github.com/filecoin-project/lotus/pull/9032))
|
||||||
|
- Adds new Lotus commands to detach and redeclare storage paths. [Link to the documentation](https://lotus.filecoin.io/storage-providers/operate/custom-storage-layout/#detach-storage-paths)
|
||||||
|
- feat: worker: Add stop cmd for lotus worker ([filecoin-project/lotus#9101](https://github.com/filecoin-project/lotus/pull/9101))
|
||||||
|
- Adds new `lotus-worker stop` command. [Link to the documentation](https://lotus.filecoin.io/storage-providers/seal-workers/seal-workers/#stop-the-worker)
|
||||||
|
- feat: market: Add lotus-shed cmd to get total active deal storage ([filecoin-project/lotus#9113](https://github.com/filecoin-project/lotus/pull/9113))
|
||||||
|
- `get-deals-total-storage` - View the total storage available in all active market deals
|
||||||
|
- feat: wdpost: Envvar for limiting recovering sectors ([filecoin-project/lotus#9106](https://github.com/filecoin-project/lotus/pull/9106))
|
||||||
|
- Adds new envvar to limit the number of sectors declared in the recover message
|
||||||
|
|
||||||
|
|
||||||
|
## Improvements
|
||||||
|
|
||||||
|
- feat: sealing: Allow overriding worker hostname ([filecoin-project/lotus#9116](https://github.com/filecoin-project/lotus/pull/9116))
|
||||||
|
- feat: build: run fiximports on make actors-gen ([filecoin-project/lotus#9114](https://github.com/filecoin-project/lotus/pull/9114))
|
||||||
|
- feat: FVM: always enable tracing for user-triggered executions ([filecoin-project/lotus#9036](https://github.com/filecoin-project/lotus/pull/9036))
|
||||||
|
- feat: miner cli: proving deadline command enchantments ([filecoin-project/lotus#9109](https://github.com/filecoin-project/lotus/pull/9109))
|
||||||
|
- FVM: Use MaxInt64 for Implicit Message gas limits ([filecoin-project/lotus#9037](https://github.com/filecoin-project/lotus/pull/9037))
|
||||||
|
- lotus shed addr decode
|
||||||
|
- push lotus-gateway to docker hub ([filecoin-project/lotus#8969](https://github.com/filecoin-project/lotus/pull/8969))
|
||||||
|
- Review Response
|
||||||
|
- test: net: net and conngater tests ([filecoin-project/lotus#8084](https://github.com/filecoin-project/lotus/pull/8084))
|
||||||
|
- Update FFI ([filecoin-project/lotus#9139](https://github.com/filecoin-project/lotus/pull/9139))
|
||||||
|
|
||||||
|
## Bug Fixes
|
||||||
|
|
||||||
|
- backport: 9153: detach storage on worker shutdown ([filecoin-project/lotus#9127](https://github.com/filecoin-project/lotus/pull/9165))
|
||||||
|
- fix makegen
|
||||||
|
- fix: build: use GOCC when building lotus-fountain ([filecoin-project/lotus#9127](https://github.com/filecoin-project/lotus/pull/9127))
|
||||||
|
- fix: ci: Forgot a .sh on the end of a the new publish script ([filecoin-project/lotus#9088](https://github.com/filecoin-project/lotus/pull/9088))
|
||||||
|
- fix: cli: ./lotus-miner actor control list, if the owner is not account ([filecoin-project/lotus#9072](https://github.com/filecoin-project/lotus/pull/9072))
|
||||||
|
- fix: deps: update FFI to fix a slow memory leak ([filecoin-project/lotus#9042](https://github.com/filecoin-project/lotus/pull/9042))
|
||||||
|
- fix: FVM: record message applied metrics ([filecoin-project/lotus#9052](https://github.com/filecoin-project/lotus/pull/9052))
|
||||||
|
- fix: gas: estimate gas with a zero base-fee ([filecoin-project/lotus#8991](https://github.com/filecoin-project/lotus/pull/8991))
|
||||||
|
- fix: post: restrict recoveries per deadline ([filecoin-project/lotus#9111](https://github.com/filecoin-project/lotus/pull/9111))
|
||||||
|
- fix: sealing: Workaround for sealing bug ([filecoin-project/lotus#9043](https://github.com/filecoin-project/lotus/pull/9043))
|
||||||
|
- fix: storage: don't panic in getCommitCutoff when precommit is not found ([filecoin-project/lotus#9141](https://github.com/filecoin-project/lotus/pull/9141))
|
||||||
|
- fix: test: deflake TestQuotePriceForUnsealedRetrieval ([filecoin-project/lotus#9084](https://github.com/filecoin-project/lotus/pull/9084))
|
||||||
|
|
||||||
|
## Dependency Updates
|
||||||
|
|
||||||
|
- github.com/multiformats/go-multibase (v0.0.3 -> v0.1.1)
|
||||||
|
|
||||||
|
## Others
|
||||||
|
|
||||||
|
- chore: ci: Update xcode version for macos builds ([filecoin-project/lotus#9164)](https://github.com/filecoin-project/lotus/pull/9164))
|
||||||
|
- Merge branch 'docs/ysrotciv-desc'
|
||||||
|
- Merge branch 'feat/f8-worker-env'
|
||||||
|
- Merge branch 'LexLuthr-feat/minerWithdrawBalanceAPI'
|
||||||
|
- Merge branch 'LexLuthr-feat/SchedRemoveRequest'
|
||||||
|
- base256emoji ([filecoin-project/lotus#9038)](https://github.com/filecoin-project/lotus/pull/9038))
|
||||||
|
- chore: interop: update interop assets ([filecoin-project/lotus#9093)](https://github.com/filecoin-project/lotus/pull/9093))
|
||||||
|
- chore: merge: releases (v1.17.0) to master ([filecoin-project/lotus#9146)](https://github.com/filecoin-project/lotus/pull/9146))
|
||||||
|
- chore: sealer: Fixup typos ([filecoin-project/lotus#9040)](https://github.com/filecoin-project/lotus/pull/9040))
|
||||||
|
- chore:docs:remove readme reference to deprecated specs-actors ([filecoin-project/lotus#8984)](https://github.com/filecoin-project/lotus/pull/8984))
|
||||||
|
- ci : Change default shell options for snapcraft publish ([filecoin-project/lotus#9122)](https://github.com/filecoin-project/lotus/pull/9122))
|
||||||
|
- ci: More tweaks to snapcraft release process ([filecoin-project/lotus#9090)](https://github.com/filecoin-project/lotus/pull/9090))
|
||||||
|
- ci: Publish to both lotus and lotus-filecoin for snap ([filecoin-project/lotus#9119)](https://github.com/filecoin-project/lotus/pull/9119))
|
||||||
|
- ci: Run snap builds for lotus and lotus-filecoin in parallel ([filecoin-project/lotus#9133)](https://github.com/filecoin-project/lotus/pull/9133))
|
||||||
|
- ci: Switches goreleaser notes back to default (keep-existing) ([filecoin-project/lotus#9120)](https://github.com/filecoin-project/lotus/pull/9120))
|
||||||
|
- ci: update snapcraft and release flow logic ([filecoin-project/lotus#8994)](https://github.com/filecoin-project/lotus/pull/8994))
|
||||||
|
- ci: Use goreleaser to build macos universal binaries (including M1 macs) ([filecoin-project/lotus#9096)](https://github.com/filecoin-project/lotus/pull/9096))
|
||||||
|
- ci:testing:remove codecov ([filecoin-project/lotus#9062)](https://github.com/filecoin-project/lotus/pull/9062))
|
||||||
|
|
||||||
|
|
||||||
|
Contributors
|
||||||
|
|
||||||
|
| Contributor | Commits | Lines ± | Files Changed |
|
||||||
|
|-------------|---------|---------|---------------|
|
||||||
|
| Łukasz Magiera | 34 | +2329/-317 | 163 |
|
||||||
|
| ZenGround0 | 2 | +1527/-89 | 38 |
|
||||||
|
| Ian Davis | 14 | +751/-232 | 30 |
|
||||||
|
| LexLuthr | 17 | +480/-225 | 63 |
|
||||||
|
| TheMenko | 4 | +323/-61 | 5 |
|
||||||
|
| Aayush | 10 | +285/-92 | 30 |
|
||||||
|
| beck | 3 | +143/-93 | 3 |
|
||||||
|
| Steven Allen | 4 | +95/-75 | 9 |
|
||||||
|
| zenground0 | 5 | +44/-116 | 9 |
|
||||||
|
| Shrenuj Bansal | 7 | +136/-7 | 16 |
|
||||||
|
| Patrick Deuse | 3 | +76/-57 | 3 |
|
||||||
|
| Jennifer Wang | 3 | +6/-52 | 11 |
|
||||||
|
| zl | 2 | +20/-16 | 2 |
|
||||||
|
| Aayush Rajasekaran | 2 | +6/-6 | 2 |
|
||||||
|
| Clint Armstrong | 1 | +7/-3 | 1 |
|
||||||
|
| Cory Schwartz | 2 | +9/-0 | 2 |
|
||||||
|
| Jorropo | 1 | +3/-2 | 2 |
|
||||||
|
| Geoff Stuart | 1 | +5/-0 | 1 |
|
||||||
|
| Frank Y | 1 | +2/-2 | 2 |
|
||||||
|
| Aloxaf | 1 | +2/-2 | 1 |
|
||||||
|
|
||||||
|
|
||||||
# v1.17.0 / 2022-08-02

This is an optional release of Lotus. This feature release introduces a number of new sealing and scheduler improvements, along with many other new features and bug fixes.
Makefile (11 lines changed)

@@ -174,8 +174,8 @@ lotus-pond-app: lotus-pond-front lotus-pond

 lotus-fountain:
 	rm -f lotus-fountain
-	go build $(GOFLAGS) -o lotus-fountain ./cmd/lotus-fountain
+	$(GOCC) build $(GOFLAGS) -o lotus-fountain ./cmd/lotus-fountain
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build
+	$(GOCC) run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build
 .PHONY: lotus-fountain
 BINS+=lotus-fountain

@@ -302,11 +302,14 @@ type-gen: api-gen

 method-gen: api-gen
 	(cd ./lotuspond/front/src/chain && $(GOCC) run ./methodgen.go)

-actors-gen:
+actors-code-gen:
 	$(GOCC) run ./gen/inline-gen . gen/inlinegen-data.json
 	$(GOCC) run ./chain/actors/agen
 	$(GOCC) fmt ./...

+actors-gen: actors-code-gen fiximports
+.PHONY: actors-gen
+
 bundle-gen:
 	$(GOCC) run ./gen/bundle
 	$(GOCC) fmt ./build/...

@@ -364,7 +367,7 @@ docsgen-openrpc-gateway: docsgen-openrpc-bin

 fiximports:
 	./scripts/fiximports

-gen: actors-gen type-gen method-gen cfgdoc-gen docsgen api-gen circleci bundle-gen fiximports
+gen: actors-code-gen type-gen method-gen cfgdoc-gen docsgen api-gen circleci bundle-gen fiximports
 	@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli"
 .PHONY: gen
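Note on the build change above: replacing bare `go` invocations with the `$(GOCC)` variable makes the Go toolchain overridable per invocation, and `actors-gen` is now an aggregate target that runs `actors-code-gen` followed by `fiximports`. As an illustration (the specific toolchain name is an assumption, not part of the diff), `make GOCC=go1.18 lotus-fountain` would build the binary with an alternative Go compiler found on PATH.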
@@ -31,7 +31,7 @@ Please send an email to security@filecoin.org. See our [security policy](SECURIT

 These repos are independent and reusable modules, but are tightly integrated into Lotus to make up a fully featured Filecoin implementation:

 - [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board)
-- [specs-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)
+- [builtin-actors](https://github.com/filecoin-project/builtin-actors)

 ## Contribute
@@ -169,6 +169,10 @@ type FullNode interface {

 	// If oldmsgskip is set, messages from before the requested roots are also not included.
 	ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read

+	// ChainPrune prunes the stored chain state and garbage collects; only supported if you
+	// are using the splitstore
+	ChainPrune(ctx context.Context, opts PruneOpts) error //perm:admin
+
 	// ChainCheckBlockstore performs an (asynchronous) health check on the chain/state blockstore
 	// if supported by the underlying implementation.
 	ChainCheckBlockstore(context.Context) error //perm:admin

@@ -402,7 +406,7 @@ type FullNode interface {

 	StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error) //perm:read
 	// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
 	//
-	// If a tipset key is provided, and a replacing message is found on chain,
+	// If a tipset key is provided, and a replacing message is not found on chain,
 	// the method will return an error saying that the message wasn't found
 	//
 	// If no tipset key is provided, the appropriate tipset is looked up, and if

@@ -1219,3 +1223,8 @@ type MsigTransaction struct {

 	Approved []address.Address
 }

+type PruneOpts struct {
+	MovingGC    bool
+	RetainState int64
+}
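To make the new surface concrete, here is a minimal, hedged sketch of a client calling `ChainPrune`; the `pruneChain` helper and its chosen option values are illustrative assumptions, while the `ChainPrune` signature and the `PruneOpts` fields come from the diff above.

```go
package example

import (
	"context"
	"log"

	"github.com/filecoin-project/lotus/api"
)

// pruneChain triggers a splitstore prune on a running node (admin permission).
// MovingGC and RetainState mirror the PruneOpts fields added above; the
// values used here are only an example.
func pruneChain(ctx context.Context, node api.FullNode) {
	opts := api.PruneOpts{
		MovingGC:    false, // do not request a moving (full) GC of the coldstore
		RetainState: 0,     // keep no extra state beyond the compaction boundary
	}
	if err := node.ChainPrune(ctx, opts); err != nil {
		log.Printf("ChainPrune failed: %v", err)
	}
}
```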
@@ -48,6 +48,11 @@ type StorageMiner interface {

 	ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) //perm:read
 	ActorAddressConfig(ctx context.Context) (AddressConfig, error) //perm:read

+	// WithdrawBalance allows to withdraw balance from miner actor to owner address
+	// Specify amount as "0" to withdraw full balance. This method returns a message CID
+	// and does not wait for message execution
+	ActorWithdrawBalance(ctx context.Context, amount abi.TokenAmount) (cid.Cid, error) //perm:admin
+
 	MiningBase(context.Context) (*types.TipSet, error) //perm:read

 	ComputeWindowPoSt(ctx context.Context, dlIdx uint64, tsk types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) //perm:admin

@@ -145,14 +150,28 @@ type StorageMiner interface {

 	// SealingSchedDiag dumps internal sealing scheduler state
 	SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin
 	SealingAbort(ctx context.Context, call storiface.CallID) error //perm:admin
+	//SealingSchedRemove removes a request from sealing pipeline
+	SealingRemoveRequest(ctx context.Context, schedId uuid.UUID) error //perm:admin

-	// SectorIndex
+	// paths.SectorIndex
 	StorageAttach(context.Context, storiface.StorageInfo, fsutil.FsStat) error //perm:admin
+	StorageDetach(ctx context.Context, id storiface.ID, url string) error //perm:admin
 	StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error) //perm:admin
 	StorageReportHealth(context.Context, storiface.ID, storiface.HealthReport) error //perm:admin
 	StorageDeclareSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin
 	StorageDropSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin
+	// StorageFindSector returns list of paths where the specified sector files exist.
+	//
+	// If allowFetch is set, list of paths to which the sector can be fetched will also be returned.
+	// - Paths which have sector files locally (don't require fetching) will be listed first.
+	// - Paths which have sector files locally will not be filtered based on based on AllowTypes/DenyTypes.
+	// - Paths which require fetching will be filtered based on AllowTypes/DenyTypes. If multiple
+	//   file types are specified, each type will be considered individually, and a union of all paths
+	//   which can accommodate each file type will be returned.
 	StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error) //perm:admin
+	// StorageBestAlloc returns list of paths where sector files of the specified type can be allocated, ordered by preference.
+	// Paths with more weight and more % of free space are preferred.
+	// Note: This method doesn't filter paths based on AllowTypes/DenyTypes.
 	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]storiface.StorageInfo, error) //perm:admin
 	StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
 	StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin

@@ -164,6 +183,10 @@ type StorageMiner interface {

 	StorageAuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read

+	StorageAddLocal(ctx context.Context, path string) error //perm:admin
+	StorageDetachLocal(ctx context.Context, path string) error //perm:admin
+	StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin
+
 	MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
 	MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read
 	MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read

@@ -261,8 +284,6 @@ type StorageMiner interface {

 	DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error) //perm:admin
 	DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error //perm:admin

-	StorageAddLocal(ctx context.Context, path string) error //perm:admin
-
 	PiecesListPieces(ctx context.Context) ([]cid.Cid, error) //perm:read
 	PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) //perm:read
 	PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read
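A hedged sketch of the new withdraw call follows; the `withdrawAll` helper is an illustrative assumption, while the `ActorWithdrawBalance` signature and the "0 means full balance" convention come from the interface comment in the diff above.

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
)

// withdrawAll pushes a withdrawal of the full available miner-actor balance
// to the owner address. Per the interface comment, "0" selects the full
// balance and the call returns a message CID without waiting for execution;
// confirmation is left to the caller.
func withdrawAll(ctx context.Context, miner api.StorageMiner) (cid.Cid, error) {
	return miner.ActorWithdrawBalance(ctx, abi.NewTokenAmount(0))
}
```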
@@ -59,7 +59,11 @@ type Worker interface {

 	// Storage / Other
 	Remove(ctx context.Context, sector abi.SectorID) error //perm:admin

+	StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
 	StorageAddLocal(ctx context.Context, path string) error //perm:admin
+	StorageDetachLocal(ctx context.Context, path string) error //perm:admin
+	StorageDetachAll(ctx context.Context) error //perm:admin
+	StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin

 	// SetEnabled marks the worker as enabled/disabled. Not that this setting
 	// may take a few seconds to propagate to task scheduler

@@ -76,6 +80,10 @@ type Worker interface {

 	// Like ProcessSession, but returns an error when worker is disabled
 	Session(context.Context) (uuid.UUID, error) //perm:admin

+	// Trigger shutdown
+	Shutdown(context.Context) error //perm:admin
 }

 var _ storiface.WorkerCalls = *new(Worker)
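A hedged sketch of how the new worker storage-management calls compose: list the attached local paths, detach one, then redeclare what remains. The `detachAndRedeclare` helper is illustrative, and the reading that a nil `id` redeclares all storage IDs is an assumption inferred from the pointer parameter, not stated in the diff.

```go
package example

import (
	"context"

	"github.com/filecoin-project/lotus/api"
)

// detachAndRedeclare shows the intended flow of the new Worker calls:
// inspect local storage, detach a path, then redeclare sector files
// (nil id assumed to mean all storage IDs; dropMissing drops declarations
// for files that are no longer present on disk).
func detachAndRedeclare(ctx context.Context, w api.Worker, path string) error {
	local, err := w.StorageLocal(ctx) // map of storage ID -> local path
	if err != nil {
		return err
	}
	_ = local // e.g. verify that `path` is actually attached before detaching

	if err := w.StorageDetachLocal(ctx, path); err != nil {
		return err
	}
	return w.StorageRedeclareLocal(ctx, nil, true)
}
```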
@@ -272,6 +272,8 @@ func init() {

 		Read: [storiface.FileTypes]uint{2, 3, 0},
 		},
 	})
+	storifaceid := storiface.ID("1399aa04-2625-44b1-bad4-bd07b59b22c4")
+	addExample(&storifaceid)

 	// worker specific
 	addExample(storiface.AcquireMove)

@@ -377,6 +377,20 @@ func (mr *MockFullNodeMockRecorder) ChainNotify(arg0 interface{}) *gomock.Call {

 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainNotify", reflect.TypeOf((*MockFullNode)(nil).ChainNotify), arg0)
 }

+// ChainPrune mocks base method.
+func (m *MockFullNode) ChainPrune(arg0 context.Context, arg1 api.PruneOpts) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ChainPrune", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// ChainPrune indicates an expected call of ChainPrune.
+func (mr *MockFullNodeMockRecorder) ChainPrune(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainPrune", reflect.TypeOf((*MockFullNode)(nil).ChainPrune), arg0, arg1)
+}
+
 // ChainPutObj mocks base method.
 func (m *MockFullNode) ChainPutObj(arg0 context.Context, arg1 blocks.Block) error {
 	m.ctrl.T.Helper()
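Because `ChainPrune` now has a generated gomock method, tests can set expectations on it in the usual gomock style. This is a hedged sketch: the test name and body are illustrative, while `mocks.NewMockFullNode` is the standard constructor for the generated mock in the lotus tree.

```go
package example

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/mocks"
)

// TestPruneIsCalled sketches asserting that code under test invokes
// ChainPrune exactly once with some PruneOpts.
func TestPruneIsCalled(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	node := mocks.NewMockFullNode(ctrl)
	node.EXPECT().ChainPrune(gomock.Any(), gomock.Any()).Return(nil).Times(1)

	// code under test would receive `node` as an api.FullNode
	var _ api.FullNode = node
	_ = node.ChainPrune(context.Background(), api.PruneOpts{})
}
```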
api/proxy_gen.go (143 lines changed)

@@ -145,6 +145,8 @@ type FullNodeStruct struct {

 		ChainNotify func(p0 context.Context) (<-chan []*HeadChange, error) `perm:"read"`

+		ChainPrune func(p0 context.Context, p1 PruneOpts) error `perm:"admin"`
+
 		ChainPutObj func(p0 context.Context, p1 blocks.Block) error `perm:"admin"`

 		ChainReadObj func(p0 context.Context, p1 cid.Cid) ([]byte, error) `perm:"read"`

@@ -656,6 +658,8 @@ type StorageMinerStruct struct {

 		ActorSectorSize func(p0 context.Context, p1 address.Address) (abi.SectorSize, error) `perm:"read"`

+		ActorWithdrawBalance func(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) `perm:"admin"`
+
 		CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`

 		ComputeDataCid func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data) (abi.PieceInfo, error) `perm:"admin"`

@@ -800,6 +804,8 @@ type StorageMinerStruct struct {

 		SealingAbort func(p0 context.Context, p1 storiface.CallID) error `perm:"admin"`

+		SealingRemoveRequest func(p0 context.Context, p1 uuid.UUID) error `perm:"admin"`
+
 		SealingSchedDiag func(p0 context.Context, p1 bool) (interface{}, error) `perm:"admin"`

 		SectorAbortUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`

@@ -860,6 +866,10 @@ type StorageMinerStruct struct {

 		StorageDeclareSector func(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error `perm:"admin"`

+		StorageDetach func(p0 context.Context, p1 storiface.ID, p2 string) error `perm:"admin"`
+
+		StorageDetachLocal func(p0 context.Context, p1 string) error `perm:"admin"`
+
 		StorageDropSector func(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error `perm:"admin"`

 		StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) `perm:"admin"`

@@ -874,6 +884,8 @@ type StorageMinerStruct struct {

 		StorageLock func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 storiface.SectorFileType) error `perm:"admin"`

+		StorageRedeclareLocal func(p0 context.Context, p1 *storiface.ID, p2 bool) error `perm:"admin"`
+
 		StorageReportHealth func(p0 context.Context, p1 storiface.ID, p2 storiface.HealthReport) error `perm:"admin"`

 		StorageStat func(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) `perm:"admin"`

@@ -965,8 +977,18 @@ type WorkerStruct struct {

 		SetEnabled func(p0 context.Context, p1 bool) error `perm:"admin"`

+		Shutdown func(p0 context.Context) error `perm:"admin"`
+
 		StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`

+		StorageDetachAll func(p0 context.Context) error `perm:"admin"`
+
+		StorageDetachLocal func(p0 context.Context, p1 string) error `perm:"admin"`
+
+		StorageLocal func(p0 context.Context) (map[storiface.ID]string, error) `perm:"admin"`
+
+		StorageRedeclareLocal func(p0 context.Context, p1 *storiface.ID, p2 bool) error `perm:"admin"`
+
 		TaskDisable func(p0 context.Context, p1 sealtasks.TaskType) error `perm:"admin"`

 		TaskEnable func(p0 context.Context, p1 sealtasks.TaskType) error `perm:"admin"`

@@ -1336,6 +1358,17 @@ func (s *FullNodeStub) ChainNotify(p0 context.Context) (<-chan []*HeadChange, er

 	return nil, ErrNotSupported
 }

+func (s *FullNodeStruct) ChainPrune(p0 context.Context, p1 PruneOpts) error {
+	if s.Internal.ChainPrune == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ChainPrune(p0, p1)
+}
+
+func (s *FullNodeStub) ChainPrune(p0 context.Context, p1 PruneOpts) error {
+	return ErrNotSupported
+}
+
 func (s *FullNodeStruct) ChainPutObj(p0 context.Context, p1 blocks.Block) error {
 	if s.Internal.ChainPutObj == nil {
 		return ErrNotSupported

@@ -3954,6 +3987,17 @@ func (s *StorageMinerStub) ActorSectorSize(p0 context.Context, p1 address.Addres

 	return *new(abi.SectorSize), ErrNotSupported
 }

+func (s *StorageMinerStruct) ActorWithdrawBalance(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) {
+	if s.Internal.ActorWithdrawBalance == nil {
+		return *new(cid.Cid), ErrNotSupported
+	}
+	return s.Internal.ActorWithdrawBalance(p0, p1)
+}
+
+func (s *StorageMinerStub) ActorWithdrawBalance(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) {
+	return *new(cid.Cid), ErrNotSupported
+}
+
 func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
 	if s.Internal.CheckProvable == nil {
 		return *new(map[abi.SectorNumber]string), ErrNotSupported

@@ -4746,6 +4790,17 @@ func (s *StorageMinerStub) SealingAbort(p0 context.Context, p1 storiface.CallID)

 	return ErrNotSupported
 }

+func (s *StorageMinerStruct) SealingRemoveRequest(p0 context.Context, p1 uuid.UUID) error {
+	if s.Internal.SealingRemoveRequest == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.SealingRemoveRequest(p0, p1)
+}
+
+func (s *StorageMinerStub) SealingRemoveRequest(p0 context.Context, p1 uuid.UUID) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) SealingSchedDiag(p0 context.Context, p1 bool) (interface{}, error) {
 	if s.Internal.SealingSchedDiag == nil {
 		return nil, ErrNotSupported

@@ -5076,6 +5131,28 @@ func (s *StorageMinerStub) StorageDeclareSector(p0 context.Context, p1 storiface

 	return ErrNotSupported
 }

+func (s *StorageMinerStruct) StorageDetach(p0 context.Context, p1 storiface.ID, p2 string) error {
+	if s.Internal.StorageDetach == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageDetach(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) StorageDetach(p0 context.Context, p1 storiface.ID, p2 string) error {
+	return ErrNotSupported
+}
+
+func (s *StorageMinerStruct) StorageDetachLocal(p0 context.Context, p1 string) error {
+	if s.Internal.StorageDetachLocal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageDetachLocal(p0, p1)
+}
+
+func (s *StorageMinerStub) StorageDetachLocal(p0 context.Context, p1 string) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) StorageDropSector(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType) error {
 	if s.Internal.StorageDropSector == nil {
 		return ErrNotSupported

@@ -5153,6 +5230,17 @@ func (s *StorageMinerStub) StorageLock(p0 context.Context, p1 abi.SectorID, p2 s

 	return ErrNotSupported
 }

+func (s *StorageMinerStruct) StorageRedeclareLocal(p0 context.Context, p1 *storiface.ID, p2 bool) error {
+	if s.Internal.StorageRedeclareLocal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageRedeclareLocal(p0, p1, p2)
+}
+
+func (s *StorageMinerStub) StorageRedeclareLocal(p0 context.Context, p1 *storiface.ID, p2 bool) error {
+	return ErrNotSupported
+}
+
 func (s *StorageMinerStruct) StorageReportHealth(p0 context.Context, p1 storiface.ID, p2 storiface.HealthReport) error {
 	if s.Internal.StorageReportHealth == nil {
 		return ErrNotSupported

@@ -5560,6 +5648,17 @@ func (s *WorkerStub) SetEnabled(p0 context.Context, p1 bool) error {

 	return ErrNotSupported
 }

+func (s *WorkerStruct) Shutdown(p0 context.Context) error {
+	if s.Internal.Shutdown == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.Shutdown(p0)
+}
+
+func (s *WorkerStub) Shutdown(p0 context.Context) error {
+	return ErrNotSupported
+}
+
 func (s *WorkerStruct) StorageAddLocal(p0 context.Context, p1 string) error {
 	if s.Internal.StorageAddLocal == nil {
 		return ErrNotSupported

@@ -5571,6 +5670,50 @@ func (s *WorkerStub) StorageAddLocal(p0 context.Context, p1 string) error {

 	return ErrNotSupported
 }

+func (s *WorkerStruct) StorageDetachAll(p0 context.Context) error {
+	if s.Internal.StorageDetachAll == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageDetachAll(p0)
+}
+
+func (s *WorkerStub) StorageDetachAll(p0 context.Context) error {
+	return ErrNotSupported
+}
+
+func (s *WorkerStruct) StorageDetachLocal(p0 context.Context, p1 string) error {
+	if s.Internal.StorageDetachLocal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageDetachLocal(p0, p1)
+}
+
+func (s *WorkerStub) StorageDetachLocal(p0 context.Context, p1 string) error {
+	return ErrNotSupported
+}
+
+func (s *WorkerStruct) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
+	if s.Internal.StorageLocal == nil {
+		return *new(map[storiface.ID]string), ErrNotSupported
+	}
+	return s.Internal.StorageLocal(p0)
+}
+
+func (s *WorkerStub) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
+	return *new(map[storiface.ID]string), ErrNotSupported
+}
+
+func (s *WorkerStruct) StorageRedeclareLocal(p0 context.Context, p1 *storiface.ID, p2 bool) error {
+	if s.Internal.StorageRedeclareLocal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.StorageRedeclareLocal(p0, p1, p2)
+}
+
+func (s *WorkerStub) StorageRedeclareLocal(p0 context.Context, p1 *storiface.ID, p2 bool) error {
+	return ErrNotSupported
+}
+
 func (s *WorkerStruct) TaskDisable(p0 context.Context, p1 sealtasks.TaskType) error {
 	if s.Internal.TaskDisable == nil {
 		return ErrNotSupported
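All of the proxy_gen.go additions above follow the file's mechanical pattern: each API method gets an `Internal` function field carrying a perm tag, a Struct wrapper that dispatches through that field and fails cleanly when it is unset, and a Stub that always reports the method as unsupported. In miniature, with generic names that are not from the diff, the pattern looks like this:

```go
package example

import "errors"

var ErrNotSupported = errors.New("method not supported")

// FooStruct mirrors the generated pattern: a settable function field
// with a permission tag...
type FooStruct struct {
	Internal struct {
		DoThing func(x int) error `perm:"admin"`
	}
}

// ...a wrapper that fails cleanly when the backend never wired the field...
func (s *FooStruct) DoThing(x int) error {
	if s.Internal.DoThing == nil {
		return ErrNotSupported
	}
	return s.Internal.DoThing(x)
}

// ...and a stub that unconditionally reports the method as unsupported.
type FooStub struct{}

func (s *FooStub) DoThing(x int) error { return ErrNotSupported }
```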
@@ -386,7 +386,7 @@ type FullNode interface {

 	StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) //perm:read
 	// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
 	//
-	// If a tipset key is provided, and a replacing message is found on chain,
+	// If a tipset key is provided, and a replacing message is not found on chain,
 	// the method will return an error saying that the message wasn't found
 	//
 	// If no tipset key is provided, the appropriate tipset is looked up, and if
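The same one-word comment fix lands in the v0 API. As a hedged usage sketch (the `replayMsg` helper and its arguments are illustrative, not from the diff):

```go
package example

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// replayMsg replays a message; with types.EmptyTSK the node locates the
// execution tipset itself, while an explicit key plus a missing replacing
// message now (per the corrected comment) yields a not-found error.
func replayMsg(ctx context.Context, node api.FullNode, msgCid cid.Cid) error {
	res, err := node.StateReplay(ctx, types.EmptyTSK, msgCid)
	if err != nil {
		return err
	}
	fmt.Println("exit code:", res.MsgRct.ExitCode)
	return nil
}
```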
@@ -102,6 +102,14 @@ func (b *idstore) Put(ctx context.Context, blk blocks.Block) error {

 	return b.bs.Put(ctx, blk)
 }

+func (b *idstore) ForEachKey(f func(cid.Cid) error) error {
+	iterBstore, ok := b.bs.(BlockstoreIterator)
+	if !ok {
+		return xerrors.Errorf("underlying blockstore (type %T) doesn't support fast iteration", b.bs)
+	}
+	return iterBstore.ForEachKey(f)
+}
+
 func (b *idstore) PutMany(ctx context.Context, blks []blocks.Block) error {
 	toPut := make([]blocks.Block, 0, len(blks))
 	for _, blk := range blks {
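The new `ForEachKey` passthrough only works when the wrapped blockstore itself supports fast iteration, which is exactly the contract the type assertion enforces. A hedged sketch of leaning on that contract (the `countKeys` helper is illustrative):

```go
package example

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/blockstore"
)

// countKeys iterates every key in a store exposing BlockstoreIterator,
// the same interface the idstore now forwards to; stores without fast
// iteration surface an error from ForEachKey instead.
func countKeys(bs blockstore.BlockstoreIterator) (int, error) {
	n := 0
	err := bs.ForEachKey(func(c cid.Cid) error {
		n++
		return nil
	})
	return n, err
}
```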
@@ -14,27 +14,28 @@ func NewMemory() MemBlockstore {
 }

 // MemBlockstore is a terminal blockstore that keeps blocks in memory.
-type MemBlockstore map[cid.Cid]blocks.Block
+// To match behavior of badger blockstore we index by multihash only.
+type MemBlockstore map[string]blocks.Block

 func (m MemBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
-	delete(m, k)
+	delete(m, string(k.Hash()))
 	return nil
 }

 func (m MemBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error {
 	for _, k := range ks {
-		delete(m, k)
+		delete(m, string(k.Hash()))
 	}
 	return nil
 }

 func (m MemBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) {
-	_, ok := m[k]
+	_, ok := m[string(k.Hash())]
 	return ok, nil
 }

 func (m MemBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
-	b, ok := m[k]
+	b, ok := m[string(k.Hash())]
 	if !ok {
 		return ipld.ErrNotFound{Cid: k}
 	}

@@ -42,7 +43,7 @@ func (m MemBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte
 }

 func (m MemBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
-	b, ok := m[k]
+	b, ok := m[string(k.Hash())]
 	if !ok {
 		return nil, ipld.ErrNotFound{Cid: k}
 	}

@@ -51,7 +52,7 @@ func (m MemBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error)

 // GetSize returns the CIDs mapped BlockSize
 func (m MemBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) {
-	b, ok := m[k]
+	b, ok := m[string(k.Hash())]
 	if !ok {
 		return 0, ipld.ErrNotFound{Cid: k}
 	}

@@ -62,7 +63,7 @@ func (m MemBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) {

 func (m MemBlockstore) Put(ctx context.Context, b blocks.Block) error {
 	// Convert to a basic block for safety, but try to reuse the existing
 	// block if it's already a basic block.
-	k := b.Cid()
+	k := string(b.Cid().Hash())
 	if _, ok := b.(*blocks.BasicBlock); !ok {
 		// If we already have the block, abort.
 		if _, ok := m[k]; ok {

@@ -71,7 +72,7 @@ func (m MemBlockstore) Put(ctx context.Context, b blocks.Block) error {

 		// the error is only for debugging.
 		b, _ = blocks.NewBlockWithCid(b.RawData(), b.Cid())
 	}
-	m[b.Cid()] = b
+	m[k] = b
 	return nil
 }

@@ -89,8 +90,8 @@ func (m MemBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error {

 // the given context, closing the channel if it becomes Done.
 func (m MemBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
 	ch := make(chan cid.Cid, len(m))
-	for k := range m {
-		ch <- k
+	for _, b := range m {
+		ch <- b.Cid()
 	}
 	close(ch)
 	return ch, nil
@ -43,8 +43,16 @@ var (
|
|||||||
// compactionIndexKey stores the compaction index (serial number)
|
// compactionIndexKey stores the compaction index (serial number)
|
||||||
compactionIndexKey = dstore.NewKey("/splitstore/compactionIndex")
|
compactionIndexKey = dstore.NewKey("/splitstore/compactionIndex")
|
||||||
|
|
||||||
|
// stores the prune index (serial number)
|
||||||
|
pruneIndexKey = dstore.NewKey("/splitstore/pruneIndex")
|
||||||
|
|
||||||
|
// stores the base epoch of last prune in the metadata store
|
||||||
|
pruneEpochKey = dstore.NewKey("/splitstore/pruneEpoch")
|
||||||
|
|
||||||
log = logging.Logger("splitstore")
|
log = logging.Logger("splitstore")
|
||||||
|
|
||||||
|
errClosing = errors.New("splitstore is closing")
|
||||||
|
|
||||||
// set this to true if you are debugging the splitstore to enable debug logging
|
// set this to true if you are debugging the splitstore to enable debug logging
|
||||||
enableDebugLog = false
|
enableDebugLog = false
|
||||||
// set this to true if you want to track origin stack traces in the write log
|
// set this to true if you want to track origin stack traces in the write log
|
||||||
@ -54,6 +62,16 @@ var (
|
|||||||
upgradeBoundary = build.Finality
|
upgradeBoundary = build.Finality
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type CompactType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
none CompactType = iota
|
||||||
|
warmup
|
||||||
|
hot
|
||||||
|
cold
|
||||||
|
check
|
||||||
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG") == "1" {
|
if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG") == "1" {
|
||||||
enableDebugLog = true
|
enableDebugLog = true
|
||||||
@ -93,6 +111,21 @@ type Config struct {
|
|||||||
// A positive value is the number of compactions before a full GC is performed;
|
// A positive value is the number of compactions before a full GC is performed;
|
||||||
// a value of 1 will perform full GC in every compaction.
|
// a value of 1 will perform full GC in every compaction.
|
||||||
HotStoreFullGCFrequency uint64
|
HotStoreFullGCFrequency uint64
|
||||||
|
|
||||||
|
// EnableColdStoreAutoPrune turns on compaction of the cold store i.e. pruning
|
||||||
|
// where hotstore compaction occurs every finality epochs pruning happens every 3 finalities
|
||||||
|
// Default is false
|
||||||
|
EnableColdStoreAutoPrune bool
|
||||||
|
|
||||||
|
// ColdStoreFullGCFrequency specifies how often to performa a full (moving) GC on the coldstore.
|
||||||
|
// Only applies if auto prune is enabled. A value of 0 disables while a value of 1 will do
|
||||||
|
// full GC in every prune.
|
||||||
|
// Default is 7 (about once every a week)
|
||||||
|
ColdStoreFullGCFrequency uint64
|
||||||
|
|
||||||
|
// ColdStoreRetention specifies the retention policy for data reachable from the chain, in
|
||||||
|
// finalities beyond the compaction boundary, default is 0, -1 retains everything
|
||||||
|
ColdStoreRetention int64
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChainAccessor allows the Splitstore to access the chain. It will most likely
|
// ChainAccessor allows the Splitstore to access the chain. It will most likely
|
||||||
@ -117,7 +150,8 @@ type hotstore interface {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type SplitStore struct {
|
type SplitStore struct {
|
||||||
compacting int32 // compaction/prune/warmup in progress
|
compacting int32 // flag for when compaction is in progress
|
||||||
|
compactType CompactType // compaction type, protected by compacting atomic, only meaningful when compacting == 1
|
||||||
closing int32 // the splitstore is closing
|
closing int32 // the splitstore is closing
|
||||||
|
|
||||||
cfg *Config
|
cfg *Config
|
||||||
@ -126,6 +160,7 @@ type SplitStore struct {
|
|||||||
mx sync.Mutex
|
mx sync.Mutex
|
||||||
warmupEpoch abi.ChainEpoch // protected by mx
|
warmupEpoch abi.ChainEpoch // protected by mx
|
||||||
baseEpoch abi.ChainEpoch // protected by compaction lock
|
baseEpoch abi.ChainEpoch // protected by compaction lock
|
||||||
|
pruneEpoch abi.ChainEpoch // protected by compaction lock
|
||||||
|
|
||||||
headChangeMx sync.Mutex
|
headChangeMx sync.Mutex
|
||||||
|
|
||||||
@ -140,6 +175,7 @@ type SplitStore struct {
|
|||||||
markSetSize int64
|
markSetSize int64
|
||||||
|
|
||||||
compactionIndex int64
|
compactionIndex int64
|
||||||
|
pruneIndex int64
|
||||||
|
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
cancel func()
|
cancel func()
|
||||||
@ -227,6 +263,13 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
|
|||||||
return nil, xerrors.Errorf("error resuming compaction: %w", err)
|
return nil, xerrors.Errorf("error resuming compaction: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if ss.pruneCheckpointExists() {
|
||||||
|
log.Info("found prune checkpoint; resuming prune")
|
||||||
|
if err := ss.completePrune(); err != nil {
|
||||||
|
markSetEnv.Close() //nolint:errcheck
|
||||||
|
return nil, xerrors.Errorf("error resuming prune: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return ss, nil
|
return ss, nil
|
||||||
}
|
}
|
||||||
@ -260,8 +303,14 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
|
|||||||
if has {
|
if has {
|
||||||
return s.has(cid)
|
return s.has(cid)
|
||||||
}
|
}
|
||||||
|
switch s.compactType {
|
||||||
|
case hot:
|
||||||
return s.cold.Has(ctx, cid)
|
return s.cold.Has(ctx, cid)
|
||||||
|
case cold:
|
||||||
|
return s.hot.Has(ctx, cid)
|
||||||
|
default:
|
||||||
|
return false, xerrors.Errorf("invalid compaction type %d, only hot and cold allowed for critical section", s.compactType)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
has, err := s.hot.Has(ctx, cid)
|
has, err := s.hot.Has(ctx, cid)
|
||||||
@ -276,9 +325,12 @@ func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
has, err = s.cold.Has(ctx, cid)
|
has, err = s.cold.Has(ctx, cid)
|
||||||
if has && bstore.IsHotView(ctx) {
|
if has {
|
||||||
|
s.trackTxnRef(cid)
|
||||||
|
if bstore.IsHotView(ctx) {
|
||||||
s.reifyColdObject(cid)
|
s.reifyColdObject(cid)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return has, err
|
return has, err
|
||||||
|
|
||||||
@ -307,8 +359,14 @@ func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error)
|
|||||||
if has {
|
if has {
|
||||||
return s.get(cid)
|
return s.get(cid)
|
||||||
}
|
}
|
||||||
|
switch s.compactType {
|
||||||
|
case hot:
|
||||||
return s.cold.Get(ctx, cid)
|
return s.cold.Get(ctx, cid)
|
||||||
|
case cold:
|
||||||
|
return s.hot.Get(ctx, cid)
|
||||||
|
default:
|
||||||
|
return nil, xerrors.Errorf("invalid compaction type %d, only hot and cold allowed for critical section", s.compactType)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
blk, err := s.hot.Get(ctx, cid)
|
blk, err := s.hot.Get(ctx, cid)
|
||||||
@ -325,6 +383,7 @@ func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error)
|
|||||||
|
|
||||||
blk, err = s.cold.Get(ctx, cid)
|
blk, err = s.cold.Get(ctx, cid)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
s.trackTxnRef(cid)
|
||||||
if bstore.IsHotView(ctx) {
|
if bstore.IsHotView(ctx) {
|
||||||
s.reifyColdObject(cid)
|
s.reifyColdObject(cid)
|
||||||
}
|
}
|
||||||
@ -361,8 +420,14 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
|
|||||||
if has {
|
if has {
|
||||||
return s.getSize(cid)
|
return s.getSize(cid)
|
||||||
}
|
}
|
||||||
|
switch s.compactType {
|
||||||
|
case hot:
|
||||||
return s.cold.GetSize(ctx, cid)
|
return s.cold.GetSize(ctx, cid)
|
||||||
|
case cold:
|
||||||
|
return s.hot.GetSize(ctx, cid)
|
||||||
|
default:
|
||||||
|
return 0, xerrors.Errorf("invalid compaction type %d, only hot and cold allowed for critical section", s.compactType)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
size, err := s.hot.GetSize(ctx, cid)
|
size, err := s.hot.GetSize(ctx, cid)
|
||||||
@ -379,6 +444,7 @@ func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
|
|||||||
|
|
||||||
size, err = s.cold.GetSize(ctx, cid)
|
size, err = s.cold.GetSize(ctx, cid)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
s.trackTxnRef(cid)
|
||||||
if bstore.IsHotView(ctx) {
|
if bstore.IsHotView(ctx) {
|
||||||
s.reifyColdObject(cid)
|
s.reifyColdObject(cid)
|
||||||
}
|
}
|
||||||
@ -408,12 +474,12 @@ func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error {
|
|||||||
s.debug.LogWrite(blk)
|
s.debug.LogWrite(blk)
|
||||||
|
|
||||||
// critical section
|
// critical section
|
||||||
if s.txnMarkSet != nil {
|
if s.txnMarkSet != nil && s.compactType == hot { // puts only touch hot store
|
||||||
s.markLiveRefs([]cid.Cid{blk.Cid()})
|
s.markLiveRefs([]cid.Cid{blk.Cid()})
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
s.trackTxnRef(blk.Cid())
|
s.trackTxnRef(blk.Cid())
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -459,12 +525,12 @@ func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error {
|
|||||||
s.debug.LogWriteMany(blks)
|
s.debug.LogWriteMany(blks)
|
||||||
|
|
||||||
// critical section
|
// critical section
|
||||||
if s.txnMarkSet != nil {
|
if s.txnMarkSet != nil && s.compactType == hot { // puts only touch hot store
|
||||||
s.markLiveRefs(batch)
|
s.markLiveRefs(batch)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
s.trackTxnRefMany(batch)
|
s.trackTxnRefMany(batch)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -536,8 +602,14 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro
|
|||||||
if has {
|
if has {
|
||||||
return s.view(cid, cb)
|
return s.view(cid, cb)
|
||||||
}
|
}
|
||||||
|
switch s.compactType {
|
||||||
|
case hot:
|
||||||
return s.cold.View(ctx, cid, cb)
|
return s.cold.View(ctx, cid, cb)
|
||||||
|
case cold:
|
||||||
|
return s.hot.View(ctx, cid, cb)
|
||||||
|
default:
|
||||||
|
return xerrors.Errorf("invalid compaction type %d, only hot and cold allowed for critical section", s.compactType)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// views are (optimistically) protected two-fold:
|
// views are (optimistically) protected two-fold:
|
||||||
@ -623,6 +695,23 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error
|
|||||||
return xerrors.Errorf("error loading base epoch: %w", err)
|
return xerrors.Errorf("error loading base epoch: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// load prune epoch from metadata ds
|
||||||
|
bs, err = s.ds.Get(s.ctx, pruneEpochKey)
|
||||||
|
switch err {
|
||||||
|
case nil:
|
||||||
|
s.pruneEpoch = bytesToEpoch(bs)
|
||||||
|
case dstore.ErrNotFound:
|
||||||
|
if curTs == nil {
|
||||||
|
//this can happen in some tests
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err := s.setPruneEpoch(curTs.Height()); err != nil {
|
||||||
|
return xerrors.Errorf("error saving prune epoch: %w", err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return xerrors.Errorf("error loading prune epoch: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
// load warmup epoch from metadata ds
|
// load warmup epoch from metadata ds
|
||||||
bs, err = s.ds.Get(s.ctx, warmupEpochKey)
|
bs, err = s.ds.Get(s.ctx, warmupEpochKey)
|
||||||
switch err {
|
switch err {
|
||||||
@ -722,3 +811,8 @@ func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error {
|
|||||||
s.baseEpoch = epoch
|
s.baseEpoch = epoch
|
||||||
return s.ds.Put(s.ctx, baseEpochKey, epochToBytes(epoch))
|
return s.ds.Put(s.ctx, baseEpochKey, epochToBytes(epoch))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *SplitStore) setPruneEpoch(epoch abi.ChainEpoch) error {
|
||||||
|
s.pruneEpoch = epoch
|
||||||
|
return s.ds.Put(s.ctx, pruneEpochKey, epochToBytes(epoch))
|
||||||
|
}
|
||||||
|
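Tying the new Config fields together, here is a hedged sketch of opening a splitstore with cold-store auto-prune enabled. The helper, its arguments, and the chosen values are illustrative assumptions; the field names and the `Open` signature come from this diff.

```go
package example

import (
	dstore "github.com/ipfs/go-datastore"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/blockstore/splitstore"
)

// openWithAutoPrune wires the new prune-related options: prune the cold
// store automatically, do a full (moving) coldstore GC on every 7th prune,
// and keep 2 extra finalities of chain-reachable data past the boundary.
func openWithAutoPrune(path string, ds dstore.Datastore, hot, cold bstore.Blockstore) (*splitstore.SplitStore, error) {
	cfg := &splitstore.Config{
		HotStoreFullGCFrequency:  20, // example value, not a recommendation
		EnableColdStoreAutoPrune: true,
		ColdStoreFullGCFrequency: 7,
		ColdStoreRetention:       2,
	}
	return splitstore.Open(path, ds, hot, cold, cfg)
}
```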
@@ -25,6 +25,7 @@ func (s *SplitStore) Check() error {

 	if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) {
 		return xerrors.Errorf("can't acquire compaction lock; compacting operation in progress")
 	}
+	s.compactType = check

 	if s.compactionIndex == 0 {
 		atomic.StoreInt32(&s.compacting, 0)

@@ -146,6 +147,8 @@ func (s *SplitStore) Info() map[string]interface{} {

 	info["base epoch"] = s.baseEpoch
 	info["warmup epoch"] = s.warmupEpoch
 	info["compactions"] = s.compactionIndex
+	info["prunes"] = s.pruneIndex
+	info["compacting"] = s.compacting == 1

 	sizer, ok := s.hot.(bstore.BlockstoreSize)
 	if ok {
@ -20,6 +20,7 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
|
||||||
|
bstore "github.com/filecoin-project/lotus/blockstore"
|
||||||
"github.com/filecoin-project/lotus/build"
|
"github.com/filecoin-project/lotus/build"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
"github.com/filecoin-project/lotus/metrics"
|
"github.com/filecoin-project/lotus/metrics"
|
||||||
@ -52,6 +53,12 @@ var (
|
|||||||
// SyncWaitTime is the time delay from a tipset's min timestamp before we decide
|
// SyncWaitTime is the time delay from a tipset's min timestamp before we decide
|
||||||
// we have synced.
|
// we have synced.
|
||||||
SyncWaitTime = 30 * time.Second
|
SyncWaitTime = 30 * time.Second
|
||||||
|
|
||||||
|
// This is a testing flag that should always be true when running a node. itests rely on the rough hack
|
||||||
|
// of starting genesis so far in the past that they exercise catchup mining to mine
|
||||||
|
// blocks quickly and so disabling syncgap checking is necessary to test compaction
|
||||||
|
// without a deep structural improvement of itests.
|
||||||
|
CheckSyncGap = true
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -81,7 +88,7 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
|
|||||||
// this is guaranteed by the chainstore, and it is pervasive in all lotus
|
// this is guaranteed by the chainstore, and it is pervasive in all lotus
|
||||||
// -- if that ever changes then all hell will break loose in general and
|
// -- if that ever changes then all hell will break loose in general and
|
||||||
// we will have a rance to protectTipSets here.
|
// we will have a rance to protectTipSets here.
|
||||||
// Reagrdless, we put a mutex in HeadChange just to be safe
|
// Regardless, we put a mutex in HeadChange just to be safe
|
||||||
|
|
||||||
if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) {
|
if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) {
|
||||||
// we are currently compacting -- protect the new tipset(s)
|
// we are currently compacting -- protect the new tipset(s)
|
||||||
@ -96,7 +103,8 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
timestamp := time.Unix(int64(curTs.MinTimestamp()), 0)
|
timestamp := time.Unix(int64(curTs.MinTimestamp()), 0)
|
||||||
if time.Since(timestamp) > SyncGapTime {
|
|
||||||
|
if CheckSyncGap && time.Since(timestamp) > SyncGapTime {
|
||||||
// don't attempt compaction before we have caught up syncing
|
// don't attempt compaction before we have caught up syncing
|
||||||
atomic.StoreInt32(&s.compacting, 0)
|
atomic.StoreInt32(&s.compacting, 0)
|
||||||
return nil
|
return nil
|
||||||
@ -108,9 +116,12 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Prioritize hot store compaction over cold store prune
|
||||||
|
|
||||||
if epoch-s.baseEpoch > CompactionThreshold {
|
if epoch-s.baseEpoch > CompactionThreshold {
|
||||||
// it's time to compact -- prepare the transaction and go!
|
// it's time to compact -- prepare the transaction and go!
|
||||||
s.beginTxnProtect()
|
s.beginTxnProtect()
|
||||||
|
s.compactType = hot
|
||||||
go func() {
|
go func() {
|
||||||
defer atomic.StoreInt32(&s.compacting, 0)
|
defer atomic.StoreInt32(&s.compacting, 0)
|
||||||
defer s.endTxnProtect()
|
defer s.endTxnProtect()
|
||||||
@@ -122,6 +133,40 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {

 			log.Infow("compaction done", "took", time.Since(start))
 		}()
+		// only prune if auto prune is enabled and after at least one compaction
+	} else if s.cfg.EnableColdStoreAutoPrune && epoch-s.pruneEpoch > PruneThreshold && s.compactionIndex > 0 {
+		s.beginTxnProtect()
+		s.compactType = cold
+		go func() {
+			defer atomic.StoreInt32(&s.compacting, 0)
+			defer s.endTxnProtect()
+
+			log.Info("pruning splitstore")
+			start := time.Now()
+
+			var retainP func(int64) bool
+			switch {
+			case s.cfg.ColdStoreRetention > int64(0):
+				retainP = func(depth int64) bool {
+					return depth <= int64(CompactionBoundary)+s.cfg.ColdStoreRetention*int64(build.Finality)
+				}
+			case s.cfg.ColdStoreRetention < 0:
+				retainP = func(_ int64) bool { return true }
+			default:
+				retainP = func(depth int64) bool {
+					return depth <= int64(CompactionBoundary)
+				}
+			}
+			movingGC := s.cfg.ColdStoreFullGCFrequency > 0 && s.pruneIndex%int64(s.cfg.ColdStoreFullGCFrequency) == 0
+			var gcOpts []bstore.BlockstoreGCOption
+			if movingGC {
+				gcOpts = append(gcOpts, bstore.WithFullGC(true))
+			}
+			doGC := func() error { return s.gcBlockstore(s.cold, gcOpts) }
+
+			s.prune(curTs, retainP, doGC)
+			log.Infow("prune done", "took", time.Since(start))
+		}()
 	} else {
 		// no compaction necessary
 		atomic.StoreInt32(&s.compacting, 0)
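A minimal sketch (not part of this commit) of the trigger logic above, with hypothetical constants standing in for the real build- and config-derived thresholds; it only illustrates the branch order, in which hot-store compaction always wins over auto-prune:

package main

import "fmt"

// Hypothetical stand-ins; the real values come from build constants
// and splitstore configuration.
const (
	compactionThreshold = 5  // epochs past baseEpoch before a hot compaction
	pruneThreshold      = 21 // epochs past pruneEpoch before a cold prune
)

// decide mirrors the branch order in HeadChange: hot compaction first,
// auto-prune only when enabled and after at least one compaction.
func decide(epoch, baseEpoch, pruneEpoch, compactionIndex int64, autoPrune bool) string {
	switch {
	case epoch-baseEpoch > compactionThreshold:
		return "compact hotstore"
	case autoPrune && epoch-pruneEpoch > pruneThreshold && compactionIndex > 0:
		return "prune coldstore"
	default:
		return "nothing to do"
	}
}

func main() {
	fmt.Println(decide(100, 90, 50, 3, true)) // compact hotstore
	fmt.Println(decide(100, 98, 50, 3, true)) // prune coldstore
	fmt.Println(decide(100, 98, 95, 0, true)) // nothing to do
}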
@@ -587,7 +632,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {

 	// some stats for logging
 	var hotCnt, coldCnt int
-
 	err = s.hot.ForEachKey(func(c cid.Cid) error {
 		// was it marked?
 		mark, err := markSet.Has(c)
@@ -608,7 +652,6 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {

 		return nil
 	})
-
 	if err != nil {
 		return xerrors.Errorf("error collecting cold objects: %w", err)
 	}
@@ -777,6 +820,10 @@ func (s *SplitStore) beginCriticalSection(markSet MarkSet) error {

 func (s *SplitStore) waitForSync() {
 	log.Info("waiting for sync")
+	if !CheckSyncGap {
+		log.Warnf("If you see this outside of test it is a serious splitstore issue")
+		return
+	}
 	startWait := time.Now()
 	defer func() {
 		log.Infow("waiting for sync done", "took", time.Since(startWait))
@@ -1115,7 +1162,6 @@ func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error {
 		if err := s.checkClosing(); err != nil {
 			return err
 		}
-
 		blk, err := s.hot.Get(s.ctx, c)
 		if err != nil {
 			if ipld.IsNotFound(err) {
@@ -1133,6 +1179,7 @@ func (s *SplitStore) moveColdBlocks(coldr *ColdSetReader) error {
 			return xerrors.Errorf("error putting batch to coldstore: %w", err)
 		}
 		batch = batch[:0]
+
 	}

 	return nil
@@ -1221,9 +1268,18 @@ func (s *SplitStore) purgeBatch(batch, deadCids []cid.Cid, checkpoint *Checkpoin
 		return 0, liveCnt, nil
 	}

+	switch s.compactType {
+	case hot:
 		if err := s.hot.DeleteMany(s.ctx, deadCids); err != nil {
 			return 0, liveCnt, xerrors.Errorf("error purging cold objects: %w", err)
 		}
+	case cold:
+		if err := s.cold.DeleteMany(s.ctx, deadCids); err != nil {
+			return 0, liveCnt, xerrors.Errorf("error purging dead objects: %w", err)
+		}
+	default:
+		return 0, liveCnt, xerrors.Errorf("invalid compaction type %d, only hot and cold allowed for critical section", s.compactType)
+	}

 	s.debug.LogDelete(deadCids)
 	purgeCnt = len(deadCids)
@@ -1239,15 +1295,28 @@ func (s *SplitStore) coldSetPath() string {
 	return filepath.Join(s.path, "coldset")
 }

+func (s *SplitStore) deadSetPath() string {
+	return filepath.Join(s.path, "deadset")
+}
+
 func (s *SplitStore) checkpointPath() string {
 	return filepath.Join(s.path, "checkpoint")
 }

+func (s *SplitStore) pruneCheckpointPath() string {
+	return filepath.Join(s.path, "prune-checkpoint")
+}
+
 func (s *SplitStore) checkpointExists() bool {
 	_, err := os.Stat(s.checkpointPath())
 	return err == nil
 }

+func (s *SplitStore) pruneCheckpointExists() bool {
+	_, err := os.Stat(s.pruneCheckpointPath())
+	return err == nil
+}
+
 func (s *SplitStore) completeCompaction() error {
 	checkpoint, last, err := OpenCheckpoint(s.checkpointPath())
 	if err != nil {
@@ -1268,6 +1337,7 @@ func (s *SplitStore) completeCompaction() error {
 	defer markSet.Close() //nolint:errcheck

 	// PURGE
+	s.compactType = hot
 	log.Info("purging cold objects from the hotstore")
 	startPurge := time.Now()
 	err = s.completePurge(coldr, checkpoint, last, markSet)
@@ -1290,6 +1360,7 @@ func (s *SplitStore) completeCompaction() error {
 	if err := os.Remove(s.coldSetPath()); err != nil {
 		log.Warnf("error removing coldset: %s", err)
 	}
+	s.compactType = none

 	// Note: at this point we can start the splitstore; a compaction should run on
 	// the first head change, which will trigger gc on the hotstore.
@@ -43,6 +43,7 @@ func (es *exposedSplitStore) Has(ctx context.Context, c cid.Cid) (bool, error) {
 }

 func (es *exposedSplitStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
 	if isIdentiyCid(c) {
 		data, err := decodeIdentityCid(c)
 		if err != nil {
@@ -27,7 +27,7 @@ func (s *SplitStore) gcBlockstore(b bstore.Blockstore, opts []bstore.BlockstoreG
 		return err
 	}

-	log.Infow("garbage collecting hotstore done", "took", time.Since(startGC))
+	log.Infow("garbage collecting blockstore done", "took", time.Since(startGC))
 	return nil
 }
574	blockstore/splitstore/splitstore_prune.go	Normal file
@@ -0,0 +1,574 @@
+package splitstore
+
+import (
+	"bytes"
+	"os"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	cid "github.com/ipfs/go-cid"
+	ipld "github.com/ipfs/go-ipld-format"
+	cbg "github.com/whyrusleeping/cbor-gen"
+	"go.opencensus.io/stats"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/lotus/api"
+	bstore "github.com/filecoin-project/lotus/blockstore"
+	"github.com/filecoin-project/lotus/build"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/metrics"
+)
+
+var (
+	// PruneOnlineGC is a prune option that instructs PruneChain to use online gc for reclaiming space;
+	// there is no value associated with this option.
+	PruneOnlineGC = "splitstore.PruneOnlineGC"
+
+	// PruneMovingGC is a prune option that instructs PruneChain to use moving gc for reclaiming space;
+	// the value associated with this option is the path of the new coldstore.
+	PruneMovingGC = "splitstore.PruneMovingGC"
+
+	// PruneRetainState is a prune option that instructs PruneChain as to how many finalities worth
+	// of state to retain in the coldstore.
+	// The value is an integer:
+	// - if it is -1 then all state objects reachable from the chain will be retained in the coldstore;
+	//   this is useful for garbage collecting side-chains and other garbage in archival nodes.
+	//   This is the (safe) default.
+	// - if it is 0 then no state objects that are unreachable within the compaction boundary will
+	//   be retained in the coldstore.
+	// - if it is a positive integer, then it's the number of finalities past the compaction boundary
+	//   for which chain-reachable state objects are retained.
+	PruneRetainState = "splitstore.PruneRetainState"
+
+	// PruneThreshold is the number of epochs that need to have elapsed
+	// from the previously pruned epoch to trigger a new prune
+	PruneThreshold = 7 * build.Finality
+)
+
// PruneChain instructs the SplitStore to prune chain state in the coldstore, according to the
|
||||||
|
// options specified.
|
||||||
|
func (s *SplitStore) PruneChain(opts api.PruneOpts) error {
|
||||||
|
retainState := opts.RetainState
|
||||||
|
|
||||||
|
var gcOpts []bstore.BlockstoreGCOption
|
||||||
|
if opts.MovingGC {
|
||||||
|
gcOpts = append(gcOpts, bstore.WithFullGC(true))
|
||||||
|
}
|
||||||
|
doGC := func() error { return s.gcBlockstore(s.cold, gcOpts) }
|
||||||
|
|
||||||
|
var retainStateP func(int64) bool
|
||||||
|
switch {
|
||||||
|
case retainState > 0:
|
||||||
|
retainStateP = func(depth int64) bool {
|
||||||
|
return depth <= int64(CompactionBoundary)+retainState*int64(build.Finality)
|
||||||
|
}
|
||||||
|
case retainState < 0:
|
||||||
|
retainStateP = func(_ int64) bool { return true }
|
||||||
|
default:
|
||||||
|
retainStateP = func(depth int64) bool {
|
||||||
|
return depth <= int64(CompactionBoundary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := s.cold.(bstore.BlockstoreIterator); !ok {
|
||||||
|
return xerrors.Errorf("coldstore does not support efficient iteration")
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.pruneChain(retainStateP, doGC)
|
||||||
|
}
|
||||||
|
|
||||||
+func (s *SplitStore) pruneChain(retainStateP func(int64) bool, doGC func() error) error {
+	// inhibit compaction while we are setting up
+	s.headChangeMx.Lock()
+	defer s.headChangeMx.Unlock()
+
+	// take the compaction lock; fail if there is a compaction in progress
+	if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) {
+		return xerrors.Errorf("compaction, prune or warmup in progress")
+	}
+
+	// check if we are actually closing first
+	if atomic.LoadInt32(&s.closing) == 1 {
+		atomic.StoreInt32(&s.compacting, 0)
+		return errClosing
+	}
+
+	// ensure that we have compacted at least once
+	if s.compactionIndex == 0 {
+		atomic.StoreInt32(&s.compacting, 0)
+		return xerrors.Errorf("splitstore has not compacted yet")
+	}
+
+	// get the current tipset
+	curTs := s.chain.GetHeaviestTipSet()
+
+	// begin the transaction and go
+	s.beginTxnProtect()
+	s.compactType = cold
+	go func() {
+		defer atomic.StoreInt32(&s.compacting, 0)
+		defer s.endTxnProtect()
+
+		log.Info("pruning splitstore")
+		start := time.Now()
+
+		s.prune(curTs, retainStateP, doGC)
+
+		log.Infow("prune done", "took", time.Since(start))
+	}()
+
+	return nil
+}
+
+func (s *SplitStore) prune(curTs *types.TipSet, retainStateP func(int64) bool, doGC func() error) {
+	log.Debug("waiting for active views to complete")
+	start := time.Now()
+	s.viewWait()
+	log.Debugw("waiting for active views done", "took", time.Since(start))
+
+	err := s.doPrune(curTs, retainStateP, doGC)
+	if err != nil {
+		log.Errorf("PRUNE ERROR: %s", err)
+	}
+}
+
+func (s *SplitStore) doPrune(curTs *types.TipSet, retainStateP func(int64) bool, doGC func() error) error {
+	currentEpoch := curTs.Height()
+	boundaryEpoch := currentEpoch - CompactionBoundary
+
+	log.Infow("running prune", "currentEpoch", currentEpoch, "pruneEpoch", s.pruneEpoch)
+
+	markSet, err := s.markSetEnv.New("live", s.markSetSize)
+	if err != nil {
+		return xerrors.Errorf("error creating mark set: %w", err)
+	}
+	defer markSet.Close() //nolint:errcheck
+	defer s.debug.Flush()
+
+	if err := s.checkClosing(); err != nil {
+		return err
+	}
+
+	// 0. track all protected references at beginning of compaction; anything added later should
+	//    be transactionally protected by the write
+	log.Info("protecting references with registered protectors")
+	err = s.applyProtectors()
+	if err != nil {
+		return err
+	}
+
+	// 1. mark reachable objects by walking the chain from the current epoch; we keep all messages
+	//    and chain headers; state and receipts are retained only if it is within retention policy scope
+	log.Info("marking reachable objects")
+	startMark := time.Now()
+
+	count := new(int64)
+	err = s.walkChainDeep(curTs, retainStateP,
+		func(c cid.Cid) error {
+			if isUnitaryObject(c) {
+				return errStopWalk
+			}
+
+			mark, err := markSet.Has(c)
+			if err != nil {
+				return xerrors.Errorf("error checking markset: %w", err)
+			}
+
+			if mark {
+				return errStopWalk
+			}
+
+			atomic.AddInt64(count, 1)
+			return markSet.Mark(c)
+		})
+
+	if err != nil {
+		return xerrors.Errorf("error marking: %w", err)
+	}
+
+	log.Infow("marking done", "took", time.Since(startMark), "marked", count)
+
+	if err := s.checkClosing(); err != nil {
+		return err
+	}
+
+	// 1.1 protect transactional refs
+	err = s.protectTxnRefs(markSet)
+	if err != nil {
+		return xerrors.Errorf("error protecting transactional refs: %w", err)
+	}
+
+	if err := s.checkClosing(); err != nil {
+		return err
+	}
+
+	// 2. iterate through the coldstore to collect dead objects
+	log.Info("collecting dead objects")
+	startCollect := time.Now()
+
+	deadw, err := NewColdSetWriter(s.deadSetPath())
+	if err != nil {
+		return xerrors.Errorf("error creating coldset: %w", err)
+	}
+	defer deadw.Close() //nolint:errcheck
+
+	// some stats for logging
+	var liveCnt, deadCnt int
+
+	err = s.cold.(bstore.BlockstoreIterator).ForEachKey(func(c cid.Cid) error {
+		// was it marked?
+		mark, err := markSet.Has(c)
+		if err != nil {
+			return xerrors.Errorf("error checking mark set for %s: %w", c, err)
+		}
+
+		if mark {
+			liveCnt++
+			return nil
+		}
+
+		// Note: a possible optimization here is to also purge objects that are in the hotstore
+		//       but this needs special care not to purge genesis state, so we don't bother (yet)
+
+		// it's dead in the coldstore, mark it as candidate for purge
+
+		if err := deadw.Write(c); err != nil {
+			return xerrors.Errorf("error writing cid to coldstore: %w", err)
+		}
+		deadCnt++
+
+		return nil
+	})
+
+	if err != nil {
+		return xerrors.Errorf("error dead objects: %w", err)
+	}
+
+	if err := deadw.Close(); err != nil {
+		return xerrors.Errorf("error closing deadset: %w", err)
+	}
+
+	stats.Record(s.ctx, metrics.SplitstoreCompactionDead.M(int64(deadCnt)))
+
+	log.Infow("dead collection done", "took", time.Since(startCollect))
+	log.Infow("prune stats", "live", liveCnt, "dead", deadCnt)
+
+	if err := s.checkClosing(); err != nil {
+		return err
+	}
+
+	// now that we have collected dead objects, check for missing references from transactional i/o
+	// this is carried over from hot compaction for completeness
+	s.waitForMissingRefs(markSet)
+
+	if err := s.checkClosing(); err != nil {
+		return err
+	}
+
+	deadr, err := NewColdSetReader(s.deadSetPath())
+	if err != nil {
+		return xerrors.Errorf("error opening deadset: %w", err)
+	}
+	defer deadr.Close() //nolint:errcheck
+
+	// 3. Purge dead objects with checkpointing for recovery.
+	// This is the critical section of prune, whereby any dead object not in the markSet is
+	// considered already deleted.
+	// We delete dead objects in batches, holding the transaction lock, where we check the markSet
+	// again for new references created by the caller.
+	// After each batch we write a checkpoint to disk; if the process is interrupted before completion
+	// the process will continue from the checkpoint in the next recovery.
+	if err := s.beginCriticalSection(markSet); err != nil {
+		return xerrors.Errorf("error beginning critical section: %w", err)
+	}
+
+	if err := s.checkClosing(); err != nil {
+		return err
+	}
+
+	checkpoint, err := NewCheckpoint(s.pruneCheckpointPath())
+	if err != nil {
+		return xerrors.Errorf("error creating checkpoint: %w", err)
+	}
+	defer checkpoint.Close() //nolint:errcheck
+
+	log.Info("purging dead objects from the coldstore")
+	startPurge := time.Now()
+	err = s.purge(deadr, checkpoint, markSet)
+	if err != nil {
+		return xerrors.Errorf("error purging dead objects: %w", err)
+	}
+	log.Infow("purging dead objects from coldstore done", "took", time.Since(startPurge))
+
+	s.endCriticalSection()
+
+	if err := checkpoint.Close(); err != nil {
+		log.Warnf("error closing checkpoint: %s", err)
+	}
+	if err := os.Remove(s.pruneCheckpointPath()); err != nil {
+		log.Warnf("error removing checkpoint: %s", err)
+	}
+	if err := deadr.Close(); err != nil {
+		log.Warnf("error closing deadset: %s", err)
+	}
+	if err := os.Remove(s.deadSetPath()); err != nil {
+		log.Warnf("error removing deadset: %s", err)
+	}
+
+	// we are done; do some housekeeping
+	s.endTxnProtect()
+	err = doGC()
+	if err != nil {
+		log.Warnf("error garbage collecting cold store: %s", err)
+	}
+
+	if err := s.setPruneEpoch(boundaryEpoch); err != nil {
+		return xerrors.Errorf("error saving prune base epoch: %w", err)
+	}
+
+	s.pruneIndex++
+	err = s.ds.Put(s.ctx, pruneIndexKey, int64ToBytes(s.compactionIndex))
+	if err != nil {
+		return xerrors.Errorf("error saving compaction index: %w", err)
+	}
+
+	return nil
+}
+
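The checkpointed purge in step 3 is the part worth internalizing: deletion happens in batches, and the last completed position is persisted so a crash resumes from the checkpoint rather than restarting. A minimal sketch of that recovery pattern with generic types (not the splitstore's actual Checkpoint/ColdSetReader machinery):

package main

import "fmt"

// purgeWithCheckpoint deletes in batches and records the last completed
// position after each batch, so a restart resumes after the checkpoint.
func purgeWithCheckpoint(dead []string, checkpointed int, del func(batch []string) error,
	checkpoint func(pos int) error) error {
	const batchSize = 2
	for i := checkpointed; i < len(dead); i += batchSize {
		end := i + batchSize
		if end > len(dead) {
			end = len(dead)
		}
		if err := del(dead[i:end]); err != nil {
			return err
		}
		if err := checkpoint(end); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	dead := []string{"a", "b", "c", "d", "e"}
	// resume from a hypothetical prior checkpoint at position 2
	_ = purgeWithCheckpoint(dead, 2,
		func(batch []string) error { fmt.Println("delete", batch); return nil },
		func(pos int) error { fmt.Println("checkpoint", pos); return nil })
}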
+func (s *SplitStore) completePrune() error {
+	checkpoint, last, err := OpenCheckpoint(s.pruneCheckpointPath())
+	if err != nil {
+		return xerrors.Errorf("error opening checkpoint: %w", err)
+	}
+	defer checkpoint.Close() //nolint:errcheck
+
+	deadr, err := NewColdSetReader(s.deadSetPath())
+	if err != nil {
+		return xerrors.Errorf("error opening deadset: %w", err)
+	}
+	defer deadr.Close() //nolint:errcheck
+
+	markSet, err := s.markSetEnv.Recover("live")
+	if err != nil {
+		return xerrors.Errorf("error recovering markset: %w", err)
+	}
+	defer markSet.Close() //nolint:errcheck
+
+	// PURGE!
+	s.compactType = cold
+	log.Info("purging dead objects from the coldstore")
+	startPurge := time.Now()
+	err = s.completePurge(deadr, checkpoint, last, markSet)
+	if err != nil {
+		return xerrors.Errorf("error purging dead objects: %w", err)
+	}
+	log.Infow("purging dead objects from the coldstore done", "took", time.Since(startPurge))
+
+	markSet.EndCriticalSection()
+	s.compactType = none
+
+	if err := checkpoint.Close(); err != nil {
+		log.Warnf("error closing checkpoint: %s", err)
+	}
+	if err := os.Remove(s.pruneCheckpointPath()); err != nil {
+		log.Warnf("error removing checkpoint: %s", err)
+	}
+	if err := deadr.Close(); err != nil {
+		log.Warnf("error closing deadset: %s", err)
+	}
+	if err := os.Remove(s.deadSetPath()); err != nil {
+		log.Warnf("error removing deadset: %s", err)
+	}
+
+	return nil
+}
+
+// like walkChain but performs a deep walk, using parallel walking with walkObjectLax,
+// whereby all extant messages are retained and state roots are retained if they satisfy
+// the given predicate.
+// missing references are ignored, as we expect to have plenty for snapshot syncs.
+func (s *SplitStore) walkChainDeep(ts *types.TipSet, retainStateP func(int64) bool,
+	f func(cid.Cid) error) error {
+	visited := cid.NewSet()
+	toWalk := ts.Cids()
+	walkCnt := 0
+
+	workers := runtime.NumCPU() / 2
+	if workers < 2 {
+		workers = 2
+	}
+
+	var wg sync.WaitGroup
+	workch := make(chan cid.Cid, 16*workers)
+	errch := make(chan error, workers)
+
+	var once sync.Once
+	defer once.Do(func() { close(workch) })
+
+	push := func(c cid.Cid) error {
+		if !visited.Visit(c) {
+			return nil
+		}
+
+		select {
+		case workch <- c:
+			return nil
+		case err := <-errch:
+			return err
+		}
+	}
+
+	worker := func() {
+		defer wg.Done()
+		for c := range workch {
+			err := s.walkObjectLax(c, f)
+			if err != nil {
+				errch <- xerrors.Errorf("error walking object (cid: %s): %w", c, err)
+				return
+			}
+		}
+	}
+
+	for i := 0; i < workers; i++ {
+		wg.Add(1)
+		go worker()
+	}
+
+	baseEpoch := ts.Height()
+	minEpoch := baseEpoch // for progress report
+	log.Infof("walking at epoch %d", minEpoch)
+
+	walkBlock := func(c cid.Cid) error {
+		if !visited.Visit(c) {
+			return nil
+		}
+
+		walkCnt++
+
+		if err := f(c); err != nil {
+			return err
+		}
+
+		var hdr types.BlockHeader
+		err := s.view(c, func(data []byte) error {
+			return hdr.UnmarshalCBOR(bytes.NewBuffer(data))
+		})
+
+		if err != nil {
+			return xerrors.Errorf("error unmarshaling block header (cid: %s): %w", c, err)
+		}
+
+		if hdr.Height < minEpoch {
+			minEpoch = hdr.Height
+			if minEpoch%10_000 == 0 {
+				log.Infof("walking at epoch %d (walked: %d)", minEpoch, walkCnt)
+			}
+		}
+
+		depth := int64(baseEpoch - hdr.Height)
+		retainState := retainStateP(depth)
+
+		if hdr.Height > 0 {
+			if err := push(hdr.Messages); err != nil {
+				return err
+			}
+			if retainState {
+				if err := push(hdr.ParentMessageReceipts); err != nil {
+					return err
+				}
+			}
+		}
+
+		if retainState || hdr.Height == 0 {
+			if err := push(hdr.ParentStateRoot); err != nil {
+				return err
+			}
+		}
+
+		if hdr.Height > 0 {
+			toWalk = append(toWalk, hdr.Parents...)
+		}
+
+		return nil
+	}
+
+	for len(toWalk) > 0 {
+		// walking can take a while, so check this with every opportunity
+		if err := s.checkClosing(); err != nil {
+			return err
+		}
+
+		select {
+		case err := <-errch:
+			return err
+		default:
+		}
+
+		walking := toWalk
+		toWalk = nil
+		for _, c := range walking {
+			if err := walkBlock(c); err != nil {
+				return xerrors.Errorf("error walking block (cid: %s): %w", c, err)
+			}
+		}
+	}
+
+	once.Do(func() { close(workch) })
+	wg.Wait()
+	select {
+	case err := <-errch:
+		return err
+	default:
+	}
+
+	log.Infow("chain walk done", "walked", walkCnt)
+
+	return nil
+}
+
+// like walkObject but treats missing references laxly; faster version of walkObjectIncomplete
+// without an occurs check.
+func (s *SplitStore) walkObjectLax(c cid.Cid, f func(cid.Cid) error) error {
+	if err := f(c); err != nil {
+		if err == errStopWalk {
+			return nil
+		}
+
+		return err
+	}
+
+	if c.Prefix().Codec != cid.DagCBOR {
+		return nil
+	}
+
+	// check this before recursing
+	if err := s.checkClosing(); err != nil {
+		return err
+	}
+
+	var links []cid.Cid
+	err := s.view(c, func(data []byte) error {
+		return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) {
+			links = append(links, c)
+		})
+	})
+
+	if err != nil {
+		if ipld.IsNotFound(err) { // not a problem for deep walks
+			return nil
+		}
+
+		return xerrors.Errorf("error scanning linked block (cid: %s): %w", c, err)
+	}
+
+	for _, c := range links {
+		err := s.walkObjectLax(c, f)
+		if err != nil {
+			return xerrors.Errorf("error walking link (cid: %s): %w", c, err)
+		}
+	}
+
+	return nil
+}
@@ -83,7 +83,7 @@ func (s *SplitStore) reifyWorker(workch chan cid.Cid) {
 }

 func (s *SplitStore) doReify(c cid.Cid) {
-	var toreify, totrack, toforget []cid.Cid
+	var toreify, toforget []cid.Cid

 	defer func() {
 		s.reifyMx.Lock()
@@ -92,9 +92,6 @@ func (s *SplitStore) doReify(c cid.Cid) {
 		for _, c := range toreify {
 			delete(s.reifyInProgress, c)
 		}
-		for _, c := range totrack {
-			delete(s.reifyInProgress, c)
-		}
 		for _, c := range toforget {
 			delete(s.reifyInProgress, c)
 		}
@@ -131,20 +128,11 @@ func (s *SplitStore) doReify(c cid.Cid) {
 			return xerrors.Errorf("error checking hotstore: %w", err)
 		}

+		// All reified blocks are tracked at reification start
 		if has {
-			if s.txnMarkSet != nil {
-				hasMark, err := s.txnMarkSet.Has(c)
-				if err != nil {
-					log.Warnf("error checking markset: %s", err)
-				} else if hasMark {
 			toforget = append(toforget, c)
 			return errStopWalk
 		}
-			} else {
-				totrack = append(totrack, c)
-				return errStopWalk
-			}
-		}

 		toreify = append(toreify, c)
 		return nil
@@ -155,7 +143,7 @@ func (s *SplitStore) doReify(c cid.Cid) {
 	})

 	if err != nil {
-		if xerrors.Is(err, errReifyLimit) {
+		if errors.Is(err, errReifyLimit) {
 			log.Debug("reification aborted; reify limit reached")
 			return
 		}
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if s.txnMarkSet != nil {
|
|
||||||
if len(toreify) > 0 {
|
|
||||||
if err := s.txnMarkSet.MarkMany(toreify); err != nil {
|
|
||||||
log.Warnf("error marking reified objects: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(totrack) > 0 {
|
|
||||||
if err := s.txnMarkSet.MarkMany(totrack); err != nil {
|
|
||||||
log.Warnf("error marking tracked objects: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// if txnActive is false these are noops
|
|
||||||
if len(toreify) > 0 {
|
|
||||||
s.trackTxnRefMany(toreify)
|
|
||||||
}
|
|
||||||
if len(totrack) > 0 {
|
|
||||||
s.trackTxnRefMany(totrack)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
@@ -21,14 +21,14 @@ var (
 	WarmupBoundary = build.Finality
 )

-// warmup acuiqres the compaction lock and spawns a goroutine to warm up the hotstore;
+// warmup acquires the compaction lock and spawns a goroutine to warm up the hotstore;
 // this is necessary when we sync from a snapshot or when we enable the splitstore
 // on top of an existing blockstore (which becomes the coldstore).
 func (s *SplitStore) warmup(curTs *types.TipSet) error {
 	if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) {
 		return xerrors.Errorf("error locking compaction")
 	}
+	s.compactType = warmup
 	go func() {
 		defer atomic.StoreInt32(&s.compacting, 0)

|
|||||||
defer t.mu.RUnlock()
|
defer t.mu.RUnlock()
|
||||||
|
|
||||||
ch := make(chan cid.Cid, len(t.active)+len(t.inactive))
|
ch := make(chan cid.Cid, len(t.active)+len(t.inactive))
|
||||||
for c := range t.active {
|
for _, b := range t.active {
|
||||||
ch <- c
|
ch <- b.Cid()
|
||||||
}
|
}
|
||||||
for c := range t.inactive {
|
for _, b := range t.inactive {
|
||||||
if _, ok := t.active[c]; ok {
|
c := b.Cid()
|
||||||
|
if _, ok := t.active[string(c.Hash())]; ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
ch <- c
|
ch <- c
|
||||||
|
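The fix above reads as though the cache maps string(multihash) to the block itself, so ranging over keys yielded strings rather than CIDs; the CID must be recovered from the stored block. A minimal sketch of that keying, assuming this internal layout based only on the diff:

package main

import (
	"fmt"

	blocks "github.com/ipfs/go-block-format"
)

func main() {
	// Assumed layout: string(multihash bytes) -> block.
	active := make(map[string]blocks.Block)

	b := blocks.NewBlock([]byte("hello"))
	active[string(b.Cid().Hash())] = b

	// Ranging over values (not keys) gives back blocks, from which the
	// CID is recovered -- as the corrected AllKeysChan does.
	for _, blk := range active {
		fmt.Println(blk.Cid())
	}
}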
@@ -1,2 +1,2 @@
-/dns4/bootstrap-0.interop.fildev.network/tcp/1347/p2p/12D3KooWDY249nj6gxSiBTocNBnkbBBvnfPgkxq5SBUVrccjwRnr
-/dns4/bootstrap-1.interop.fildev.network/tcp/1347/p2p/12D3KooWCwFsKt4NyoJQk7vjiACXL9LznBWANXKjCebKJ7MHh3h4
+/dns4/bootstrap-0.interop.fildev.network/tcp/1347/p2p/12D3KooWDpppr8csCNvEPnD2Z83KTPdBTM7iJhL66qK8LK3bB5NU
+/dns4/bootstrap-1.interop.fildev.network/tcp/1347/p2p/12D3KooWR3K1sXWoDYcXWqDF26mFEM1o1g7e7fcVR3NYE7rn24Gs
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -37,7 +37,7 @@ func BuildTypeString() string {
 }

 // BuildVersion is the local build version
-const BuildVersion = "1.17.0"
+const BuildVersion = "1.17.1"

 func UserVersion() string {
 	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@@ -185,6 +185,10 @@ func GetDealFees(deal market{{.latestVersion}}.DealProposal, height abi.ChainEpo
 	return ef, big.Sub(tf, ef)
 }

+func IsDealActive(state market{{.latestVersion}}.DealState) bool {
+	return state.SectorStartEpoch > -1 && state.SlashEpoch == -1
+}
+
 func labelFromGoString(s string) (market{{.latestVersion}}.DealLabel, error) {
 	if utf8.ValidString(s) {
 		return market{{.latestVersion}}.NewLabelFromString(s)
@@ -240,6 +240,10 @@ func GetDealFees(deal market8.DealProposal, height abi.ChainEpoch) (abi.TokenAmo
 	return ef, big.Sub(tf, ef)
 }

+func IsDealActive(state market8.DealState) bool {
+	return state.SectorStartEpoch > -1 && state.SlashEpoch == -1
+}
+
 func labelFromGoString(s string) (market8.DealLabel, error) {
 	if utf8.ValidString(s) {
 		return market8.NewLabelFromString(s)
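The new IsDealActive helper encodes a two-field predicate worth spelling out: a deal is active once its sector has started proving (SectorStartEpoch > -1) and it has not been slashed (SlashEpoch == -1). A minimal self-contained sketch with a stand-in struct (not the real market8.DealState):

package main

import "fmt"

// DealState is a hypothetical stand-in keeping only the two fields read.
type DealState struct {
	SectorStartEpoch int64 // -1 until the deal's sector is proven
	SlashEpoch       int64 // -1 unless the deal was slashed
}

// IsDealActive mirrors the helper added in the hunk above.
func IsDealActive(state DealState) bool {
	return state.SectorStartEpoch > -1 && state.SlashEpoch == -1
}

func main() {
	fmt.Println(IsDealActive(DealState{SectorStartEpoch: 100, SlashEpoch: -1}))  // true
	fmt.Println(IsDealActive(DealState{SectorStartEpoch: -1, SlashEpoch: -1}))   // false: not yet active
	fmt.Println(IsDealActive(DealState{SectorStartEpoch: 100, SlashEpoch: 200})) // false: slashed
}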
@@ -82,7 +82,17 @@ type FilecoinBlockMessages struct {
 	WinCount int64
 }

-func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []FilecoinBlockMessages, epoch abi.ChainEpoch, r vm.Rand, em stmgr.ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
+func (t *TipSetExecutor) ApplyBlocks(ctx context.Context,
+	sm *stmgr.StateManager,
+	parentEpoch abi.ChainEpoch,
+	pstate cid.Cid,
+	bms []FilecoinBlockMessages,
+	epoch abi.ChainEpoch,
+	r vm.Rand,
+	em stmgr.ExecMonitor,
+	vmTracing bool,
+	baseFee abi.TokenAmount,
+	ts *types.TipSet) (cid.Cid, cid.Cid, error) {
 	done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal)
 	defer done()

@@ -104,6 +114,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
 			NetworkVersion: sm.GetNetworkVersion(ctx, e),
 			BaseFee:        baseFee,
 			LookbackState:  stmgr.LookbackStateGetterForTipset(sm, ts),
+			Tracing:        vmTracing,
 		}

 		return sm.VMConstructor()(ctx, vmopt)
@@ -269,7 +280,11 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager
 	return st, rectroot, nil
 }

-func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet, em stmgr.ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) {
+func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context,
+	sm *stmgr.StateManager,
+	ts *types.TipSet,
+	em stmgr.ExecMonitor,
+	vmTracing bool) (stateroot cid.Cid, rectsroot cid.Cid, err error) {
 	ctx, span := trace.StartSpan(ctx, "computeTipSetState")
 	defer span.End()

@@ -309,7 +324,7 @@ func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag
 	}
 	baseFee := blks[0].ParentBaseFee

-	return t.ApplyBlocks(ctx, sm, parentEpoch, pstate, fbmsgs, blks[0].Height, r, em, baseFee, ts)
+	return t.ApplyBlocks(ctx, sm, parentEpoch, pstate, fbmsgs, blks[0].Height, r, em, vmTracing, baseFee, ts)
 }

 var _ stmgr.Executor = &TipSetExecutor{}
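The hunks above all thread one new boolean from the public ExecuteTipSet/ApplyBlocks signatures down into the VM options. A minimal sketch of that plumbing pattern with stub types (hypothetical, not the Lotus types): callers that need traces pass true, consensus-critical sync passes false.

package main

import "fmt"

// Stub stand-in for the VM options struct.
type vmOpts struct{ Tracing bool }

func newVM(o vmOpts) string {
	if o.Tracing {
		return "vm with execution traces"
	}
	return "vm without traces"
}

// executeTipSet mirrors the plumbing: the flag is simply forwarded
// into the inner constructor's options.
func executeTipSet(vmTracing bool) string {
	return newVM(vmOpts{Tracing: vmTracing})
}

func main() {
	fmt.Println(executeTipSet(true))  // replay/trace path
	fmt.Println(executeTipSet(false)) // sync path
}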
@ -12,6 +12,7 @@ import (
|
|||||||
|
|
||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
"github.com/filecoin-project/go-state-types/crypto"
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
"github.com/filecoin-project/lotus/api"
|
||||||
@@ -88,6 +89,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.
 		NetworkVersion: sm.GetNetworkVersion(ctx, pheight+1),
 		BaseFee:        types.NewInt(0),
 		LookbackState:  LookbackStateGetterForTipset(sm, ts),
+		Tracing:        true,
 	}

 	vmi, err := sm.newVM(ctx, vmopt)
@@ -156,6 +158,10 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
 	ctx, span := trace.StartSpan(ctx, "statemanager.CallWithGas")
 	defer span.End()

+	// Copy the message as we'll be modifying the nonce.
+	msgCopy := *msg
+	msg = &msgCopy
+
 	if ts == nil {
 		ts = sm.cs.GetHeaviestTipSet()
@@ -221,6 +227,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri
 		NetworkVersion: sm.GetNetworkVersion(ctx, ts.Height()+1),
 		BaseFee:        ts.Blocks()[0].ParentBaseFee,
 		LookbackState:  LookbackStateGetterForTipset(sm, ts),
+		Tracing:        true,
 	}
 	vmi, err := sm.newVM(ctx, vmopt)
 	if err != nil {
@@ -273,9 +280,21 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri

 	}

+	// If the fee cap is set to zero, make gas free.
+	if msg.GasFeeCap.NilOrZero() {
+		// Now estimate with a new VM with no base fee.
+		vmopt.BaseFee = big.Zero()
+		vmopt.StateBase = stateCid
+
+		vmi, err = sm.newVM(ctx, vmopt)
+		if err != nil {
+			return nil, xerrors.Errorf("failed to set up estimation vm: %w", err)
+		}
+	}
+
 	ret, err := vmi.ApplyMessage(ctx, msgApply)
 	if err != nil {
-		return nil, xerrors.Errorf("apply message failed: %w", err)
+		return nil, xerrors.Errorf("gas estimation failed: %w", err)
 	}

 	var errs string
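The zero fee-cap branch is easy to misread: when the caller sets no fee cap, estimation re-runs on a VM whose base fee is zero, so gas charges cannot make the call fail for lack of funds. A minimal sketch of just that decision, using math/big stand-ins rather than the Lotus types:

package main

import (
	"fmt"
	"math/big"
)

// estimateBaseFee mirrors the branch above: a nil or zero fee cap means
// "treat gas as free" by zeroing the base fee for the estimation VM.
func estimateBaseFee(feeCap, chainBaseFee *big.Int) *big.Int {
	if feeCap == nil || feeCap.Sign() == 0 {
		return big.NewInt(0)
	}
	return chainBaseFee
}

func main() {
	chainFee := big.NewInt(100)
	fmt.Println(estimateBaseFee(nil, chainFee))           // 0
	fmt.Println(estimateBaseFee(big.NewInt(0), chainFee)) // 0
	fmt.Println(estimateBaseFee(big.NewInt(5), chainFee)) // 100
}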
@@ -301,7 +320,7 @@ func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.C
 	// message to find
 	finder.mcid = mcid

-	_, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, &finder)
+	_, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, &finder, true)
 	if err != nil && !xerrors.Is(err, errHaltExecution) {
 		return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err)
 	}
@@ -60,7 +60,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
 		return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil
 	}

-	st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor)
+	st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor, false)
 	if err != nil {
 		return cid.Undef, cid.Undef, err
 	}
@@ -69,7 +69,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
 }

 func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) {
-	st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, em)
+	st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, em, true)
 	return st, err
 }

@@ -58,7 +58,7 @@ type migration struct {

 type Executor interface {
 	NewActorRegistry() *vm.ActorRegistry
-	ExecuteTipSet(ctx context.Context, sm *StateManager, ts *types.TipSet, em ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error)
+	ExecuteTipSet(ctx context.Context, sm *StateManager, ts *types.TipSet, em ExecMonitor, vmTracing bool) (stateroot cid.Cid, rectsroot cid.Cid, err error)
 }

 type StateManager struct {
@@ -94,6 +94,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
 		NetworkVersion: sm.GetNetworkVersion(ctx, height),
 		BaseFee:        ts.Blocks()[0].ParentBaseFee,
 		LookbackState:  LookbackStateGetterForTipset(sm, ts),
+		Tracing:        true,
 	}
 	vmi, err := sm.newVM(ctx, vmopt)
 	if err != nil {
@@ -5,9 +5,11 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"math"
 	"os"
 	"sort"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/ipfs/go-cid"
@@ -281,7 +283,7 @@ func defaultFVMOpts(ctx context.Context, opts *VMOpts) (*ffi.FVMOpts, error) {
 		BaseCircSupply: circToReport,
 		NetworkVersion: opts.NetworkVersion,
 		StateBase:      opts.StateBase,
-		Tracing:        EnableDetailedTracing,
+		Tracing:        opts.Tracing || EnableDetailedTracing,
 	}, nil

 }
@@ -418,6 +420,7 @@ func NewDebugFVM(ctx context.Context, opts *VMOpts) (*FVM, error) {

 func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) {
 	start := build.Clock.Now()
+	defer atomic.AddUint64(&StatApplied, 1)
 	vmMsg := cmsg.VMMessage()
 	msgBytes, err := vmMsg.Serialize()
 	if err != nil {
@@ -481,6 +484,8 @@ func (vm *FVM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet

 func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (*ApplyRet, error) {
 	start := build.Clock.Now()
+	defer atomic.AddUint64(&StatApplied, 1)
+	cmsg.GasLimit = math.MaxInt64 / 2
 	vmMsg := cmsg.VMMessage()
 	msgBytes, err := vmMsg.Serialize()
 	if err != nil {
@@ -59,7 +59,9 @@ func (m *Message) ValueReceived() abi.TokenAmount {
 	return m.msg.Value
 }

-// EnableDetailedTracing, if true, outputs gas tracing in execution traces.
+// EnableDetailedTracing has different behaviour in the LegacyVM and FVM.
+// In the LegacyVM, it enables detailed gas tracing, slowing down execution.
+// In the FVM, it enables execution traces, which are primarily used to observe subcalls.
 var EnableDetailedTracing = os.Getenv("LOTUS_VM_ENABLE_TRACING") == "1"

 type Runtime struct {
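Taken together with the defaultFVMOpts change earlier, tracing now has two switches: a per-call request (VMOpts.Tracing) or the global environment variable. A minimal sketch of that precedence (an editor's illustration, not the commit's code):

package main

import (
	"fmt"
	"os"
)

// tracingEnabled mirrors the precedence in defaultFVMOpts: a per-call
// request (VMOpts.Tracing) or LOTUS_VM_ENABLE_TRACING=1 both turn traces on.
func tracingEnabled(optsTracing bool) bool {
	enableDetailedTracing := os.Getenv("LOTUS_VM_ENABLE_TRACING") == "1"
	return optsTracing || enableDetailedTracing
}

func main() {
	fmt.Println(tracingEnabled(true))  // true regardless of the env var
	fmt.Println(tracingEnabled(false)) // true only if LOTUS_VM_ENABLE_TRACING=1
}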
@@ -45,12 +45,6 @@ var (
 	gasOnActorExec = newGasCharge("OnActorExec", 0, 0)
 )

-// stat counters
-var (
-	StatSends   uint64
-	StatApplied uint64
-)
-
 // ResolveToKeyAddr returns the public key type of address (`BLS`/`SECP256K1`) of an account actor identified by `addr`.
 func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Address) (address.Address, error) {
 	if addr.Protocol() == address.BLS || addr.Protocol() == address.SECP256K1 {
@@ -229,6 +223,7 @@ type VMOpts struct {
 	NetworkVersion network.Version
 	BaseFee        abi.TokenAmount
 	LookbackState  LookbackStateGetter
+	Tracing        bool
 }

 func NewLegacyVM(ctx context.Context, opts *VMOpts) (*LegacyVM, error) {
@@ -11,6 +11,12 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 )

+// stat counters
+var (
+	StatSends   uint64
+	StatApplied uint64
+)
+
 type Interface interface {
 	// Applies the given message onto the VM's current state, returning the result of the execution
 	ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error)
@@ -21,8 +27,6 @@ type Interface interface {
 	Flush(ctx context.Context) (cid.Cid, error)
 }

-var useFvmForMainnetV15 = os.Getenv("LOTUS_USE_FVM_TO_SYNC_MAINNET_V15") == "1"
-
 // WARNING: You will not affect your node's execution by misusing this feature, but you will confuse yourself thoroughly!
 // An envvar that allows the user to specify debug actors bundles to be used by the FVM
 // alongside regular execution. This is basically only to be used to print out specific logging information.
@@ -37,13 +41,5 @@ func NewVM(ctx context.Context, opts *VMOpts) (Interface, error) {
 		return NewFVM(ctx, opts)
 	}

-	// Remove after v16 upgrade, this is only to support testing and validation of the FVM
-	if useFvmForMainnetV15 && opts.NetworkVersion >= network.Version15 {
-		if useFvmDebug {
-			return NewDualExecutionFVM(ctx, opts)
-		}
-		return NewFVM(ctx, opts)
-	}
-
 	return NewLegacyVM(ctx, opts)
 }
46	cli/chain.go
@@ -1462,3 +1462,49 @@ func createExportFile(app *cli.App, path string) (io.WriteCloser, error) {
 	}
 	return fi, nil
 }
+
+var ChainPruneCmd = &cli.Command{
+	Name:  "prune",
+	Usage: "prune the stored chain state and perform garbage collection",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name:  "online-gc",
+			Value: false,
+			Usage: "use online gc for garbage collecting the coldstore",
+		},
+		&cli.BoolFlag{
+			Name:  "moving-gc",
+			Value: false,
+			Usage: "use moving gc for garbage collecting the coldstore",
+		},
+		&cli.StringFlag{
+			Name:  "move-to",
+			Value: "",
+			Usage: "specify new path for coldstore during moving gc",
+		},
+		&cli.IntFlag{
+			Name:  "retention",
+			Value: -1,
+			Usage: "specify state retention policy",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		api, closer, err := GetFullNodeAPIV1(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+		ctx := ReqContext(cctx)
+
+		opts := lapi.PruneOpts{}
+		if cctx.Bool("online-gc") {
+			opts.MovingGC = false
+		}
+		if cctx.Bool("moving-gc") {
+			opts.MovingGC = true
+		}
+		opts.RetainState = int64(cctx.Int("retention"))
+
+		return api.ChainPrune(ctx, opts)
+	},
+}
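The new command is a thin wrapper over the ChainPrune API added in this release. A minimal sketch of driving it programmatically, mirroring `lotus chain prune --moving-gc --retention=2`; this is an editor's illustration assuming a connected v1 full-node client, not code from the commit:

package example

import (
	"context"

	lapi "github.com/filecoin-project/lotus/api"
)

// pruneChainState drives ChainPrune the same way the CLI command does.
// The caller supplies a connected v1 full-node client.
func pruneChainState(ctx context.Context, api lapi.FullNode) error {
	opts := lapi.PruneOpts{
		MovingGC:    true, // use moving gc; false keeps online gc
		RetainState: 2,    // keep two finalities of state past the boundary
	}
	return api.ChainPrune(ctx, opts)
}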
@@ -27,6 +27,7 @@ import (
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/actors"
 	"github.com/filecoin-project/lotus/chain/actors/adt"
+	builtin2 "github.com/filecoin-project/lotus/chain/actors/builtin"
 	lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
@@ -218,6 +219,17 @@ var actorWithdrawCmd = &cli.Command{
 		},
 	},
 	Action: func(cctx *cli.Context) error {
+		amount := abi.NewTokenAmount(0)
+
+		if cctx.Args().Present() {
+			f, err := types.ParseFIL(cctx.Args().First())
+			if err != nil {
+				return xerrors.Errorf("parsing 'amount' argument: %w", err)
+			}
+
+			amount = abi.TokenAmount(f)
+		}
+
 		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
 		if err != nil {
 			return err
@@ -232,67 +244,19 @@ var actorWithdrawCmd = &cli.Command{

 		ctx := lcli.ReqContext(cctx)

-		maddr, err := nodeApi.ActorAddress(ctx)
+		res, err := nodeApi.ActorWithdrawBalance(ctx, amount)
 		if err != nil {
 			return err
 		}
-
-		mi, err := api.StateMinerInfo(ctx, maddr, types.EmptyTSK)
-		if err != nil {
-			return err
-		}
-
-		available, err := api.StateMinerAvailableBalance(ctx, maddr, types.EmptyTSK)
-		if err != nil {
-			return err
-		}
-
-		amount := available
-		if cctx.Args().Present() {
-			f, err := types.ParseFIL(cctx.Args().First())
-			if err != nil {
-				return xerrors.Errorf("parsing 'amount' argument: %w", err)
-			}
-
-			amount = abi.TokenAmount(f)
-
-			if amount.GreaterThan(available) {
-				return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", types.FIL(amount), types.FIL(available))
-			}
-		}
-
-		params, err := actors.SerializeParams(&miner.WithdrawBalanceParams{
-			AmountRequested: amount, // Default to attempting to withdraw all the extra funds in the miner actor
-		})
-		if err != nil {
-			return err
-		}
-
-		smsg, err := api.MpoolPushMessage(ctx, &types.Message{
-			To:     maddr,
-			From:   mi.Owner,
-			Value:  types.NewInt(0),
-			Method: builtin.MethodsMiner.WithdrawBalance,
-			Params: params,
-		}, nil)
-		if err != nil {
-			return err
-		}
-
-		fmt.Printf("Requested rewards withdrawal in message %s\n", smsg.Cid())
-
 		// wait for it to get mined into a block
-		fmt.Printf("waiting for %d epochs for confirmation..\n", uint64(cctx.Int("confidence")))
-
-		wait, err := api.StateWaitMsg(ctx, smsg.Cid(), uint64(cctx.Int("confidence")))
+		wait, err := api.StateWaitMsg(ctx, res, uint64(cctx.Int("confidence")))
 		if err != nil {
-			return err
+			return xerrors.Errorf("Timeout waiting for withdrawal message %s", wait.Message)
 		}

-		// check it executed successfully
 		if wait.Receipt.ExitCode != 0 {
-			fmt.Println(cctx.App.Writer, "withdrawal failed!")
-			return err
+			return xerrors.Errorf("Failed to execute withdrawal message %s: %w", wait.Message, wait.Receipt.ExitCode.Error())
 		}

 		nv, err := api.StateNetworkVersion(ctx, wait.TipSet)
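The refactor above moves message construction behind the miner node: ActorWithdrawBalance pushes the WithdrawBalance message itself and returns its CID, so the CLI only waits on execution. A minimal sketch of the new two-step flow with hypothetical stubs standing in for the two Lotus APIs used in the hunk:

package main

import (
	"context"
	"fmt"
)

// Hypothetical stand-ins: the real calls are nodeApi.ActorWithdrawBalance
// (storage-miner API) and api.StateWaitMsg (full-node API).
func actorWithdrawBalance(ctx context.Context, amountFIL string) (string, error) {
	// the miner node builds and pushes the message, returning its CID
	return "bafy2...msgcid", nil
}

func stateWaitMsg(ctx context.Context, msgCid string, confidence uint64) error {
	// blocks until the message executes with the given confidence
	return nil
}

func main() {
	ctx := context.Background()
	msgCid, err := actorWithdrawBalance(ctx, "1.0")
	if err != nil {
		panic(err)
	}
	fmt.Println("withdrawal message:", msgCid)
	if err := stateWaitMsg(ctx, msgCid, 5); err != nil {
		panic(err)
	}
}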
@@ -448,7 +412,7 @@ var actorControlList = &cli.Command{
 		}
 		defer closer()

-		api, acloser, err := lcli.GetFullNodeAPI(cctx)
+		api, acloser, err := lcli.GetFullNodeAPIV1(cctx)
 		if err != nil {
 			return err
 		}
@@ -530,18 +494,21 @@ var actorControlList = &cli.Command{
 		}

 		printKey := func(name string, a address.Address) {
-			b, err := api.WalletBalance(ctx, a)
-			if err != nil {
-				fmt.Printf("%s\t%s: error getting balance: %s\n", name, a, err)
+			var actor *types.Actor
+			if actor, err = api.StateGetActor(ctx, a, types.EmptyTSK); err != nil {
+				fmt.Printf("%s\t%s: error getting actor: %s\n", name, a, err)
 				return
 			}
+			b := actor.Balance

-			k, err := api.StateAccountKey(ctx, a, types.EmptyTSK)
-			if err != nil {
-				fmt.Printf("%s\t%s: error getting account key: %s\n", name, a, err)
-				return
-			}
+			var k = a
+			// 'a' maybe a 'robust', in that case, 'StateAccountKey' returns an error.
+			if builtin2.IsAccountActor(actor.Code) {
+				if k, err = api.StateAccountKey(ctx, a, types.EmptyTSK); err != nil {
+					fmt.Printf("%s\t%s: error getting account key: %s\n", name, a, err)
+					return
+				}
+			}
 			kstr := k.String()
 			if !cctx.Bool("verbose") {
 				kstr = kstr[:9] + "..."
@ -458,7 +458,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
|
|||||||
wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix))
|
wsts := statestore.New(namespace.Wrap(mds, modules.WorkerCallsPrefix))
|
||||||
smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix))
|
smsts := statestore.New(namespace.Wrap(mds, modules.ManagerWorkPrefix))
|
||||||
|
|
||||||
si := paths.NewIndex()
|
si := paths.NewIndex(nil)
|
||||||
|
|
||||||
lstor, err := paths.NewLocal(ctx, lr, si, nil)
|
lstor, err := paths.NewLocal(ctx, lr, si, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@@ -1,10 +1,12 @@
 package main
 
 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"os"
 	"strconv"
+	"strings"
 	"text/tabwriter"
 	"time"
 
@@ -13,6 +15,7 @@ import (
 	"golang.org/x/xerrors"
 
 	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-bitfield"
 	"github.com/filecoin-project/go-state-types/abi"
 
 	"github.com/filecoin-project/lotus/blockstore"
@@ -197,6 +200,13 @@ var provingInfoCmd = &cli.Command{
 var provingDeadlinesCmd = &cli.Command{
 	Name:  "deadlines",
 	Usage: "View the current proving period deadlines information",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name:    "all",
+			Usage:   "Count all sectors (only live sectors are counted by default)",
+			Aliases: []string{"a"},
+		},
+	},
 	Action: func(cctx *cli.Context) error {
 		api, acloser, err := lcli.GetFullNodeAPI(cctx)
 		if err != nil {
@@ -239,14 +249,29 @@ var provingDeadlinesCmd = &cli.Command{
 
 		sectors := uint64(0)
 		faults := uint64(0)
+		var partitionCount int
 
 		for _, partition := range partitions {
+			if !cctx.Bool("all") {
+				sc, err := partition.LiveSectors.Count()
+				if err != nil {
+					return err
+				}
+
+				if sc > 0 {
+					partitionCount++
+				}
+
+				sectors += sc
+			} else {
 				sc, err := partition.AllSectors.Count()
 				if err != nil {
 					return err
 				}
 
+				partitionCount++
 				sectors += sc
+			}
 
 			fc, err := partition.FaultySectors.Count()
 			if err != nil {
@@ -260,7 +285,7 @@ var provingDeadlinesCmd = &cli.Command{
 			if di.Index == uint64(dlIdx) {
 				cur += "\t(current)"
 			}
-			_, _ = fmt.Fprintf(tw, "%d\t%d\t%d (%d)\t%d%s\n", dlIdx, len(partitions), sectors, faults, provenPartitions, cur)
+			_, _ = fmt.Fprintf(tw, "%d\t%d\t%d (%d)\t%d%s\n", dlIdx, partitionCount, sectors, faults, provenPartitions, cur)
 		}
 
 		return tw.Flush()
@@ -276,6 +301,11 @@ var provingDeadlineInfoCmd = &cli.Command{
 			Aliases: []string{"n"},
 			Usage:   "Print sector/fault numbers belonging to this deadline",
 		},
+		&cli.BoolFlag{
+			Name:    "bitfield",
+			Aliases: []string{"b"},
+			Usage:   "Print partition bitfield stats",
+		},
 	},
 	ArgsUsage: "<deadlineIdx>",
 	Action: func(cctx *cli.Context) error {
@@ -328,34 +358,75 @@ var provingDeadlineInfoCmd = &cli.Command{
 		fmt.Printf("Current: %t\n\n", di.Index == dlIdx)
 
 		for pIdx, partition := range partitions {
-			sectorCount, err := partition.AllSectors.Count()
-			if err != nil {
-				return err
-			}
-
-			sectorNumbers, err := partition.AllSectors.All(sectorCount)
-			if err != nil {
-				return err
-			}
-
-			faultsCount, err := partition.FaultySectors.Count()
-			if err != nil {
-				return err
-			}
-
-			fn, err := partition.FaultySectors.All(faultsCount)
-			if err != nil {
-				return err
-			}
-
 			fmt.Printf("Partition Index: %d\n", pIdx)
-			fmt.Printf("\tSectors: %d\n", sectorCount)
-			if cctx.Bool("sector-nums") {
-				fmt.Printf("\tSector Numbers: %v\n", sectorNumbers)
-			}
-			fmt.Printf("\tFaults: %d\n", faultsCount)
-			if cctx.Bool("sector-nums") {
-				fmt.Printf("\tFaulty Sectors: %d\n", fn)
-			}
+
+			printStats := func(bf bitfield.BitField, name string) error {
+				count, err := bf.Count()
+				if err != nil {
+					return err
+				}
+
+				rit, err := bf.RunIterator()
+				if err != nil {
+					return err
+				}
+
+				if cctx.Bool("bitfield") {
+					var ones, zeros, oneRuns, zeroRuns, invalid uint64
+					for rit.HasNext() {
+						r, err := rit.NextRun()
+						if err != nil {
+							return xerrors.Errorf("next run: %w", err)
+						}
+						if !r.Valid() {
+							invalid++
+						}
+						if r.Val {
+							ones += r.Len
+							oneRuns++
+						} else {
+							zeros += r.Len
+							zeroRuns++
+						}
+					}
+
+					var buf bytes.Buffer
+					if err := bf.MarshalCBOR(&buf); err != nil {
+						return err
+					}
+					sz := len(buf.Bytes())
+					szstr := types.SizeStr(types.NewInt(uint64(sz)))
+
+					fmt.Printf("\t%s Sectors:%s%d (bitfield - runs %d+%d=%d - %d 0s %d 1s - %d inv - %s %dB)\n", name, strings.Repeat(" ", 18-len(name)), count, zeroRuns, oneRuns, zeroRuns+oneRuns, zeros, ones, invalid, szstr, sz)
+				} else {
+					fmt.Printf("\t%s Sectors:%s%d\n", name, strings.Repeat(" ", 18-len(name)), count)
+				}
+
+				if cctx.Bool("sector-nums") {
+					nums, err := bf.All(count)
+					if err != nil {
+						return err
+					}
+					fmt.Printf("\t%s Sector Numbers:%s%v\n", name, strings.Repeat(" ", 12-len(name)), nums)
+				}
+
+				return nil
+			}
+
+			if err := printStats(partition.AllSectors, "All"); err != nil {
+				return err
+			}
+			if err := printStats(partition.LiveSectors, "Live"); err != nil {
+				return err
+			}
+			if err := printStats(partition.ActiveSectors, "Active"); err != nil {
+				return err
+			}
+			if err := printStats(partition.FaultySectors, "Faulty"); err != nil {
+				return err
+			}
+			if err := printStats(partition.RecoveringSectors, "Recovering"); err != nil {
+				return err
+			}
 		}
 		return nil
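The `printStats` closure added above walks the bitfield's RLE+ runs to report run statistics. The same counting logic as a self-contained program (a sketch, assuming only the `go-bitfield` package used in the diff):

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-bitfield"
)

// runStats mirrors the counting loop in printStats above: it tallies set and
// unset bits, and the number of runs, in a bitfield's RLE+ representation.
func runStats(bf bitfield.BitField) (ones, zeros, oneRuns, zeroRuns uint64, err error) {
	rit, err := bf.RunIterator()
	if err != nil {
		return 0, 0, 0, 0, err
	}
	for rit.HasNext() {
		r, err := rit.NextRun()
		if err != nil {
			return 0, 0, 0, 0, err
		}
		if r.Val {
			ones += r.Len
			oneRuns++
		} else {
			zeros += r.Len
			zeroRuns++
		}
	}
	return ones, zeros, oneRuns, zeroRuns, nil
}

func main() {
	// Sectors 0-2 and 10-11 are set: two one-runs separated by a zero-run.
	bf := bitfield.NewFromSet([]uint64{0, 1, 2, 10, 11})
	ones, zeros, oneRuns, zeroRuns, err := runStats(bf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d ones in %d runs, %d zeros in %d runs\n", ones, oneRuns, zeros, zeroRuns)
	// Output: 5 ones in 2 runs, 7 zeros in 1 runs
}
```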
@@ -365,6 +365,12 @@ var sealingAbortCmd = &cli.Command{
 	Name:      "abort",
 	Usage:     "Abort a running job",
 	ArgsUsage: "[callid]",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name:  "sched",
+			Usage: "Specifies that the argument is UUID of the request to be removed from scheduler",
+		},
+	},
 	Action: func(cctx *cli.Context) error {
 		if cctx.Args().Len() != 1 {
 			return xerrors.Errorf("expected 1 argument")
@@ -378,6 +384,14 @@ var sealingAbortCmd = &cli.Command{
 
 		ctx := lcli.ReqContext(cctx)
 
+		if cctx.Bool("sched") {
+			err = nodeApi.SealingRemoveRequest(ctx, uuid.Must(uuid.Parse(cctx.Args().First())))
+			if err != nil {
+				return xerrors.Errorf("Failed to removed the request with UUID %s: %w", cctx.Args().First(), err)
+			}
+			return nil
+		}
+
 		jobs, err := nodeApi.WorkerJobs(ctx)
 		if err != nil {
 			return xerrors.Errorf("getting worker jobs: %w", err)
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
+	"math/bits"
 	"os"
 	"path/filepath"
 	"sort"
@@ -45,6 +46,8 @@ long term for proving (references as 'store') as well as how sectors will be
 stored while moving through the sealing pipeline (references as 'seal').`,
 	Subcommands: []*cli.Command{
 		storageAttachCmd,
+		storageDetachCmd,
+		storageRedeclareCmd,
 		storageListCmd,
 		storageFindCmd,
 		storageCleanupCmd,
@@ -173,6 +176,82 @@ over time
 	},
 }
 
+var storageDetachCmd = &cli.Command{
+	Name:  "detach",
+	Usage: "detach local storage path",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name: "really-do-it",
+		},
+	},
+	ArgsUsage: "[path]",
+	Action: func(cctx *cli.Context) error {
+		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+		ctx := lcli.ReqContext(cctx)
+
+		if !cctx.Args().Present() {
+			return xerrors.Errorf("must specify storage path")
+		}
+
+		p, err := homedir.Expand(cctx.Args().First())
+		if err != nil {
+			return xerrors.Errorf("expanding path: %w", err)
+		}
+
+		if !cctx.Bool("really-do-it") {
+			return xerrors.Errorf("pass --really-do-it to execute the action")
+		}
+
+		return nodeApi.StorageDetachLocal(ctx, p)
+	},
+}
+
+var storageRedeclareCmd = &cli.Command{
+	Name:  "redeclare",
+	Usage: "redeclare sectors in a local storage path",
+	Flags: []cli.Flag{
+		&cli.StringFlag{
+			Name:  "id",
+			Usage: "storage path ID",
+		},
+		&cli.BoolFlag{
+			Name:  "all",
+			Usage: "redeclare all storage paths",
+		},
+		&cli.BoolFlag{
+			Name:  "drop-missing",
+			Usage: "Drop index entries with missing files",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+		ctx := lcli.ReqContext(cctx)
+
+		if cctx.IsSet("id") && cctx.Bool("all") {
+			return xerrors.Errorf("--id and --all can't be passed at the same time")
+		}
+
+		if cctx.IsSet("id") {
+			id := storiface.ID(cctx.String("id"))
+			return nodeApi.StorageRedeclareLocal(ctx, &id, cctx.Bool("drop-missing"))
+		}
+
+		if cctx.Bool("all") {
+			return nodeApi.StorageRedeclareLocal(ctx, nil, cctx.Bool("drop-missing"))
+		}
+
+		return xerrors.Errorf("either --all or --id must be specified")
+	},
+}
+
 var storageListCmd = &cli.Command{
 	Name:  "list",
 	Usage: "list local storage paths",
@@ -345,6 +424,20 @@ var storageListCmd = &cli.Command{
 				fmt.Printf("\tAllowTo: %s\n", strings.Join(si.AllowTo, ", "))
 			}
 
+			if len(si.AllowTypes) > 0 || len(si.DenyTypes) > 0 {
+				denied := storiface.FTAll.SubAllowed(si.AllowTypes, si.DenyTypes)
+				allowed := storiface.FTAll ^ denied
+
+				switch {
+				case bits.OnesCount64(uint64(allowed)) == 0:
+					fmt.Printf("\tAllow Types: %s\n", color.RedString("None"))
+				case bits.OnesCount64(uint64(allowed)) < bits.OnesCount64(uint64(denied)):
+					fmt.Printf("\tAllow Types: %s\n", color.GreenString(strings.Join(allowed.Strings(), " ")))
+				default:
+					fmt.Printf("\tDeny Types: %s\n", color.RedString(strings.Join(denied.Strings(), " ")))
+				}
+			}
+
 			if localPath, ok := local[s.ID]; ok {
 				fmt.Printf("\tLocal: %s\n", color.GreenString(localPath))
 			}
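The new `storage list` output derives the effective allow/deny sets from a path's file-type filters. The same computation in isolation (a sketch; the filter values are made up, while `FTAll`, `SubAllowed`, and `Strings` are the `storiface` helpers used in the hunk above):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

func main() {
	// Illustrative filter values; a real path carries these in its storage metadata.
	allowTypes := []string{"sealed", "cache"}
	denyTypes := []string{}

	// SubAllowed applies the allow/deny lists to the full file-type set and
	// returns what ends up denied; everything else remains allowed.
	denied := storiface.FTAll.SubAllowed(allowTypes, denyTypes)
	allowed := storiface.FTAll ^ denied

	fmt.Println("allowed:", strings.Join(allowed.Strings(), " "))
	fmt.Println("denied: ", strings.Join(denied.Strings(), " "))
}
```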
cmd/lotus-shed/address.go (new file, 41 lines)
@@ -0,0 +1,41 @@
+package main
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+
+	"github.com/urfave/cli/v2"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/go-address"
+)
+
+var addressCmd = &cli.Command{
+	Name:  "addr",
+	Usage: "decode hex bytes into address",
+	Action: func(cctx *cli.Context) error {
+		addrHex := cctx.Args().First()
+		bs, err := hex.DecodeString(addrHex)
+		if err != nil {
+			return err
+		}
+		// first try cbor
+		var a address.Address
+		err = a.UnmarshalCBOR((bytes.NewReader(bs)))
+		if err != nil {
+			fmt.Printf("failed to unmarshal as CBOR, trying raw\n")
+		} else {
+			fmt.Printf("%s\n", a)
+			return nil
+		}
+
+		// next try raw payload
+		a, err = address.NewFromBytes(bs)
+		if err != nil {
+			return xerrors.New("could not decode as CBOR or raw payload, failing")
+		}
+		fmt.Printf("%s\n", a)
+		return nil
+	},
+}
@@ -16,6 +16,7 @@ func main() {
 	logging.SetLogLevel("*", "INFO")
 
 	local := []*cli.Command{
+		addressCmd,
 		base64Cmd,
 		base32Cmd,
 		base16Cmd,
@@ -19,6 +19,7 @@ import (
 	"github.com/filecoin-project/go-state-types/big"
 
 	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
+	"github.com/filecoin-project/lotus/chain/types"
 	lcli "github.com/filecoin-project/lotus/cli"
 	"github.com/filecoin-project/lotus/lib/backupds"
 	"github.com/filecoin-project/lotus/node/repo"
@@ -32,6 +33,7 @@ var marketCmd = &cli.Command{
 		marketDealFeesCmd,
 		marketExportDatastoreCmd,
 		marketImportDatastoreCmd,
+		marketDealsTotalStorageCmd,
 	},
 }
 
@@ -283,6 +285,42 @@ var marketImportDatastoreCmd = &cli.Command{
 	},
 }
 
+var marketDealsTotalStorageCmd = &cli.Command{
+	Name:  "get-deals-total-storage",
+	Usage: "View the total storage available in all active market deals",
+	Action: func(cctx *cli.Context) error {
+		api, closer, err := lcli.GetFullNodeAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		ctx := lcli.ReqContext(cctx)
+
+		deals, err := api.StateMarketDeals(ctx, types.EmptyTSK)
+		if err != nil {
+			return err
+		}
+
+		total := big.Zero()
+		count := 0
+
+		for _, deal := range deals {
+			if market.IsDealActive(deal.State) {
+				dealStorage := big.NewIntUnsigned(uint64(deal.Proposal.PieceSize))
+				total = big.Add(total, dealStorage)
+				count++
+			}
+		}
+
+		fmt.Println("Total deals: ", count)
+		fmt.Println("Total storage: ", total)
+
+		return nil
+	},
+}
+
 func openLockedRepo(path string) (repo.LockedRepo, error) {
 	// Open the repo at the repo path
 	rpo, err := repo.NewFS(path)
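The aggregation the new `get-deals-total-storage` command performs is a plain big-integer sum over `PieceSize` values. In isolation (a sketch using only `go-state-types`; real deal data would come from `StateMarketDeals` as above, and the piece sizes here are made up):

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
)

func main() {
	// Hypothetical piece sizes of two active deals (32 GiB each).
	pieces := []abi.PaddedPieceSize{34359738368, 34359738368}

	total := big.Zero()
	for _, sz := range pieces {
		total = big.Add(total, big.NewIntUnsigned(uint64(sz)))
	}
	fmt.Println("Total deals: ", len(pieces))
	fmt.Println("Total storage: ", total) // 68719476736
}
```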
@@ -55,6 +55,7 @@ func main() {
 
 	local := []*cli.Command{
 		runCmd,
+		stopCmd,
 		infoCmd,
 		storageCmd,
 		setCmd,
@@ -93,12 +94,13 @@ func main() {
 			Name:  "enable-gpu-proving",
 			Usage: "enable use of GPU for mining operations",
 			Value: true,
+			EnvVars: []string{"LOTUS_WORKER_ENABLE_GPU_PROVING"},
 		},
 	},
 
 	After: func(c *cli.Context) error {
 		if r := recover(); r != nil {
-			// Generate report in LOTUS_PATH and re-raise panic
+			// Generate report in LOTUS_PANIC_REPORT_PATH and re-raise panic
 			build.GeneratePanicReport(c.String("panic-reports"), c.String(FlagWorkerRepo), c.App.Name)
 			panic(r)
 		}
@@ -115,6 +117,34 @@ func main() {
 	}
 }
 
+var stopCmd = &cli.Command{
+	Name:  "stop",
+	Usage: "Stop a running lotus worker",
+	Flags: []cli.Flag{},
+	Action: func(cctx *cli.Context) error {
+		api, closer, err := lcli.GetWorkerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+
+		ctx := lcli.ReqContext(cctx)
+
+		// Detach any storage associated with this worker
+		err = api.StorageDetachAll(ctx)
+		if err != nil {
+			return err
+		}
+
+		err = api.Shutdown(ctx)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	},
+}
+
 var runCmd = &cli.Command{
 	Name:  "run",
 	Usage: "Start lotus worker",
@@ -123,6 +153,7 @@ var runCmd = &cli.Command{
 			Name:  "listen",
 			Usage: "host address and port the worker api will listen on",
 			Value: "0.0.0.0:3456",
+			EnvVars: []string{"LOTUS_WORKER_LISTEN"},
 		},
 		&cli.StringFlag{
 			Name: "address",
@@ -131,86 +162,109 @@ var runCmd = &cli.Command{
 		&cli.BoolFlag{
 			Name:  "no-local-storage",
 			Usage: "don't use storageminer repo for sector storage",
+			EnvVars: []string{"LOTUS_WORKER_NO_LOCAL_STORAGE"},
 		},
 		&cli.BoolFlag{
 			Name:  "no-swap",
 			Usage: "don't use swap",
 			Value: false,
+			EnvVars: []string{"LOTUS_WORKER_NO_SWAP"},
+		},
+		&cli.StringFlag{
+			Name:        "name",
+			Usage:       "custom worker name",
+			EnvVars:     []string{"LOTUS_WORKER_NAME"},
+			DefaultText: "hostname",
 		},
 		&cli.BoolFlag{
 			Name:  "addpiece",
 			Usage: "enable addpiece",
 			Value: true,
+			EnvVars: []string{"LOTUS_WORKER_ADDPIECE"},
 		},
 		&cli.BoolFlag{
 			Name:  "precommit1",
 			Usage: "enable precommit1 (32G sectors: 1 core, 128GiB Memory)",
 			Value: true,
+			EnvVars: []string{"LOTUS_WORKER_PRECOMMIT1"},
 		},
 		&cli.BoolFlag{
 			Name:  "unseal",
 			Usage: "enable unsealing (32G sectors: 1 core, 128GiB Memory)",
 			Value: true,
+			EnvVars: []string{"LOTUS_WORKER_UNSEAL"},
 		},
 		&cli.BoolFlag{
 			Name:  "precommit2",
 			Usage: "enable precommit2 (32G sectors: all cores, 96GiB Memory)",
 			Value: true,
+			EnvVars: []string{"LOTUS_WORKER_PRECOMMIT2"},
 		},
 		&cli.BoolFlag{
 			Name:  "commit",
 			Usage: "enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap)",
 			Value: true,
+			EnvVars: []string{"LOTUS_WORKER_COMMIT"},
 		},
 		&cli.BoolFlag{
 			Name:  "replica-update",
 			Usage: "enable replica update",
 			Value: true,
+			EnvVars: []string{"LOTUS_WORKER_REPLICA_UPDATE"},
 		},
 		&cli.BoolFlag{
 			Name:  "prove-replica-update2",
 			Usage: "enable prove replica update 2",
 			Value: true,
+			EnvVars: []string{"LOTUS_WORKER_PROVE_REPLICA_UPDATE2"},
 		},
 		&cli.BoolFlag{
 			Name:  "regen-sector-key",
 			Usage: "enable regen sector key",
 			Value: true,
+			EnvVars: []string{"LOTUS_WORKER_REGEN_SECTOR_KEY"},
 		},
 		&cli.BoolFlag{
 			Name:  "windowpost",
 			Usage: "enable window post",
 			Value: false,
+			EnvVars: []string{"LOTUS_WORKER_WINDOWPOST"},
 		},
 		&cli.BoolFlag{
 			Name:  "winningpost",
 			Usage: "enable winning post",
 			Value: false,
+			EnvVars: []string{"LOTUS_WORKER_WINNINGPOST"},
 		},
 		&cli.BoolFlag{
 			Name:  "no-default",
 			Usage: "disable all default compute tasks, use the worker for storage/fetching only",
 			Value: false,
+			EnvVars: []string{"LOTUS_WORKER_NO_DEFAULT"},
 		},
 		&cli.IntFlag{
 			Name:  "parallel-fetch-limit",
 			Usage: "maximum fetch operations to run in parallel",
 			Value: 5,
+			EnvVars: []string{"LOTUS_WORKER_PARALLEL_FETCH_LIMIT"},
 		},
 		&cli.IntFlag{
 			Name:  "post-parallel-reads",
 			Usage: "maximum number of parallel challenge reads (0 = no limit)",
 			Value: 128,
+			EnvVars: []string{"LOTUS_WORKER_POST_PARALLEL_READS"},
 		},
 		&cli.DurationFlag{
 			Name:  "post-read-timeout",
 			Usage: "time limit for reading PoSt challenges (0 = no limit)",
 			Value: 0,
+			EnvVars: []string{"LOTUS_WORKER_POST_READ_TIMEOUT"},
 		},
 		&cli.StringFlag{
 			Name:  "timeout",
 			Usage: "used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function",
 			Value: "30m",
+			EnvVars: []string{"LOTUS_WORKER_TIMEOUT"},
 		},
 	},
 	Before: func(cctx *cli.Context) error {
@@ -491,6 +545,7 @@ var runCmd = &cli.Command{
 				NoSwap:                    cctx.Bool("no-swap"),
 				MaxParallelChallengeReads: cctx.Int("post-parallel-reads"),
 				ChallengeReadTimeout:      cctx.Duration("post-read-timeout"),
+				Name:                      cctx.String("name"),
 			}, remote, localStore, nodeApi, nodeApi, wsts),
 			LocalStore: localStore,
 			Storage:    lr,
@@ -571,7 +626,7 @@ var runCmd = &cli.Command{
 			if redeclareStorage {
 				log.Info("Redeclaring local storage")
 
-				if err := localStore.Redeclare(ctx); err != nil {
+				if err := localStore.Redeclare(ctx, nil, false); err != nil {
 					log.Errorf("Redeclaring local storage failed: %+v", err)
 
 					select {
@@ -623,6 +678,17 @@ var runCmd = &cli.Command{
 		}
 	}()
 
+	go func() {
+		<-workerApi.Done()
+		// Wait 20s to allow the miner to unregister the worker on next heartbeat
+		time.Sleep(20 * time.Second)
+		log.Warn("Shutting down...")
+		if err := srv.Shutdown(context.TODO()); err != nil {
+			log.Errorf("shutting down RPC server failed: %s", err)
+		}
+		log.Warn("Graceful shutdown successful")
+	}()
+
 		return srv.Serve(nl)
 	},
 }
@@ -7,6 +7,7 @@ import (
 
 	"github.com/google/uuid"
 	"github.com/gorilla/mux"
+	logging "github.com/ipfs/go-log/v2"
 	"github.com/mitchellh/go-homedir"
 	"golang.org/x/xerrors"
 
@@ -23,6 +24,8 @@ import (
 	"github.com/filecoin-project/lotus/storage/sealer/storiface"
 )
 
+var log = logging.Logger("sealworker")
+
 func WorkerHandler(authv func(ctx context.Context, token string) ([]auth.Permission, error), remote http.HandlerFunc, a api.Worker, permissioned bool) http.Handler {
 	mux := mux.NewRouter()
 	readerHandler, readerServerOpt := rpcenc.ReaderParamDecoder()
@@ -65,6 +68,20 @@ func (w *Worker) Version(context.Context) (api.Version, error) {
 	return api.WorkerAPIVersion0, nil
 }
 
+func (w *Worker) StorageLocal(ctx context.Context) (map[storiface.ID]string, error) {
+	l, err := w.LocalStore.Local(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	out := map[storiface.ID]string{}
+	for _, st := range l {
+		out[st.ID] = st.LocalPath
+	}
+
+	return out, nil
+}
+
 func (w *Worker) StorageAddLocal(ctx context.Context, path string) error {
 	path, err := homedir.Expand(path)
 	if err != nil {
@@ -84,6 +101,75 @@ func (w *Worker) StorageAddLocal(ctx context.Context, path string) error {
 	return nil
 }
 
+func (w *Worker) StorageDetachLocal(ctx context.Context, path string) error {
+	path, err := homedir.Expand(path)
+	if err != nil {
+		return xerrors.Errorf("expanding local path: %w", err)
+	}
+
+	// check that we have the path opened
+	lps, err := w.LocalStore.Local(ctx)
+	if err != nil {
+		return xerrors.Errorf("getting local path list: %w", err)
+	}
+
+	var localPath *storiface.StoragePath
+	for _, lp := range lps {
+		if lp.LocalPath == path {
+			lp := lp // copy to make the linter happy
+			localPath = &lp
+			break
+		}
+	}
+	if localPath == nil {
+		return xerrors.Errorf("no local paths match '%s'", path)
+	}
+
+	// drop from the persisted storage.json
+	var found bool
+	if err := w.Storage.SetStorage(func(sc *paths.StorageConfig) {
+		out := make([]paths.LocalPath, 0, len(sc.StoragePaths))
+		for _, storagePath := range sc.StoragePaths {
+			if storagePath.Path != path {
+				out = append(out, storagePath)
+				continue
+			}
+			found = true
+		}
+		sc.StoragePaths = out
+	}); err != nil {
+		return xerrors.Errorf("set storage config: %w", err)
+	}
+	if !found {
+		// maybe this is fine?
+		return xerrors.Errorf("path not found in storage.json")
+	}
+
+	// unregister locally, drop from sector index
+	return w.LocalStore.ClosePath(ctx, localPath.ID)
+}
+
+func (w *Worker) StorageDetachAll(ctx context.Context) error {
+	lps, err := w.LocalStore.Local(ctx)
+	if err != nil {
+		return xerrors.Errorf("getting local path list: %w", err)
+	}
+
+	for _, lp := range lps {
+		err = w.LocalStore.ClosePath(ctx, lp.ID)
+		if err != nil {
+			log.Warnf("unable to close path: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (w *Worker) StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error {
+	return w.LocalStore.Redeclare(ctx, id, dropMissing)
+}
+
 func (w *Worker) SetEnabled(ctx context.Context, enabled bool) error {
 	disabled := int64(1)
 	if enabled {
@@ -118,4 +204,9 @@ func (w *Worker) Discover(ctx context.Context) (apitypes.OpenRPCDocument, error)
 	return build.OpenRPCDiscoverJSON_Worker(), nil
 }
 
+func (w *Worker) Shutdown(ctx context.Context) error {
+	return w.LocalWorker.Close()
+}
+
 var _ storiface.WorkerCalls = &Worker{}
+var _ api.Worker = &Worker{}
@@ -24,6 +24,8 @@ var storageCmd = &cli.Command{
 	Usage: "manage sector storage",
 	Subcommands: []*cli.Command{
 		storageAttachCmd,
+		storageDetachCmd,
+		storageRedeclareCmd,
 	},
 }
 
@@ -128,3 +130,79 @@ var storageAttachCmd = &cli.Command{
 		return nodeApi.StorageAddLocal(ctx, p)
 	},
 }
+
+var storageDetachCmd = &cli.Command{
+	Name:  "detach",
+	Usage: "detach local storage path",
+	Flags: []cli.Flag{
+		&cli.BoolFlag{
+			Name: "really-do-it",
+		},
+	},
+	ArgsUsage: "[path]",
+	Action: func(cctx *cli.Context) error {
+		nodeApi, closer, err := lcli.GetWorkerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+		ctx := lcli.ReqContext(cctx)
+
+		if !cctx.Args().Present() {
+			return xerrors.Errorf("must specify storage path")
+		}
+
+		p, err := homedir.Expand(cctx.Args().First())
+		if err != nil {
+			return xerrors.Errorf("expanding path: %w", err)
+		}
+
+		if !cctx.Bool("really-do-it") {
+			return xerrors.Errorf("pass --really-do-it to execute the action")
+		}
+
+		return nodeApi.StorageDetachLocal(ctx, p)
+	},
+}
+
+var storageRedeclareCmd = &cli.Command{
+	Name:  "redeclare",
+	Usage: "redeclare sectors in a local storage path",
+	Flags: []cli.Flag{
+		&cli.StringFlag{
+			Name:  "id",
+			Usage: "storage path ID",
+		},
+		&cli.BoolFlag{
+			Name:  "all",
+			Usage: "redeclare all storage paths",
+		},
+		&cli.BoolFlag{
+			Name:  "drop-missing",
+			Usage: "Drop index entries with missing files",
+		},
+	},
+	Action: func(cctx *cli.Context) error {
+		nodeApi, closer, err := lcli.GetWorkerAPI(cctx)
+		if err != nil {
+			return err
+		}
+		defer closer()
+		ctx := lcli.ReqContext(cctx)
+
+		if cctx.IsSet("id") && cctx.Bool("all") {
+			return xerrors.Errorf("--id and --all can't be passed at the same time")
+		}
+
+		if cctx.IsSet("id") {
+			id := storiface.ID(cctx.String("id"))
+			return nodeApi.StorageRedeclareLocal(ctx, &id, cctx.Bool("drop-missing"))
+		}
+
+		if cctx.Bool("all") {
+			return nodeApi.StorageRedeclareLocal(ctx, nil, cctx.Bool("drop-missing"))
+		}
+
+		return xerrors.Errorf("either --all or --id must be specified")
+	},
+}
@@ -169,6 +169,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
 		params.ExecEpoch,
 		params.Rand,
 		recordOutputs,
+		true,
 		params.BaseFee,
 		nil,
 	)
@@ -9,6 +9,7 @@
 * [ActorAddress](#ActorAddress)
 * [ActorAddressConfig](#ActorAddressConfig)
 * [ActorSectorSize](#ActorSectorSize)
+* [ActorWithdrawBalance](#ActorWithdrawBalance)
 * [Auth](#Auth)
 * [AuthNew](#AuthNew)
 * [AuthVerify](#AuthVerify)
@@ -127,6 +128,7 @@
 * [RuntimeSubsystems](#RuntimeSubsystems)
 * [Sealing](#Sealing)
 * [SealingAbort](#SealingAbort)
+* [SealingRemoveRequest](#SealingRemoveRequest)
 * [SealingSchedDiag](#SealingSchedDiag)
 * [Sector](#Sector)
 * [SectorAbortUpgrade](#SectorAbortUpgrade)
@@ -160,6 +162,8 @@
 * [StorageAuthVerify](#StorageAuthVerify)
 * [StorageBestAlloc](#StorageBestAlloc)
 * [StorageDeclareSector](#StorageDeclareSector)
+* [StorageDetach](#StorageDetach)
+* [StorageDetachLocal](#StorageDetachLocal)
 * [StorageDropSector](#StorageDropSector)
 * [StorageFindSector](#StorageFindSector)
 * [StorageGetLocks](#StorageGetLocks)
@@ -167,6 +171,7 @@
 * [StorageList](#StorageList)
 * [StorageLocal](#StorageLocal)
 * [StorageLock](#StorageLock)
+* [StorageRedeclareLocal](#StorageRedeclareLocal)
 * [StorageReportHealth](#StorageReportHealth)
 * [StorageStat](#StorageStat)
 * [StorageTryLock](#StorageTryLock)
@@ -292,6 +297,28 @@ Inputs:
 
 Response: `34359738368`
 
+### ActorWithdrawBalance
+WithdrawBalance allows to withdraw balance from miner actor to owner address
+Specify amount as "0" to withdraw full balance. This method returns a message CID
+and does not wait for message execution
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "0"
+]
+```
+
+Response:
+```json
+{
+  "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4"
+}
+```
+
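A hypothetical Go client for the new endpoint, using the standard lotus JSON-RPC client helper; the endpoint URL and token below are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()
	// Placeholder endpoint/token; take them from the miner's API info in practice.
	headers := http.Header{"Authorization": []string{"Bearer <admin token>"}}

	miner, closer, err := client.NewStorageMinerRPCV0(ctx, "ws://127.0.0.1:2345/rpc/v0", headers)
	if err != nil {
		panic(err)
	}
	defer closer()

	// Amount "0" withdraws the full available balance, per the docs above.
	msgCid, err := miner.ActorWithdrawBalance(ctx, abi.NewTokenAmount(0))
	if err != nil {
		panic(err)
	}
	fmt.Println("withdrawal message:", msgCid)
}
```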
 ## Auth
 
 
@@ -2726,6 +2753,21 @@ Inputs:
 
 Response: `{}`
 
+### SealingRemoveRequest
+SealingSchedRemove removes a request from sealing pipeline
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "07070707-0707-0707-0707-070707070707"
+]
+```
+
+Response: `{}`
+
 ### SealingSchedDiag
 SealingSchedDiag dumps internal sealing scheduler state
 
@@ -3271,7 +3313,7 @@ Inputs:
 Response: `{}`
 
 ### StorageAttach
-SectorIndex
+paths.SectorIndex
 
 
 Perms: admin
@@ -3293,6 +3335,12 @@ Inputs:
   ],
   "AllowTo": [
     "string value"
+  ],
+  "AllowTypes": [
+    "string value"
+  ],
+  "DenyTypes": [
+    "string value"
   ]
 },
 {
@@ -3328,6 +3376,9 @@ Response:
 ```
 
 ### StorageBestAlloc
+StorageBestAlloc returns list of paths where sector files of the specified type can be allocated, ordered by preference.
+Paths with more weight and more % of free space are preferred.
+Note: This method doesn't filter paths based on AllowTypes/DenyTypes.
 
 
 Perms: admin
@@ -3358,6 +3409,12 @@ Response:
   ],
   "AllowTo": [
     "string value"
+  ],
+  "AllowTypes": [
+    "string value"
+  ],
+  "DenyTypes": [
+    "string value"
   ]
 }
 ]
@@ -3383,6 +3440,35 @@ Inputs:
 
 Response: `{}`
 
+### StorageDetach
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8",
+  "string value"
+]
+```
+
+Response: `{}`
+
+### StorageDetachLocal
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "string value"
+]
+```
+
+Response: `{}`
+
 ### StorageDropSector
 
 
@@ -3403,6 +3489,14 @@ Inputs:
 Response: `{}`
 
 ### StorageFindSector
+StorageFindSector returns list of paths where the specified sector files exist.
+
+If allowFetch is set, list of paths to which the sector can be fetched will also be returned.
+- Paths which have sector files locally (don't require fetching) will be listed first.
+- Paths which have sector files locally will not be filtered based on based on AllowTypes/DenyTypes.
+- Paths which require fetching will be filtered based on AllowTypes/DenyTypes. If multiple
+  file types are specified, each type will be considered individually, and a union of all paths
+  which can accommodate each file type will be returned.
 
 
 Perms: admin
@@ -3434,7 +3528,13 @@ Response:
     "Weight": 42,
     "CanSeal": true,
     "CanStore": true,
-    "Primary": true
+    "Primary": true,
+    "AllowTypes": [
+      "string value"
+    ],
+    "DenyTypes": [
+      "string value"
+    ]
   }
 ]
 ```
@@ -3502,6 +3602,12 @@ Response:
   ],
   "AllowTo": [
     "string value"
+  ],
+  "AllowTypes": [
+    "string value"
+  ],
+  "DenyTypes": [
+    "string value"
   ]
 }
 ```
@@ -3559,6 +3665,21 @@ Inputs:
 
 Response: `{}`
 
+### StorageRedeclareLocal
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "1399aa04-2625-44b1-bad4-bd07b59b22c4",
+  true
+]
+```
+
+Response: `{}`
+
 ### StorageReportHealth
 
 
@@ -6,6 +6,7 @@
 * [Paths](#Paths)
 * [Remove](#Remove)
 * [Session](#Session)
+* [Shutdown](#Shutdown)
 * [Version](#Version)
 * [Add](#Add)
 * [AddPiece](#AddPiece)
@@ -38,6 +39,10 @@
 * [SetEnabled](#SetEnabled)
 * [Storage](#Storage)
 * [StorageAddLocal](#StorageAddLocal)
+* [StorageDetachAll](#StorageDetachAll)
+* [StorageDetachLocal](#StorageDetachLocal)
+* [StorageLocal](#StorageLocal)
+* [StorageRedeclareLocal](#StorageRedeclareLocal)
 * [Task](#Task)
 * [TaskDisable](#TaskDisable)
 * [TaskEnable](#TaskEnable)
@@ -1453,6 +1458,16 @@ Inputs: `null`
 
 Response: `"07070707-0707-0707-0707-070707070707"`
 
+### Shutdown
+Trigger shutdown
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `{}`
+
 ### Version
 
 
@@ -2107,6 +2122,58 @@ Inputs:
 
 Response: `{}`
 
+### StorageDetachAll
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response: `{}`
+
+### StorageDetachLocal
+
+
+Perms: admin
+
+Inputs:
+```json
+[
  "string value"
+]
+```
+
+Response: `{}`
+
+### StorageLocal
+
+
+Perms: admin
+
+Inputs: `null`
+
+Response:
+```json
+{
+  "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path"
+}
+```
+
+### StorageRedeclareLocal
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  "1399aa04-2625-44b1-bad4-bd07b59b22c4",
+  true
+]
+```
+
+Response: `{}`
+
 ## Task
 
 
@@ -6195,7 +6195,7 @@ Response:
 ### StateReplay
 StateReplay replays a given message, assuming it was included in a block in the specified tipset.
 
-If a tipset key is provided, and a replacing message is found on chain,
+If a tipset key is provided, and a replacing message is not found on chain,
 the method will return an error saying that the message wasn't found
 
 If no tipset key is provided, the appropriate tipset is looked up, and if
@@ -28,6 +28,7 @@
 * [ChainHasObj](#ChainHasObj)
 * [ChainHead](#ChainHead)
 * [ChainNotify](#ChainNotify)
+* [ChainPrune](#ChainPrune)
 * [ChainPutObj](#ChainPutObj)
 * [ChainReadObj](#ChainReadObj)
 * [ChainSetHead](#ChainSetHead)
@@ -962,6 +963,25 @@ Response:
 ]
 ```
 
+### ChainPrune
+ChainPrune prunes the stored chain state and garbage collects; only supported if you
+are using the splitstore
+
+
+Perms: admin
+
+Inputs:
+```json
+[
+  {
+    "MovingGC": true,
+    "RetainState": 9
+  }
+]
+```
+
+Response: `{}`
+
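A sketch of driving this endpoint from Go, assuming an already-connected v1 full-node handle `fullNode` (plus `ctx` and the standard `api` and `log` packages) and a node that actually runs the splitstore, since the call errors otherwise per the note above:

```go
// PruneOpts mirrors the JSON input above: MovingGC requests a moving GC pass,
// and RetainState is the numeric retention bound shown in the example inputs.
opts := api.PruneOpts{
	MovingGC:    false,
	RetainState: 9,
}
if err := fullNode.ChainPrune(ctx, opts); err != nil {
	log.Fatalf("chain prune: %s", err)
}
```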
 ### ChainPutObj
 ChainPutObj puts a given object into the block store
 
@@ -6663,7 +6683,7 @@ Response:
 ### StateReplay
 StateReplay replays a given message, assuming it was included in a block in the specified tipset.
 
-If a tipset key is provided, and a replacing message is found on chain,
+If a tipset key is provided, and a replacing message is not found on chain,
 the method will return an error saying that the message wasn't found
 
 If no tipset key is provided, the appropriate tipset is looked up, and if
@@ -7,7 +7,7 @@ USAGE:
    lotus-miner [global options] command [command options] [arguments...]
 
 VERSION:
-   1.17.0
+   1.17.1
 
 COMMANDS:
    init      Initialize a lotus miner repo
@@ -2055,7 +2055,7 @@ USAGE:
    lotus-miner proving deadlines [command options] [arguments...]
 
 OPTIONS:
-   --help, -h  show help (default: false)
+   --all, -a  Count all sectors (only live sectors are counted by default) (default: false)
 
 ```
@@ -2068,6 +2068,7 @@ USAGE:
    lotus-miner proving deadline [command options] <deadlineIdx>
 
 OPTIONS:
+   --bitfield, -b     Print partition bitfield stats (default: false)
    --sector-nums, -n  Print sector/fault numbers belonging to this deadline (default: false)
 
 ```
@@ -2151,6 +2152,8 @@ DESCRIPTION:
 
 COMMANDS:
    attach     attach local storage path
+   detach     detach local storage path
+   redeclare  redeclare sectors in a local storage path
    list       list local storage paths
    find       find sector in the storage system
    cleanup    trigger cleanup actions
@@ -2201,6 +2204,34 @@ OPTIONS:
 
 ```
 
+### lotus-miner storage detach
+```
+NAME:
+   lotus-miner storage detach - detach local storage path
+
+USAGE:
+   lotus-miner storage detach [command options] [path]
+
+OPTIONS:
+   --really-do-it  (default: false)
+
+```
+
+### lotus-miner storage redeclare
+```
+NAME:
+   lotus-miner storage redeclare - redeclare sectors in a local storage path
+
+USAGE:
+   lotus-miner storage redeclare [command options] [arguments...]
+
+OPTIONS:
+   --all           redeclare all storage paths (default: false)
+   --drop-missing  Drop index entries with missing files (default: false)
+   --id value      storage path ID
+
+```
+
 ### lotus-miner storage list
 ```
 NAME:
@@ -2341,7 +2372,7 @@ USAGE:
    lotus-miner sealing abort [command options] [callid]
 
 OPTIONS:
-   --help, -h  show help (default: false)
+   --sched  Specifies that the argument is UUID of the request to be removed from scheduler (default: false)
 
 ```
@ -7,10 +7,11 @@ USAGE:
|
|||||||
lotus-worker [global options] command [command options] [arguments...]
|
lotus-worker [global options] command [command options] [arguments...]
|
||||||
|
|
||||||
VERSION:
|
VERSION:
|
||||||
1.17.0
|
1.17.1
|
||||||
|
|
||||||
COMMANDS:
|
COMMANDS:
|
||||||
run Start lotus worker
|
run Start lotus worker
|
||||||
|
stop Stop a running lotus worker
|
||||||
info Print worker info
|
info Print worker info
|
||||||
storage manage sector storage
|
storage manage sector storage
|
||||||
set Manage worker settings
|
set Manage worker settings
|
||||||
@ -20,7 +21,7 @@ COMMANDS:
|
|||||||
help, h Shows a list of commands or help for one command
|
help, h Shows a list of commands or help for one command
|
||||||
|
|
||||||
GLOBAL OPTIONS:
|
GLOBAL OPTIONS:
|
||||||
--enable-gpu-proving enable use of GPU for mining operations (default: true)
|
--enable-gpu-proving enable use of GPU for mining operations (default: true) [$LOTUS_WORKER_ENABLE_GPU_PROVING]
|
||||||
--help, -h show help (default: false)
|
--help, -h show help (default: false)
|
||||||
--miner-repo value, --storagerepo value Specify miner repo path. flag storagerepo and env LOTUS_STORAGE_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH]
|
--miner-repo value, --storagerepo value Specify miner repo path. flag storagerepo and env LOTUS_STORAGE_PATH are DEPRECATION, will REMOVE SOON (default: "~/.lotusminer") [$LOTUS_MINER_PATH, $LOTUS_STORAGE_PATH]
|
||||||
--version, -v print the version (default: false)
|
--version, -v print the version (default: false)
|
||||||
@@ -37,24 +38,38 @@ USAGE:
    lotus-worker run [command options] [arguments...]
 
 OPTIONS:
-   --addpiece                    enable addpiece (default: true)
-   --commit                      enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap) (default: true)
-   --listen value                host address and port the worker api will listen on (default: "0.0.0.0:3456")
-   --no-default                  disable all default compute tasks, use the worker for storage/fetching only (default: false)
-   --no-local-storage            don't use storageminer repo for sector storage (default: false)
-   --no-swap                     don't use swap (default: false)
-   --parallel-fetch-limit value  maximum fetch operations to run in parallel (default: 5)
-   --post-parallel-reads value   maximum number of parallel challenge reads (0 = no limit) (default: 128)
-   --post-read-timeout value     time limit for reading PoSt challenges (0 = no limit) (default: 0s)
-   --precommit1                  enable precommit1 (32G sectors: 1 core, 128GiB Memory) (default: true)
-   --precommit2                  enable precommit2 (32G sectors: all cores, 96GiB Memory) (default: true)
-   --prove-replica-update2       enable prove replica update 2 (default: true)
-   --regen-sector-key            enable regen sector key (default: true)
-   --replica-update              enable replica update (default: true)
-   --timeout value               used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m")
-   --unseal                      enable unsealing (32G sectors: 1 core, 128GiB Memory) (default: true)
-   --windowpost                  enable window post (default: false)
-   --winningpost                 enable winning post (default: false)
+   --addpiece                    enable addpiece (default: true) [$LOTUS_WORKER_ADDPIECE]
+   --commit                      enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap) (default: true) [$LOTUS_WORKER_COMMIT]
+   --listen value                host address and port the worker api will listen on (default: "0.0.0.0:3456") [$LOTUS_WORKER_LISTEN]
+   --name value                  custom worker name (default: hostname) [$LOTUS_WORKER_NAME]
+   --no-default                  disable all default compute tasks, use the worker for storage/fetching only (default: false) [$LOTUS_WORKER_NO_DEFAULT]
+   --no-local-storage            don't use storageminer repo for sector storage (default: false) [$LOTUS_WORKER_NO_LOCAL_STORAGE]
+   --no-swap                     don't use swap (default: false) [$LOTUS_WORKER_NO_SWAP]
+   --parallel-fetch-limit value  maximum fetch operations to run in parallel (default: 5) [$LOTUS_WORKER_PARALLEL_FETCH_LIMIT]
+   --post-parallel-reads value   maximum number of parallel challenge reads (0 = no limit) (default: 128) [$LOTUS_WORKER_POST_PARALLEL_READS]
+   --post-read-timeout value     time limit for reading PoSt challenges (0 = no limit) (default: 0s) [$LOTUS_WORKER_POST_READ_TIMEOUT]
+   --precommit1                  enable precommit1 (32G sectors: 1 core, 128GiB Memory) (default: true) [$LOTUS_WORKER_PRECOMMIT1]
+   --precommit2                  enable precommit2 (32G sectors: all cores, 96GiB Memory) (default: true) [$LOTUS_WORKER_PRECOMMIT2]
+   --prove-replica-update2       enable prove replica update 2 (default: true) [$LOTUS_WORKER_PROVE_REPLICA_UPDATE2]
+   --regen-sector-key            enable regen sector key (default: true) [$LOTUS_WORKER_REGEN_SECTOR_KEY]
+   --replica-update              enable replica update (default: true) [$LOTUS_WORKER_REPLICA_UPDATE]
+   --timeout value               used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m") [$LOTUS_WORKER_TIMEOUT]
+   --unseal                      enable unsealing (32G sectors: 1 core, 128GiB Memory) (default: true) [$LOTUS_WORKER_UNSEAL]
+   --windowpost                  enable window post (default: false) [$LOTUS_WORKER_WINDOWPOST]
+   --winningpost                 enable winning post (default: false) [$LOTUS_WORKER_WINNINGPOST]
+
+```
+
+## lotus-worker stop
+```
+NAME:
+   lotus-worker stop - Stop a running lotus worker
+
+USAGE:
+   lotus-worker stop [command options] [arguments...]
+
+OPTIONS:
+   --help, -h  show help (default: false)
+
 ```
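Every `lotus-worker run` flag now has an environment-variable equivalent, so a worker can be configured entirely through its environment; the new `stop` subcommand then shuts it down cleanly. An illustrative sketch (the names and values are examples, not defaults):

```shell
export LOTUS_WORKER_NAME=sealer-01
export LOTUS_WORKER_LISTEN=0.0.0.0:3456
export LOTUS_WORKER_NO_SWAP=true
lotus-worker run

# later, from the same host
lotus-worker stop
```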
@@ -81,6 +96,8 @@ USAGE:
 
 COMMANDS:
    attach     attach local storage path
+   detach     detach local storage path
+   redeclare  redeclare sectors in a local storage path
    help, h    Shows a list of commands or help for one command
 
 OPTIONS:
@@ -107,6 +124,34 @@ OPTIONS:
 
 ```
 
+### lotus-worker storage detach
+```
+NAME:
+   lotus-worker storage detach - detach local storage path
+
+USAGE:
+   lotus-worker storage detach [command options] [path]
+
+OPTIONS:
+   --really-do-it  (default: false)
+
+```
+
+### lotus-worker storage redeclare
+```
+NAME:
+   lotus-worker storage redeclare - redeclare sectors in a local storage path
+
+USAGE:
+   lotus-worker storage redeclare [command options] [arguments...]
+
+OPTIONS:
+   --all           redeclare all storage paths (default: false)
+   --drop-missing  Drop index entries with missing files (default: false)
+   --id value      storage path ID
+
+```
+
 ## lotus-worker set
 ```
 NAME:
@@ -7,7 +7,7 @@ USAGE:
    lotus [global options] command [command options] [arguments...]
 
 VERSION:
-   1.17.0
+   1.17.1
 
 COMMANDS:
    daemon  Start a lotus daemon process
@@ -201,4 +201,28 @@
 # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY
 #HotStoreFullGCFrequency = 20
+
+# EnableColdStoreAutoPrune turns on compaction of the cold store i.e. pruning;
+# where hotstore compaction occurs every finality, pruning happens every 3 finalities.
+# Default is false
+#
+# type: bool
+# env var: LOTUS_CHAINSTORE_SPLITSTORE_ENABLECOLDSTOREAUTOPRUNE
+#EnableColdStoreAutoPrune = false
+
+# ColdStoreFullGCFrequency specifies how often to perform a full (moving) GC on the coldstore.
+# Only applies if auto prune is enabled. A value of 0 disables, while a value of 1 will do
+# full GC in every prune.
+# Default is 7 (about once a week)
+#
+# type: uint64
+# env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTOREFULLGCFREQUENCY
+#ColdStoreFullGCFrequency = 7
+
+# ColdStoreRetention specifies the retention policy for data reachable from the chain, in
+# finalities beyond the compaction boundary; default is 0, -1 retains everything.
+#
+# type: int64
+# env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORERETENTION
+#ColdStoreRetention = 0
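As the `env var:` annotations above indicate, each of these splitstore settings can also be supplied through the environment instead of the config file, for example (the values here are illustrative):

```shell
export LOTUS_CHAINSTORE_SPLITSTORE_ENABLECOLDSTOREAUTOPRUNE=true
export LOTUS_CHAINSTORE_SPLITSTORE_COLDSTOREFULLGCFREQUENCY=7
export LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORERETENTION=0
lotus daemon
```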
@@ -622,6 +622,13 @@
 # env var: LOTUS_STORAGE_ALLOWREGENSECTORKEY
 #AllowRegenSectorKey = true
+
+# LocalWorkerName specifies a custom name for the builtin worker.
+# If set to an empty string (default) the OS hostname will be used.
+#
+# type: string
+# env var: LOTUS_STORAGE_LOCALWORKERNAME
+#LocalWorkerName = ""
 
 # Assigner specifies the worker assigner to use when scheduling tasks.
 # "utilization" (default) - assign tasks to workers with lowest utilization.
 # "spread" - assign tasks to as many distinct workers as possible.
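This config field names the miner's builtin worker; standalone workers get the same treatment via the new `--name` flag / `LOTUS_WORKER_NAME` variable shown earlier, so every worker in a cluster can carry a stable, human-readable name. A sketch, assuming the env-var override listed above works like the other config overrides (the names are illustrative):

```shell
# builtin worker, named via the miner config / env override above
LOTUS_STORAGE_LOCALWORKERNAME=miner-builtin lotus-miner run

# remote worker
LOTUS_WORKER_NAME=sealer-02 lotus-worker run
```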
extern/filecoin-ffi (vendored submodule)
@@ -1 +1 @@
-Subproject commit 32afd6e1f1419b6bb7d0f4b3944287fde593ca64
+Subproject commit f997fe6c77632c0bc58d0b1fdf53ee7a93f6027c
go.mod
@@ -125,7 +125,7 @@ require (
 	github.com/multiformats/go-base32 v0.0.4
 	github.com/multiformats/go-multiaddr v0.5.0
 	github.com/multiformats/go-multiaddr-dns v0.3.1
-	github.com/multiformats/go-multibase v0.0.3
+	github.com/multiformats/go-multibase v0.1.1
 	github.com/multiformats/go-multihash v0.1.0
 	github.com/multiformats/go-varint v0.0.6
 	github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333
go.sum
@@ -1655,8 +1655,9 @@ github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysj
 github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA=
 github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA=
 github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs=
-github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk=
 github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc=
+github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
+github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
 github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4=
 github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ=
 github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ=
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	logging "github.com/ipfs/go-log/v2"
+	"github.com/libp2p/go-libp2p-core/peer"
 	"github.com/stretchr/testify/require"
 
 	"github.com/filecoin-project/go-address"
@@ -98,11 +99,21 @@ func (ts *apiSuite) testConnectTwo(t *testing.T) {
 
 	peers, err := one.NetPeers(ctx)
 	require.NoError(t, err)
-	require.Lenf(t, peers, 2, "node one doesn't have 2 peers")
+	countPeerIDs := func(peers []peer.AddrInfo) int {
+		peerIDs := make(map[peer.ID]struct{})
+		for _, p := range peers {
+			peerIDs[p.ID] = struct{}{}
+		}
+		return len(peerIDs)
+	}
+	require.Equal(t, countPeerIDs(peers), 2, "node one doesn't have 2 peers")
 
 	peers, err = two.NetPeers(ctx)
 	require.NoError(t, err)
-	require.Lenf(t, peers, 2, "node two doesn't have 2 peers")
+	require.Equal(t, countPeerIDs(peers), 2, "node two doesn't have 2 peers")
 }
 
 func (ts *apiSuite) testSearchMsg(t *testing.T) {
@@ -74,7 +74,7 @@ func TestBatchDealInput(t *testing.T) {
 	require.NoError(t, err)
 
 	checkNoPadding := func() {
-		sl, err := miner.SectorsList(ctx)
+		sl, err := miner.SectorsListNonGenesis(ctx)
 		require.NoError(t, err)
 
 		sort.Slice(sl, func(i, j int) bool {
@@ -125,7 +125,7 @@ func TestBatchDealInput(t *testing.T) {
 
 	checkNoPadding()
 
-	sl, err := miner.SectorsList(ctx)
+	sl, err := miner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 	require.Equal(t, len(sl), expectSectors)
 }
@ -60,11 +60,11 @@ func runTestCCUpgrade(t *testing.T) *kit.TestFullNode {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
|
CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
|
||||||
fmt.Printf("CCUpgrade: %d\n", CCUpgrade)
|
fmt.Printf("CCUpgrade: %d\n", CCUpgrade)
|
||||||
|
|
||||||
miner.PledgeSectors(ctx, 1, 0, nil)
|
miner.PledgeSectors(ctx, 1, 0, nil)
|
||||||
sl, err := miner.SectorsList(ctx)
|
sl, err := miner.SectorsListNonGenesis(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Len(t, sl, 1, "expected 1 sector")
|
require.Len(t, sl, 1, "expected 1 sector")
|
||||||
require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
|
require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
|
||||||
@ -79,7 +79,7 @@ func runTestCCUpgrade(t *testing.T) *kit.TestFullNode {
|
|||||||
err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
|
err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
sl, err = miner.SectorsList(ctx)
|
sl, err = miner.SectorsListNonGenesis(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Len(t, sl, 1, "expected 1 sector")
|
require.Len(t, sl, 1, "expected 1 sector")
|
||||||
|
|
||||||
@@ -308,7 +308,7 @@ func TestDeadlineToggling(t *testing.T) {
 	// terminate sectors on minerD
 	{
 		var terminationDeclarationParams []miner2.TerminationDeclaration
-		secs, err := minerD.SectorsList(ctx)
+		secs, err := minerD.SectorsListNonGenesis(ctx)
 		require.NoError(t, err)
 		require.Len(t, secs, sectorsD)
@@ -29,7 +29,7 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
 	kit.QuietMiningLogs()
 
 	client, miner, ens := kit.EnsembleMinimal(t)
-	ens.InterconnectAll().BeginMining(blocktime)
+	ens.InterconnectAll().BeginMiningMustPost(blocktime)
 
 	var (
 		ppb = int64(1)
@@ -70,7 +70,7 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) {
 	//stm: @STORAGE_LIST_001, @MINER_SECTOR_LIST_001
 	ss, err := miner.StorageList(context.Background())
 	require.NoError(t, err)
-	_, err = miner.SectorsList(ctx)
+	_, err = miner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 
 	//stm: @STORAGE_DROP_SECTOR_001, @STORAGE_LIST_001
@@ -95,7 +95,7 @@ iLoop:
 	// remove the other unsealed file as well
 	ss, err = miner.StorageList(context.Background())
 	require.NoError(t, err)
-	_, err = miner.SectorsList(ctx)
+	_, err = miner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 	for storeID, sd := range ss {
 		for _, sector := range sd {
@@ -131,7 +131,7 @@ func TestZeroPricePerByteRetrieval(t *testing.T) {
 	)
 
 	client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs())
-	ens.InterconnectAll().BeginMining(blockTime)
+	ens.InterconnectAll().BeginMiningMustPost(blockTime)
 
 	ctx := context.Background()
@@ -28,6 +28,15 @@ var (
 )
 
 func TestDealsRetryLackOfFunds(t *testing.T) {
+	t.Run("cover-gas", func(t *testing.T) {
+		testDealsRetryLackOfFunds(t, types.NewInt(1020000000000))
+	})
+	t.Run("empty", func(t *testing.T) {
+		testDealsRetryLackOfFunds(t, types.NewInt(1))
+	})
+}
+
+func testDealsRetryLackOfFunds(t *testing.T, publishStorageAccountFunds abi.TokenAmount) {
 	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
 	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
 	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
@@ -61,7 +70,6 @@ func TestDealsRetryLackOfFunds(t *testing.T) {
 		})),
 	)
 
-	publishStorageAccountFunds := types.NewInt(1020000000000)
 	minerFullNode, clientFullNode, miner, ens := kit.EnsembleTwoOne(t, kit.Account(publishStorageDealKey, publishStorageAccountFunds), kit.ConstructorOpts(opts), kit.MockProofs(), eightMBSectorsOpt)
 
 	kit.QuietMiningLogs()
@@ -178,69 +186,3 @@ func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) {
 	case <-time.After(time.Second * 15):
 	}
 }
-
-func TestDealsRetryLackOfFunds_belowLimit(t *testing.T) {
-	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
-	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
-	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
-	//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001
-	//stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001
-	ctx := context.Background()
-	kit.QuietMiningLogs()
-
-	// Allow 8MB sectors
-	eightMBSectorsOpt := kit.SectorSize(8 << 20)
-
-	publishStorageDealKey, err := key.GenerateKey(types.KTSecp256k1)
-	require.NoError(t, err)
-
-	opts := node.Options(
-		node.Override(new(*storageadapter.DealPublisher),
-			storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
-				Period:         publishPeriod,
-				MaxDealsPerMsg: maxDealsPerMsg,
-			}),
-		),
-		node.Override(new(*ctladdr.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{
-			DealPublishControl: []string{
-				publishStorageDealKey.Address.String(),
-			},
-			DisableOwnerFallback:  true,
-			DisableWorkerFallback: true,
-		})),
-	)
-
-	publishStorageAccountFunds := types.NewInt(1)
-	minerFullNode, clientFullNode, miner, ens := kit.EnsembleTwoOne(t, kit.Account(publishStorageDealKey, publishStorageAccountFunds), kit.ConstructorOpts(opts), kit.MockProofs(), eightMBSectorsOpt)
-
-	kit.QuietMiningLogs()
-
-	ens.
-		Start().
-		InterconnectAll().
-		BeginMining(blockTime)
-
-	_, err = minerFullNode.WalletImport(ctx, &publishStorageDealKey.KeyInfo)
-	require.NoError(t, err)
-
-	miner.SetControlAddresses(publishStorageDealKey.Address)
-
-	dh := kit.NewDealHarness(t, clientFullNode, miner, miner)
-
-	res, _ := clientFullNode.CreateImportFile(ctx, 0, 4<<20) // 4MiB file.
-	list, err := clientFullNode.ClientListImports(ctx)
-	require.NoError(t, err)
-	require.Len(t, list, 1)
-	require.Equal(t, res.Root, *list[0].Root)
-
-	dp := dh.DefaultStartDealParams()
-	dp.Data.Root = res.Root
-	dp.FastRetrieval = true
-	dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price.
-	deal := dh.StartDeal(ctx, dp)
-
-	err = dh.ExpectDealFailure(ctx, deal, "Actor balance less than needed")
-	if err != nil {
-		t.Fatal(err)
-	}
-}
itests/gas_estimation_test.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+package itests
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/filecoin-project/go-state-types/big"
+
+	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/chain/actors/builtin/account"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/itests/kit"
+)
+
+func TestEstimateGasNoFunds(t *testing.T) {
+	ctx := context.Background()
+
+	kit.QuietMiningLogs()
+
+	client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs())
+	ens.InterconnectAll().BeginMining(10 * time.Millisecond)
+
+	// create a new address
+	addr, err := client.WalletNew(ctx, types.KTBLS)
+	require.NoError(t, err)
+
+	// Create that address.
+	msg := &types.Message{
+		From:  client.DefaultKey.Address,
+		To:    addr,
+		Value: big.Zero(),
+	}
+
+	sm, err := client.MpoolPushMessage(ctx, msg, nil)
+	require.NoError(t, err)
+
+	_, err = client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true)
+	require.NoError(t, err)
+
+	// Make sure we can estimate gas even if we have no funds.
+	msg2 := &types.Message{
+		From:   addr,
+		To:     client.DefaultKey.Address,
+		Method: account.Methods.PubkeyAddress,
+		Value:  big.Zero(),
+	}
+
+	limit, err := client.GasEstimateGasLimit(ctx, msg2, types.EmptyTSK)
+	require.NoError(t, err)
+	require.NotZero(t, limit)
+}
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"strings"
 	"sync"
 	"sync/atomic"
 	"testing"
@@ -28,6 +29,8 @@ type BlockMiner struct {
 	miner *TestMiner
 
 	nextNulls int64
+	pause     chan struct{}
+	unpause   chan struct{}
 	wg        sync.WaitGroup
 	cancel    context.CancelFunc
 }
@@ -37,6 +40,8 @@ func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
 		t:      t,
 		miner:  miner,
 		cancel: func() {},
+		unpause: make(chan struct{}),
+		pause:   make(chan struct{}),
 	}
 }
 
@@ -184,7 +189,12 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
 
 	var target abi.ChainEpoch
 	reportSuccessFn := func(success bool, epoch abi.ChainEpoch, err error) {
+		// if api shuts down before mining, we may get an error which we should probably just ignore
+		// (fixing it will require rewriting most of the mining loop)
+		if err != nil && !strings.Contains(err.Error(), "websocket connection closed") {
 		require.NoError(bm.t, err)
+		}
+
 		target = epoch
 		wait <- success
 	}
@@ -249,6 +259,18 @@ func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) {
 		defer bm.wg.Done()
 
 		for {
+			select {
+			case <-bm.pause:
+				select {
+				case <-bm.unpause:
+				case <-ctx.Done():
+					return
+				}
+			case <-ctx.Done():
+				return
+			default:
+			}
+
 			select {
 			case <-time.After(blocktime):
 			case <-ctx.Done():
@@ -277,6 +299,16 @@ func (bm *BlockMiner) InjectNulls(rounds abi.ChainEpoch) {
 	atomic.AddInt64(&bm.nextNulls, int64(rounds))
 }
 
+// Pause compels the miner to wait for a signal to restart
+func (bm *BlockMiner) Pause() {
+	bm.pause <- struct{}{}
+}
+
+// Restart continues mining after a pause. This will hang if called before pause.
+func (bm *BlockMiner) Restart() {
+	bm.unpause <- struct{}{}
+}
+
 func (bm *BlockMiner) MineUntilBlock(ctx context.Context, fn *TestFullNode, cb func(abi.ChainEpoch)) {
 	for i := 0; i < 1000; i++ {
 		var (
@@ -329,4 +361,12 @@ func (bm *BlockMiner) Stop() {
 	bm.t.Log("shutting down mining")
 	bm.cancel()
 	bm.wg.Wait()
+	if bm.unpause != nil {
+		close(bm.unpause)
+		bm.unpause = nil
+	}
+	if bm.pause != nil {
+		close(bm.pause)
+		bm.pause = nil
+	}
 }
@@ -292,7 +292,7 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) {
 }
 
 func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
-	snums, err := dh.main.SectorsList(ctx)
+	snums, err := dh.main.SectorsListNonGenesis(ctx)
 	require.NoError(dh.t, err)
 	for _, snum := range snums {
 		si, err := dh.main.SectorsStatus(ctx, snum, false)
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/rand"
+	"encoding/binary"
 	"fmt"
 	"io/ioutil"
 	"net"
@@ -20,13 +21,13 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/filecoin-project/go-address"
+	cborutil "github.com/filecoin-project/go-cbor-util"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/builtin"
 	"github.com/filecoin-project/go-state-types/exitcode"
 	"github.com/filecoin-project/go-state-types/network"
 	"github.com/filecoin-project/go-statestore"
-	"github.com/filecoin-project/go-storedcounter"
 	miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
 	power3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/power"
 
@@ -56,6 +57,7 @@ import (
 	testing2 "github.com/filecoin-project/lotus/node/modules/testing"
 	"github.com/filecoin-project/lotus/node/repo"
 	"github.com/filecoin-project/lotus/storage/paths"
+	pipeline "github.com/filecoin-project/lotus/storage/pipeline"
 	sectorstorage "github.com/filecoin-project/lotus/storage/sealer"
 	"github.com/filecoin-project/lotus/storage/sealer/mock"
 	"github.com/filecoin-project/lotus/storage/sealer/storiface"
@@ -233,9 +235,12 @@ func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeO
 	}
 
 	ownerKey := options.ownerKey
+	var presealSectors int
+
 	if !n.bootstrapped {
+		presealSectors = options.sectors
+
 		var (
-			sectors = options.sectors
 			k    *types.KeyInfo
 			genm *genesis.Miner
 		)
@@ -246,9 +251,9 @@ func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeO
 
 		// Create the preseal commitment.
 		if n.options.mockProofs {
-			genm, k, err = mock.PreSeal(proofType, actorAddr, sectors)
+			genm, k, err = mock.PreSeal(proofType, actorAddr, presealSectors)
 		} else {
-			genm, k, err = seed.PreSeal(actorAddr, proofType, 0, sectors, tdir, []byte("make genesis mem random"), nil, true)
+			genm, k, err = seed.PreSeal(actorAddr, proofType, 0, presealSectors, tdir, []byte("make genesis mem random"), nil, true)
 		}
 		require.NoError(n.t, err)
 
@@ -279,6 +284,7 @@ func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeO
 		OwnerKey:       ownerKey,
 		FullNode:       full,
 		PresealDir:     tdir,
+		PresealSectors: presealSectors,
 		options:        options,
 		RemoteListener: rl,
 	}
@@ -335,12 +341,48 @@ func (n *Ensemble) Start() *Ensemble {
 
 	// Create all inactive full nodes.
 	for i, full := range n.inactive.fullnodes {
-		r := repo.NewMemory(nil)
+		var r repo.Repo
+		if !full.options.fsrepo {
+			rmem := repo.NewMemory(nil)
+			n.t.Cleanup(rmem.Cleanup)
+			r = rmem
+		} else {
+			repoPath := n.t.TempDir()
+			rfs, err := repo.NewFS(repoPath)
+			require.NoError(n.t, err)
+			require.NoError(n.t, rfs.Init(repo.FullNode))
+			r = rfs
+		}
+
+		// setup config with options
+		lr, err := r.Lock(repo.FullNode)
+		require.NoError(n.t, err)
+
+		c, err := lr.Config()
+		require.NoError(n.t, err)
+
+		cfg, ok := c.(*config.FullNode)
+		if !ok {
+			n.t.Fatalf("invalid config from repo, got: %T", c)
+		}
+		for _, opt := range full.options.cfgOpts {
+			require.NoError(n.t, opt(cfg))
+		}
+		err = lr.SetConfig(func(raw interface{}) {
+			rcfg := raw.(*config.FullNode)
+			*rcfg = *cfg
+		})
+		require.NoError(n.t, err)
+
+		err = lr.Close()
+		require.NoError(n.t, err)
+
 		opts := []node.Option{
 			node.FullAPI(&full.FullNode, node.Lite(full.options.lite)),
 			node.Base(),
 			node.Repo(r),
-			node.MockHost(n.mn),
+			node.If(full.options.disableLibp2p, node.MockHost(n.mn)),
 			node.Test(),
 
 			// so that we subscribe to pubsub topics immediately
@@ -390,7 +432,10 @@ func (n *Ensemble) Start() *Ensemble {
 			n.inactive.fullnodes[i] = withRPC
 		}
 
-		n.t.Cleanup(func() { _ = stop(context.Background()) })
+		n.t.Cleanup(func() {
+			_ = stop(context.Background())
+		})
 
 		n.active.fullnodes = append(n.active.fullnodes, full)
 	}
@@ -506,9 +551,6 @@ func (n *Ensemble) Start() *Ensemble {
 
 			cfg.Subsystems.SectorIndexApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr)
 			cfg.Subsystems.SealerApiInfo = fmt.Sprintf("%s:%s", token, m.options.mainMiner.ListenAddr)
-
-			fmt.Println("config for market node, setting SectorIndexApiInfo to: ", cfg.Subsystems.SectorIndexApiInfo)
-			fmt.Println("config for market node, setting SealerApiInfo to: ", cfg.Subsystems.SealerApiInfo)
 		}
 
 		err = lr.SetConfig(func(raw interface{}) {
@@ -535,18 +577,20 @@ func (n *Ensemble) Start() *Ensemble {
 		err = ds.Put(ctx, datastore.NewKey("miner-address"), m.ActorAddr.Bytes())
 		require.NoError(n.t, err)
 
-		nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix))
-		for i := 0; i < m.options.sectors; i++ {
-			_, err := nic.Next()
-			require.NoError(n.t, err)
+		if i < len(n.genesis.miners) && !n.bootstrapped {
+			// if this is a genesis miner, import preseal metadata
+			require.NoError(n.t, importPreSealMeta(ctx, n.genesis.miners[i], ds))
 		}
-		_, err = nic.Next()
-		require.NoError(n.t, err)
 
 		// using real proofs, therefore need real sectors.
 		if !n.bootstrapped && !n.options.mockProofs {
 			psd := m.PresealDir
+			noPaths := m.options.noStorage
+
 			err := lr.SetStorage(func(sc *paths.StorageConfig) {
+				if noPaths {
+					sc.StoragePaths = []paths.LocalPath{}
+				}
 				sc.StoragePaths = append(sc.StoragePaths, paths.LocalPath{Path: psd})
 			})
 
@@ -583,7 +627,7 @@ func (n *Ensemble) Start() *Ensemble {
 			node.Repo(r),
 			node.Test(),
 
-			node.If(!m.options.disableLibp2p, node.MockHost(n.mn)),
+			node.If(m.options.disableLibp2p, node.MockHost(n.mn)),
 
 			node.Override(new(v1api.FullNode), m.FullNode.FullNode),
 			node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)),
@@ -693,6 +737,13 @@ func (n *Ensemble) Start() *Ensemble {
 		lr, err := r.Lock(repo.Worker)
 		require.NoError(n.t, err)
 
+		if m.options.noStorage {
+			err := lr.SetStorage(func(sc *paths.StorageConfig) {
+				sc.StoragePaths = []paths.LocalPath{}
+			})
+			require.NoError(n.t, err)
+		}
+
 		ds, err := lr.Datastore(context.Background(), "/metadata")
 		require.NoError(n.t, err)
 
@@ -715,6 +766,7 @@ func (n *Ensemble) Start() *Ensemble {
 			LocalWorker: sectorstorage.NewLocalWorker(sectorstorage.WorkerConfig{
 				TaskTypes: m.options.workerTasks,
 				NoSwap:    false,
+				Name:      m.options.workerName,
 			}, store, localStore, m.MinerNode, m.MinerNode, wsts),
 			LocalStore: localStore,
 			Storage:    lr,
@@ -900,3 +952,46 @@ func (n *Ensemble) generateGenesis() *genesis.Template {
 
 	return templ
 }
+
+func importPreSealMeta(ctx context.Context, meta genesis.Miner, mds dtypes.MetadataDS) error {
+	maxSectorID := abi.SectorNumber(0)
+	for _, sector := range meta.Sectors {
+		sectorKey := datastore.NewKey(pipeline.SectorStorePrefix).ChildString(fmt.Sprint(sector.SectorID))
+
+		commD := sector.CommD
+		commR := sector.CommR
+
+		info := &pipeline.SectorInfo{
+			State:        pipeline.Proving,
+			SectorNumber: sector.SectorID,
+			Pieces: []pipeline.Piece{
+				{
+					Piece: abi.PieceInfo{
+						Size:     abi.PaddedPieceSize(meta.SectorSize),
+						PieceCID: commD,
+					},
+					DealInfo: nil, // todo: likely possible to get, but not really that useful
+				},
+			},
+			CommD: &commD,
+			CommR: &commR,
+		}
+
+		b, err := cborutil.Dump(info)
+		if err != nil {
+			return err
+		}
+
+		if err := mds.Put(ctx, sectorKey, b); err != nil {
+			return err
+		}
+
+		if sector.SectorID > maxSectorID {
+			maxSectorID = sector.SectorID
+		}
+	}
+
+	buf := make([]byte, binary.MaxVarintLen64)
+	size := binary.PutUvarint(buf, uint64(maxSectorID))
+	return mds.Put(ctx, datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size])
+}
@@ -8,6 +8,7 @@ import (
 	"net"
 	"os"
 	"path/filepath"
+	"sort"
 	"strings"
 	"testing"
 	"time"
@@ -78,6 +79,7 @@ type TestMiner struct {
 
 	FullNode   *TestFullNode
 	PresealDir string
+	PresealSectors int
 
 	Libp2p struct {
 		PeerID peer.ID
@@ -128,9 +130,9 @@ func (tm *TestMiner) StartPledge(ctx context.Context, n, existing int, blockNoti
 	}
 
 	for {
-		s, err := tm.StorageMiner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
+		s, err := tm.SectorsListNonGenesis(ctx)
 		require.NoError(tm.t, err)
-		fmt.Printf("Sectors: %d\n", len(s))
+		fmt.Printf("Sectors: %d (n %d, ex %d)\n", len(s), n, existing)
 		if len(s) >= n+existing {
 			break
 		}
@@ -140,7 +142,7 @@ func (tm *TestMiner) StartPledge(ctx context.Context, n, existing int, blockNoti
 
 	fmt.Printf("All sectors is fsm\n")
 
-	s, err := tm.StorageMiner.SectorsList(ctx)
+	s, err := tm.SectorsListNonGenesis(ctx)
 	require.NoError(tm.t, err)
 
 	toCheck := map[abi.SectorNumber]struct{}{}
@@ -167,9 +169,8 @@ func (tm *TestMiner) FlushSealingBatches(ctx context.Context) {
 
 const metaFile = "sectorstore.json"
 
-func (tm *TestMiner) AddStorage(ctx context.Context, t *testing.T, weight uint64, seal, store bool) {
-	p, err := ioutil.TempDir("", "lotus-testsectors-")
-	require.NoError(t, err)
+func (tm *TestMiner) AddStorage(ctx context.Context, t *testing.T, conf func(*paths.LocalStorageMeta)) storiface.ID {
+	p := t.TempDir()
 
 	if err := os.MkdirAll(p, 0755); err != nil {
 		if !os.IsExist(err) {
@@ -177,18 +178,20 @@ func (tm *TestMiner) AddStorage(ctx context.Context, t *testing.T, weight uint64
 		}
 	}
 
-	_, err = os.Stat(filepath.Join(p, metaFile))
+	_, err := os.Stat(filepath.Join(p, metaFile))
 	if !os.IsNotExist(err) {
 		require.NoError(t, err)
 	}
 
 	cfg := &paths.LocalStorageMeta{
 		ID:       storiface.ID(uuid.New().String()),
-		Weight:   weight,
-		CanSeal:  seal,
-		CanStore: store,
+		Weight:   10,
+		CanSeal:  false,
+		CanStore: false,
 	}
 
+	conf(cfg)
+
 	if !(cfg.CanStore || cfg.CanSeal) {
 		t.Fatal("must specify at least one of CanStore or cfg.CanSeal")
 	}
@@ -201,4 +204,18 @@ func (tm *TestMiner) AddStorage(ctx context.Context, t *testing.T, weight uint64
 
 	err = tm.StorageAddLocal(ctx, p)
 	require.NoError(t, err)
+
+	return cfg.ID
+}
+
+func (tm *TestMiner) SectorsListNonGenesis(ctx context.Context) ([]abi.SectorNumber, error) {
+	l, err := tm.SectorsList(ctx)
+	if err != nil {
+		return nil, err
+	}
+	// sort just in case
+	sort.Slice(l, func(i, j int) bool {
+		return l[i] < l[j]
+	})
+
+	return l[tm.PresealSectors:], nil
 }
@@ -33,6 +33,8 @@ type nodeOpts struct {
 	rpc           bool
 	ownerKey      *key.Key
 	extraNodeOpts []node.Option
+	cfgOpts       []CfgOption
+	fsrepo        bool
 
 	subsystems MinerSubsystem
 	mainMiner  *TestMiner
@@ -43,9 +45,11 @@ type nodeOpts struct {
 	minerNoLocalSealing bool // use worker
 	minerAssigner       string
 	disallowRemoteFinalize bool
+	noStorage bool
 
 	workerTasks      []sealtasks.TaskType
 	workerStorageOpt func(paths.Store) paths.Store
+	workerName       string
 }
 
 // DefaultNodeOpts are the default options that will be applied to test nodes.
@@ -154,6 +158,14 @@ func PresealSectors(sectors int) NodeOpt {
 	}
 }
 
+// NoStorage initializes miners with no writable storage paths (just read-only preseal paths)
+func NoStorage() NodeOpt {
+	return func(opts *nodeOpts) error {
+		opts.noStorage = true
+		return nil
+	}
+}
+
 // ThroughRPC makes interactions with this node throughout the test flow through
 // the JSON-RPC API.
 func ThroughRPC() NodeOpt {
@@ -210,9 +222,66 @@ func WithTaskTypes(tt []sealtasks.TaskType) NodeOpt {
 	}
 }
 
+func WithWorkerName(n string) NodeOpt {
+	return func(opts *nodeOpts) error {
+		opts.workerName = n
+		return nil
+	}
+}
+
+var WithSealWorkerTasks = WithTaskTypes([]sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, sealtasks.TTUnseal})
+
 func WithWorkerStorage(transform func(paths.Store) paths.Store) NodeOpt {
 	return func(opts *nodeOpts) error {
 		opts.workerStorageOpt = transform
 		return nil
 	}
 }
+
+func FsRepo() NodeOpt {
+	return func(opts *nodeOpts) error {
+		opts.fsrepo = true
+		return nil
+	}
+}
+
+func WithCfgOpt(opt CfgOption) NodeOpt {
+	return func(opts *nodeOpts) error {
+		opts.cfgOpts = append(opts.cfgOpts, opt)
+		return nil
+	}
+}
+
+type CfgOption func(cfg *config.FullNode) error
+
+func SplitstoreDiscard() NodeOpt {
+	return WithCfgOpt(func(cfg *config.FullNode) error {
+		//cfg.Chainstore.Splitstore.HotStoreType = "badger" // default
+		//cfg.Chainstore.Splitstore.MarkSetType = "badger" // default
+		//cfg.Chainstore.Splitstore.HotStoreMessageRetention = 0 // default
+		cfg.Chainstore.EnableSplitstore = true
+		cfg.Chainstore.Splitstore.HotStoreFullGCFrequency = 0 // turn off full gc
+		cfg.Chainstore.Splitstore.ColdStoreType = "discard"   // no cold store
+		return nil
+	})
+}
+
+func SplitstoreUniversal() NodeOpt {
+	return WithCfgOpt(func(cfg *config.FullNode) error {
+		//cfg.Chainstore.Splitstore.HotStoreType = "badger" // default
+		//cfg.Chainstore.Splitstore.MarkSetType = "badger" // default
+		//cfg.Chainstore.Splitstore.HotStoreMessageRetention = 0 // default
+		cfg.Chainstore.EnableSplitstore = true
+		cfg.Chainstore.Splitstore.HotStoreFullGCFrequency = 0 // turn off full gc
+		cfg.Chainstore.Splitstore.ColdStoreType = "universal" // universal bs is coldstore
+		return nil
+	})
+}
+
+func SplitstoreAutoPrune() NodeOpt {
+	return WithCfgOpt(func(cfg *config.FullNode) error {
+		cfg.Chainstore.Splitstore.EnableColdStoreAutoPrune = true // turn on
+		cfg.Chainstore.Splitstore.ColdStoreFullGCFrequency = 0    // turn off full gc
+		return nil
+	})
}
@@ -2,13 +2,21 @@ package kit
 
 import (
 	"context"
+	"encoding/json"
+	"io/ioutil"
 	"net"
 	"net/http"
+	"os"
+	"path/filepath"
 	"testing"
 
+	"github.com/google/uuid"
 	"github.com/multiformats/go-multiaddr"
+	"github.com/stretchr/testify/require"
 
 	"github.com/filecoin-project/lotus/api"
+	"github.com/filecoin-project/lotus/storage/paths"
+	"github.com/filecoin-project/lotus/storage/sealer/storiface"
 )
 
 // TestWorker represents a worker enrolled in an Ensemble.
@@ -29,3 +37,42 @@ type TestWorker struct {
 
 	options nodeOpts
 }
+
+func (tm *TestWorker) AddStorage(ctx context.Context, t *testing.T, conf func(*paths.LocalStorageMeta)) storiface.ID {
+	p := t.TempDir()
+
+	if err := os.MkdirAll(p, 0755); err != nil {
+		if !os.IsExist(err) {
+			require.NoError(t, err)
+		}
+	}
+
+	_, err := os.Stat(filepath.Join(p, metaFile))
+	if !os.IsNotExist(err) {
+		require.NoError(t, err)
+	}
+
+	cfg := &paths.LocalStorageMeta{
+		ID:       storiface.ID(uuid.New().String()),
+		Weight:   10,
+		CanSeal:  false,
+		CanStore: false,
+	}
+
+	conf(cfg)
+
+	if !(cfg.CanStore || cfg.CanSeal) {
+		t.Fatal("must specify at least one of CanStore or cfg.CanSeal")
+	}
+
+	b, err := json.MarshalIndent(cfg, "", "  ")
+	require.NoError(t, err)
+
+	err = ioutil.WriteFile(filepath.Join(p, metaFile), b, 0644)
+	require.NoError(t, err)
+
+	err = tm.StorageAddLocal(ctx, p)
+	require.NoError(t, err)
+
+	return cfg.ID
+}
@@ -22,7 +22,6 @@ import (
 	"github.com/filecoin-project/lotus/chain/actors/adt"
 	lmultisig "github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/chain/vm"
 	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/filecoin-project/lotus/itests/multisig"
 )
@@ -36,7 +35,6 @@ func TestMultisig(t *testing.T) {
 	//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
 	kit.QuietMiningLogs()
-	vm.EnableDetailedTracing = true
 
 	blockTime := 5 * time.Millisecond
 	client, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
@@ -47,8 +45,6 @@ func TestMultisig(t *testing.T) {
 
 // TestMultisigReentrant sends an infinitely recursive message to a multisig.
 func TestMultisigReentrant(t *testing.T) {
-	tracing := vm.EnableDetailedTracing
-	vm.EnableDetailedTracing = true
 	//stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001,
 	//stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01
 	//stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001
@@ -138,7 +134,6 @@ func TestMultisigReentrant(t *testing.T) {
 	require.NoError(t, err, "failed to replay reentrant propose message (StateWaitMsg)")
 
 	require.Equal(t, 1025, countDepth(sl.ExecutionTrace))
-	vm.EnableDetailedTracing = tracing
 }
 
 func countDepth(trace types.ExecutionTrace) int {
itests/net_test.go (new file)
@@ -0,0 +1,282 @@
//stm: #integration
package itests

import (
	"context"
	"fmt"
	"testing"

	"github.com/libp2p/go-libp2p-core/network"
	"github.com/libp2p/go-libp2p-core/peer"
	manet "github.com/multiformats/go-multiaddr/net"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/itests/kit"
)

func TestNetConn(t *testing.T) {
	ctx := context.Background()

	firstNode, secondNode, _, _ := kit.EnsembleTwoOne(t)

	//stm: @NETWORK_COMMON_ID_001
	secondNodeID, err := secondNode.ID(ctx)
	require.NoError(t, err)

	connState := getConnState(ctx, t, firstNode, secondNodeID)

	if connState != network.NotConnected {
		t.Errorf("node should not be connected to peers, state: %s", connState)
	}

	//stm: @NETWORK_COMMON_ADDRS_LISTEN_001
	addrInfo, err := secondNode.NetAddrsListen(ctx)
	require.NoError(t, err)

	//stm: @NETWORK_COMMON_CONNECT_001
	err = firstNode.NetConnect(ctx, addrInfo)
	if err != nil {
		t.Errorf("nodes failed to connect. %s", err.Error())
	}

	//stm: @NETWORK_COMMON_PEER_INFO_001
	netPeerInfo, err := firstNode.NetPeerInfo(ctx, secondNodeID)
	require.NoError(t, err)

	//stm: @NETWORK_COMMON_AGENT_VERSION_001
	agent, err := firstNode.NetAgentVersion(ctx, secondNodeID)
	require.NoError(t, err)

	if netPeerInfo.Agent != agent {
		t.Errorf("agents not matching: %s != %s", netPeerInfo.Agent, agent)
	}

	//stm: @NETWORK_COMMON_FIND_PEER_001
	secondNodePeer, err := firstNode.NetFindPeer(ctx, secondNodeID)
	require.NoError(t, err)

	if secondNodePeer.ID != addrInfo.ID {
		t.Errorf("peer id doesn't match with listen address")
	}

	connState = getConnState(ctx, t, firstNode, secondNodeID)

	if connState != network.Connected {
		t.Errorf("peer does not have connected state")
	}

	//stm: @NETWORK_COMMON_PEERS_001
	addrs, err := firstNode.NetPeers(ctx)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(addrs))

	//stm: @NETWORK_COMMON_DISCONNECT_001
	err = firstNode.NetDisconnect(ctx, secondNodeID)
	if err != nil {
		t.Errorf("nodes failed to disconnect. %s", err.Error())
	}

	connState = getConnState(ctx, t, firstNode, secondNodeID)

	if connState != network.NotConnected {
		t.Errorf("peer should have disconnected")
	}

	//stm: @NETWORK_COMMON_PEERS_001
	addrs, err = firstNode.NetPeers(ctx)
	require.NoError(t, err)

	if len(addrs) > 0 {
		t.Errorf("there should be no peers in network after disconnecting node")
	}

}

func TestNetStat(t *testing.T) {

	firstNode, secondNode, _, _ := kit.EnsembleTwoOne(t)
	ctx := context.Background()

	sId, err := secondNode.ID(ctx)
	require.NoError(t, err)

	withScope := func(api interface{}, scope string) func(t *testing.T) {
		return func(t *testing.T) {

			stat, err := firstNode.NetStat(ctx, scope)
			require.NoError(t, err)

			switch scope {
			case "all":
				assert.NotNil(t, stat.System)
				assert.NotNil(t, stat.Transient)
			case "system":
				assert.NotNil(t, stat.System)
			case "transient":
				assert.NotNil(t, stat.Transient)
			}
		}
	}

	//stm: @NETWORK_COMMON_STAT_001
	t.Run("all", withScope(t, "all"))
	//stm: @NETWORK_COMMON_STAT_002
	t.Run("system", withScope(t, "system"))
	//stm: @NETWORK_COMMON_STAT_003
	t.Run("transient", withScope(t, "transient"))
	//stm: @NETWORK_COMMON_STAT_004
	t.Run("peer", withScope(t, fmt.Sprintf("peer:%s", sId)))
}

func TestNetLimit(t *testing.T) {

	firstNode, secondNode, _, _ := kit.EnsembleTwoOne(t)
	ctx := context.Background()

	sId, err := secondNode.ID(ctx)
	require.NoError(t, err)

	withScope := func(api interface{}, scope string) func(t *testing.T) {
		return func(t *testing.T) {
			_, err := firstNode.NetLimit(ctx, scope)
			require.NoError(t, err)
		}
	}

	//stm: @NETWORK_COMMON_LIMIT_001
	t.Run("system", withScope(t, "system"))
	//stm: @NETWORK_COMMON_LIMIT_002
	t.Run("transient", withScope(t, "transient"))
	//stm: @NETWORK_COMMON_LIMIT_003
	t.Run("peer", withScope(t, fmt.Sprintf("peer:%s", sId)))
}

func TestNetBlockPeer(t *testing.T) {
	ctx := context.Background()

	firstNode, secondNode, _, _ := kit.EnsembleTwoOne(t)

	//stm: @NETWORK_COMMON_ID_001
	firstAddrInfo, _ := firstNode.NetAddrsListen(ctx)
	firstNodeID, err := firstNode.ID(ctx)
	require.NoError(t, err)
	secondNodeID, err := secondNode.ID(ctx)
	require.NoError(t, err)

	// Sanity check that we're not already connected somehow
	connectedness, err := secondNode.NetConnectedness(ctx, firstNodeID)
	require.NoError(t, err, "failed to determine connectedness")
	require.NotEqual(t, connectedness, network.Connected, "shouldn't already be connected")

	//stm: @NETWORK_COMMON_BLOCK_ADD_001
	err = firstNode.NetBlockAdd(ctx, api.NetBlockList{Peers: []peer.ID{secondNodeID}})
	require.NoError(t, err)

	//stm: @NETWORK_COMMON_BLOCK_LIST_001
	list, err := firstNode.NetBlockList(ctx)
	require.NoError(t, err)

	if len(list.Peers) == 0 || list.Peers[0] != secondNodeID {
		t.Errorf("blocked peer not in blocked peer list")
	}

	require.Error(t, secondNode.NetConnect(ctx, firstAddrInfo), "shouldn't be able to connect to second node")
	connectedness, err = secondNode.NetConnectedness(ctx, firstAddrInfo.ID)
	require.NoError(t, err, "failed to determine connectedness")
	require.NotEqual(t, connectedness, network.Connected)

	//stm: @NETWORK_COMMON_BLOCK_REMOVE_001
	err = firstNode.NetBlockRemove(ctx, api.NetBlockList{Peers: []peer.ID{secondNodeID}})
	require.NoError(t, err)

	//stm: @NETWORK_COMMON_BLOCK_LIST_001
	list, err = firstNode.NetBlockList(ctx)
	require.NoError(t, err)

	if len(list.Peers) > 0 {
		t.Errorf("failed to remove blocked peer from blocked peer list")
	}

	require.NoError(t, secondNode.NetConnect(ctx, firstAddrInfo), "failed to connect to second node")
	connectedness, err = secondNode.NetConnectedness(ctx, firstAddrInfo.ID)
	require.NoError(t, err, "failed to determine connectedness")
	require.Equal(t, connectedness, network.Connected)
}

func TestNetBlockIPAddr(t *testing.T) {
	ctx := context.Background()

	firstNode, secondNode, _, _ := kit.EnsembleTwoOne(t)

	//stm: @NETWORK_COMMON_ADDRS_LISTEN_001
	firstAddrInfo, _ := firstNode.NetAddrsListen(ctx)
	secondAddrInfo, _ := secondNode.NetAddrsListen(ctx)

	var secondNodeIPs []string

	for _, addr := range secondAddrInfo.Addrs {
		ip, err := manet.ToIP(addr)
		if err != nil {
			continue
		}
		secondNodeIPs = append(secondNodeIPs, ip.String())
	}

	// Sanity check that we're not already connected somehow
	connectedness, err := secondNode.NetConnectedness(ctx, firstAddrInfo.ID)
	require.NoError(t, err, "failed to determine connectedness")
	require.NotEqual(t, connectedness, network.Connected, "shouldn't already be connected")

	//stm: @NETWORK_COMMON_BLOCK_ADD_001
	require.NoError(t, firstNode.NetBlockAdd(ctx, api.NetBlockList{
		IPAddrs: secondNodeIPs}), "failed to add blocked IPs")

	//stm: @NETWORK_COMMON_BLOCK_LIST_001
	list, err := firstNode.NetBlockList(ctx)
	require.NoError(t, err)

	require.Equal(t, len(list.IPAddrs), len(secondNodeIPs), "expected %d blocked IPs", len(secondNodeIPs))
	for _, blockedIP := range list.IPAddrs {
		found := false
		for _, secondNodeIP := range secondNodeIPs {
			if blockedIP == secondNodeIP {
				found = true
				break
			}
		}

		require.True(t, found, "blocked IP %s is not one of secondNodeIPs", blockedIP)
	}

	require.Error(t, secondNode.NetConnect(ctx, firstAddrInfo), "shouldn't be able to connect to second node")
	connectedness, err = secondNode.NetConnectedness(ctx, firstAddrInfo.ID)
	require.NoError(t, err, "failed to determine connectedness")
	require.NotEqual(t, connectedness, network.Connected)

	//stm: @NETWORK_COMMON_BLOCK_REMOVE_001
	err = firstNode.NetBlockRemove(ctx, api.NetBlockList{IPAddrs: secondNodeIPs})
	require.NoError(t, err)

	//stm: @NETWORK_COMMON_BLOCK_LIST_001
	list, err = firstNode.NetBlockList(ctx)
	require.NoError(t, err)

	if len(list.IPAddrs) > 0 {
		t.Errorf("failed to remove blocked ip from blocked ip list")
	}

	require.NoError(t, secondNode.NetConnect(ctx, firstAddrInfo), "failed to connect to second node")
	connectedness, err = secondNode.NetConnectedness(ctx, firstAddrInfo.ID)
	require.NoError(t, err, "failed to determine connectedness")
	require.Equal(t, connectedness, network.Connected)
}

func getConnState(ctx context.Context, t *testing.T, node *kit.TestFullNode, peer peer.ID) network.Connectedness {
	//stm: @NETWORK_COMMON_CONNECTEDNESS_001
	connState, err := node.NetConnectedness(ctx, peer)
	require.NoError(t, err)

	return connState
}
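The NetStat and NetLimit subtests above rely on the resource-manager scope-string convention. A small hypothetical helper, shown only to make the convention explicit; the literal scope names are taken from the tests, while peerScope itself is not part of the diff:

// peerScope builds the per-peer scope string that NetStat and NetLimit
// accept alongside "system" and "transient" ("all" is NetStat-only).
func peerScope(p peer.ID) string {
	return fmt.Sprintf("peer:%s", p)
}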
itests/path_detach_redeclare_test.go (new file)
@@ -0,0 +1,413 @@
package itests

import (
	"context"
	"os"
	"os/exec"
	"path/filepath"
	"testing"

	logging "github.com/ipfs/go-log/v2"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/storage/paths"
	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

func TestPathDetachRedeclare(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_ = logging.SetLogLevel("storageminer", "INFO")

	var (
		client   kit.TestFullNode
		miner    kit.TestMiner
		wiw, wdw kit.TestWorker
	)
	ens := kit.NewEnsemble(t, kit.LatestActorsAt(-1)).
		FullNode(&client, kit.ThroughRPC()).
		Miner(&miner, &client, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.PresealSectors(2), kit.NoStorage()).
		Worker(&miner, &wiw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWinningPoSt})).
		Worker(&miner, &wdw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt})).
		Start()

	ens.InterconnectAll()

	// check there's only one path
	sps, err := miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)

	var id storiface.ID
	for s := range sps {
		id = s
	}

	local, err := miner.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 1)
	require.Greater(t, len(local[id]), 1)

	oldLocal := local[id]

	// check sectors
	checkSectors(ctx, t, client, miner, 2, 0)

	// detach preseal path
	require.NoError(t, miner.StorageDetachLocal(ctx, local[id]))

	// check that there are no paths, post checks fail
	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 0)
	local, err = miner.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 0)

	checkSectors(ctx, t, client, miner, 2, 2)

	// attach a new path
	newId := miner.AddStorage(ctx, t, func(cfg *paths.LocalStorageMeta) {
		cfg.CanStore = true
	})

	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	local, err = miner.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 1)
	require.Greater(t, len(local[newId]), 1)

	newLocal := local[newId]

	// move sector data to the new path

	// note: dest path already exists so we only want to .Join src
	require.NoError(t, exec.Command("cp", "--recursive", filepath.Join(oldLocal, "sealed"), newLocal).Run())
	require.NoError(t, exec.Command("cp", "--recursive", filepath.Join(oldLocal, "cache"), newLocal).Run())
	require.NoError(t, exec.Command("cp", "--recursive", filepath.Join(oldLocal, "unsealed"), newLocal).Run())

	// check that sector files aren't indexed, post checks fail
	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	require.Len(t, sps[newId], 0)

	// redeclare sectors
	require.NoError(t, miner.StorageRedeclareLocal(ctx, nil, false))

	// check that sector files exist, post checks work
	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	require.Len(t, sps[newId], 2)

	checkSectors(ctx, t, client, miner, 2, 0)

	// remove one sector, one post check fails
	require.NoError(t, os.RemoveAll(filepath.Join(newLocal, "sealed", "s-t01000-0")))
	require.NoError(t, os.RemoveAll(filepath.Join(newLocal, "cache", "s-t01000-0")))
	checkSectors(ctx, t, client, miner, 2, 1)

	// redeclare with no drop, still see sector in the index
	require.NoError(t, miner.StorageRedeclareLocal(ctx, nil, false))

	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	require.Len(t, sps[newId], 2)

	// redeclare with drop, don't see the sector in the index
	require.NoError(t, miner.StorageRedeclareLocal(ctx, nil, true))

	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	require.Len(t, sps[newId], 1)
	require.Equal(t, abi.SectorNumber(1), sps[newId][0].SectorID.Number)
}

func TestPathDetachRedeclareWorker(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_ = logging.SetLogLevel("storageminer", "INFO")

	var (
		client          kit.TestFullNode
		miner           kit.TestMiner
		wiw, wdw, sealw kit.TestWorker
	)
	ens := kit.NewEnsemble(t, kit.LatestActorsAt(-1)).
		FullNode(&client, kit.ThroughRPC()).
		Miner(&miner, &client, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.PresealSectors(2), kit.NoStorage()).
		Worker(&miner, &wiw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWinningPoSt})).
		Worker(&miner, &wdw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt})).
		Worker(&miner, &sealw, kit.ThroughRPC(), kit.NoStorage(), kit.WithSealWorkerTasks).
		Start()

	ens.InterconnectAll()

	// check there's only one path on the miner, none on the worker
	sps, err := miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)

	var id storiface.ID
	for s := range sps {
		id = s
	}

	local, err := miner.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 1)
	require.Greater(t, len(local[id]), 1)

	oldLocal := local[id]

	local, err = sealw.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 0)

	// check sectors
	checkSectors(ctx, t, client, miner, 2, 0)

	// detach preseal path from the miner
	require.NoError(t, miner.StorageDetachLocal(ctx, oldLocal))

	// check that there are no paths, post checks fail
	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 0)
	local, err = miner.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 0)

	checkSectors(ctx, t, client, miner, 2, 2)

	// attach a new path
	newId := sealw.AddStorage(ctx, t, func(cfg *paths.LocalStorageMeta) {
		cfg.CanStore = true
	})

	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	local, err = sealw.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 1)
	require.Greater(t, len(local[newId]), 1)

	newLocalTemp := local[newId]

	// move sector data to the new path

	// note: dest path already exists so we only want to .Join src
	require.NoError(t, exec.Command("cp", "--recursive", filepath.Join(oldLocal, "sealed"), newLocalTemp).Run())
	require.NoError(t, exec.Command("cp", "--recursive", filepath.Join(oldLocal, "cache"), newLocalTemp).Run())
	require.NoError(t, exec.Command("cp", "--recursive", filepath.Join(oldLocal, "unsealed"), newLocalTemp).Run())

	// check that sector files aren't indexed, post checks fail
	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	require.Len(t, sps[newId], 0)

	// redeclare sectors
	require.NoError(t, sealw.StorageRedeclareLocal(ctx, nil, false))

	// check that sector files exist, post checks work
	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	require.Len(t, sps[newId], 2)

	checkSectors(ctx, t, client, miner, 2, 0)

	// drop the path from the worker
	require.NoError(t, sealw.StorageDetachLocal(ctx, newLocalTemp))
	local, err = sealw.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 0)

	// add a new one again, and move the sectors there
	newId = sealw.AddStorage(ctx, t, func(cfg *paths.LocalStorageMeta) {
		cfg.CanStore = true
	})

	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	local, err = sealw.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 1)
	require.Greater(t, len(local[newId]), 1)

	newLocal := local[newId]

	// move sector data to the new path

	// note: dest path already exists so we only want to .Join src
	require.NoError(t, exec.Command("cp", "--recursive", filepath.Join(newLocalTemp, "sealed"), newLocal).Run())
	require.NoError(t, exec.Command("cp", "--recursive", filepath.Join(newLocalTemp, "cache"), newLocal).Run())
	require.NoError(t, exec.Command("cp", "--recursive", filepath.Join(newLocalTemp, "unsealed"), newLocal).Run())

	// redeclare sectors
	require.NoError(t, sealw.StorageRedeclareLocal(ctx, nil, false))

	// check that sector files exist, post checks work
	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	require.Len(t, sps[newId], 2)

	checkSectors(ctx, t, client, miner, 2, 0)

	// remove one sector, one check fails
	require.NoError(t, os.RemoveAll(filepath.Join(newLocal, "sealed", "s-t01000-0")))
	require.NoError(t, os.RemoveAll(filepath.Join(newLocal, "cache", "s-t01000-0")))
	checkSectors(ctx, t, client, miner, 2, 1)

	// redeclare with no drop, still see sector in the index
	require.NoError(t, sealw.StorageRedeclareLocal(ctx, nil, false))

	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	require.Len(t, sps[newId], 2)

	// redeclare with drop, don't see the sector in the index
	require.NoError(t, sealw.StorageRedeclareLocal(ctx, nil, true))

	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)
	require.Len(t, sps[newId], 1)
	require.Equal(t, abi.SectorNumber(1), sps[newId][0].SectorID.Number)
}

func TestPathDetachShared(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_ = logging.SetLogLevel("storageminer", "INFO")

	var (
		client          kit.TestFullNode
		miner           kit.TestMiner
		wiw, wdw, sealw kit.TestWorker
	)
	ens := kit.NewEnsemble(t, kit.LatestActorsAt(-1)).
		FullNode(&client, kit.ThroughRPC()).
		Miner(&miner, &client, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.PresealSectors(2), kit.NoStorage()).
		Worker(&miner, &wiw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWinningPoSt})).
		Worker(&miner, &wdw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt})).
		Worker(&miner, &sealw, kit.ThroughRPC(), kit.NoStorage(), kit.WithSealWorkerTasks).
		Start()

	ens.InterconnectAll()

	// check there's only one path on the miner, none on the worker
	sps, err := miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)

	var id storiface.ID
	for s := range sps {
		id = s
	}

	// check that there's only one URL for the path (provided by the miner node)
	si, err := miner.StorageInfo(ctx, id)
	require.NoError(t, err)
	require.Len(t, si.URLs, 1)

	local, err := miner.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 1)
	require.Greater(t, len(local[id]), 1)

	minerLocal := local[id]

	local, err = sealw.StorageLocal(ctx)
	require.NoError(t, err)
	require.Len(t, local, 0)

	// share the genesis sector path with the worker
	require.NoError(t, sealw.StorageAddLocal(ctx, minerLocal))

	// still just one path, but accessible from two places
	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)

	// should see 2 urls now
	si, err = miner.StorageInfo(ctx, id)
	require.NoError(t, err)
	require.Len(t, si.URLs, 2)

	// drop the path from the worker
	require.NoError(t, sealw.StorageDetachLocal(ctx, minerLocal))

	// the path is still registered
	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 1)

	// but with just one URL (the miner)
	si, err = miner.StorageInfo(ctx, id)
	require.NoError(t, err)
	require.Len(t, si.URLs, 1)

	// now also drop from the miner and check that the path is gone
	require.NoError(t, miner.StorageDetachLocal(ctx, minerLocal))

	sps, err = miner.StorageList(ctx)
	require.NoError(t, err)
	require.Len(t, sps, 0)
}

func checkSectors(ctx context.Context, t *testing.T, api kit.TestFullNode, miner kit.TestMiner, expectChecked, expectBad int) {
	addr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	mid, err := address.IDFromAddress(addr)
	require.NoError(t, err)

	info, err := api.StateMinerInfo(ctx, addr, types.EmptyTSK)
	require.NoError(t, err)

	partitions, err := api.StateMinerPartitions(ctx, addr, 0, types.EmptyTSK)
	require.NoError(t, err)
	par := partitions[0]

	sectorInfos, err := api.StateMinerSectors(ctx, addr, &par.LiveSectors, types.EmptyTSK)
	require.NoError(t, err)

	var tocheck []storiface.SectorRef
	for _, info := range sectorInfos {
		si := abi.SectorID{
			Miner:  abi.ActorID(mid),
			Number: info.SectorNumber,
		}

		tocheck = append(tocheck, storiface.SectorRef{
			ProofType: info.SealProof,
			ID:        si,
		})
	}

	require.Len(t, tocheck, expectChecked)

	bad, err := miner.CheckProvable(ctx, info.WindowPoStProofType, tocheck, true)
	require.NoError(t, err)
	require.Len(t, bad, expectBad)
}
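The detach/redeclare tests above move and delete sector files under the <path>/<type>/s-<miner>-<number> layout. A hypothetical helper that mirrors the layout the tests assume; the name and signature are illustrative and not part of the diff:

// sectorFile reproduces the on-disk naming the tests manipulate, e.g.
// sectorFile(newLocal, "sealed", "t01000", 0) -> <newLocal>/sealed/s-t01000-0.
func sectorFile(root, fileType, miner string, num int) string {
	return filepath.Join(root, fileType, fmt.Sprintf("s-%s-%d", miner, num))
}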
itests/path_type_filters_test.go (new file)
@@ -0,0 +1,199 @@
package itests

import (
	"context"
	"strings"
	"testing"
	"time"

	logging "github.com/ipfs/go-log/v2"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/storage/paths"
	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

func TestPathTypeFilters(t *testing.T) {
	runTest := func(t *testing.T, name string, asserts func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func())) {
		t.Run(name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			_ = logging.SetLogLevel("storageminer", "INFO")

			var (
				client   kit.TestFullNode
				miner    kit.TestMiner
				wiw, wdw kit.TestWorker
			)
			ens := kit.NewEnsemble(t, kit.LatestActorsAt(-1)).
				FullNode(&client, kit.ThroughRPC()).
				Miner(&miner, &client, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.PresealSectors(2), kit.NoStorage()).
				Worker(&miner, &wiw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWinningPoSt})).
				Worker(&miner, &wdw, kit.ThroughRPC(), kit.NoStorage(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt})).
				Start()

			ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)

			asserts(t, ctx, &miner, func() {
				dh := kit.NewDealHarness(t, &client, &miner, &miner)
				dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1})
			})
		})
	}

	runTest(t, "invalid-type-alert", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) {
		slU := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanSeal = true
			meta.AllowTypes = []string{"unsealed", "seeled"}
		})

		storlist, err := miner.StorageList(ctx)
		require.NoError(t, err)

		require.Len(t, storlist, 2) // 1 path we've added + preseal

		si, err := miner.StorageInfo(ctx, slU)
		require.NoError(t, err)

		// check that bad entries are filtered
		require.Len(t, si.DenyTypes, 0)
		require.Len(t, si.AllowTypes, 1)
		require.Equal(t, "unsealed", si.AllowTypes[0])

		as, err := miner.LogAlerts(ctx)
		require.NoError(t, err)

		var found bool
		for _, a := range as {
			if a.Active && a.Type.System == "sector-index" && strings.HasPrefix(a.Type.Subsystem, "pathconf-") {
				require.False(t, found)
				require.Contains(t, string(a.LastActive.Message), "unknown sector file type 'seeled'")
				found = true
			}
		}
		require.True(t, found)
	})

	runTest(t, "seal-to-stor-unseal-allowdeny", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) {
		// allow all types in the sealing path
		sealScratch := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanSeal = true
		})

		// unsealed storage
		unsStor := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanStore = true
			meta.AllowTypes = []string{"unsealed"}
		})

		// other storage
		sealStor := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanStore = true
			meta.DenyTypes = []string{"unsealed"}
		})

		storlist, err := miner.StorageList(ctx)
		require.NoError(t, err)

		require.Len(t, storlist, 4) // 3 paths we've added + preseal

		run()

		storlist, err = miner.StorageList(ctx)
		require.NoError(t, err)

		require.Len(t, storlist[sealScratch], 0)
		require.Len(t, storlist[unsStor], 1)
		require.Len(t, storlist[sealStor], 1)

		require.Equal(t, storiface.FTUnsealed, storlist[unsStor][0].SectorFileType)
		require.Equal(t, storiface.FTSealed|storiface.FTCache, storlist[sealStor][0].SectorFileType)
	})

	runTest(t, "sealstor-unseal-allowdeny", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) {
		// unsealed storage
		unsStor := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanStore = true
			meta.CanSeal = true
			meta.AllowTypes = []string{"unsealed"}
		})

		// other storage
		sealStor := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanStore = true
			meta.CanSeal = true
			meta.DenyTypes = []string{"unsealed"}
		})

		storlist, err := miner.StorageList(ctx)
		require.NoError(t, err)

		require.Len(t, storlist, 3) // 2 paths we've added + preseal

		run()

		storlist, err = miner.StorageList(ctx)
		require.NoError(t, err)

		require.Len(t, storlist[unsStor], 1)
		require.Len(t, storlist[sealStor], 1)

		require.Equal(t, storiface.FTUnsealed, storlist[unsStor][0].SectorFileType)
		require.Equal(t, storiface.FTSealed|storiface.FTCache, storlist[sealStor][0].SectorFileType)
	})

	runTest(t, "seal-store-allseparate", func(t *testing.T, ctx context.Context, miner *kit.TestMiner, run func()) {
		// sealing stores
		slU := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanSeal = true
			meta.AllowTypes = []string{"unsealed"}
		})
		slS := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanSeal = true
			meta.AllowTypes = []string{"sealed"}
		})
		slC := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanSeal = true
			meta.AllowTypes = []string{"cache"}
		})

		// storage stores
		stU := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanStore = true
			meta.AllowTypes = []string{"unsealed"}
		})
		stS := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanStore = true
			meta.AllowTypes = []string{"sealed"}
		})
		stC := miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
			meta.CanStore = true
			meta.AllowTypes = []string{"cache"}
		})

		storlist, err := miner.StorageList(ctx)
		require.NoError(t, err)

		require.Len(t, storlist, 7) // 6 paths we've added + preseal

		run()

		storlist, err = miner.StorageList(ctx)
		require.NoError(t, err)

		require.Len(t, storlist[slU], 0)
		require.Len(t, storlist[slS], 0)
		require.Len(t, storlist[slC], 0)

		require.Len(t, storlist[stU], 1)
		require.Len(t, storlist[stS], 1)
		require.Len(t, storlist[stC], 1)

		require.Equal(t, storiface.FTUnsealed, storlist[stU][0].SectorFileType)
		require.Equal(t, storiface.FTSealed, storlist[stS][0].SectorFileType)
		require.Equal(t, storiface.FTCache, storlist[stC][0].SectorFileType)
	})
}
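The filter tests boil down to two path configurations: AllowTypes whitelists sector file types, DenyTypes blacklists them, and an unknown type name ("seeled" above, which is deliberate test data) is dropped with an alert rather than an error. A condensed sketch of the two configurations; the helper is not part of the diff, but the paths.LocalStorageMeta fields are the same ones the tests use:

// addFilteredStores attaches one store that only accepts unsealed copies
// and one that accepts everything but unsealed data. Valid type names
// here are "unsealed", "sealed" and "cache", as exercised above.
func addFilteredStores(ctx context.Context, t *testing.T, miner *kit.TestMiner) (unsealed, other storiface.ID) {
	unsealed = miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
		meta.CanStore = true
		meta.AllowTypes = []string{"unsealed"}
	})
	other = miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
		meta.CanStore = true
		meta.DenyTypes = []string{"unsealed"}
	})
	return unsealed, other
}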
|
@ -95,7 +95,7 @@ func TestSDRUpgrade(t *testing.T) {
|
|||||||
// before.
|
// before.
|
||||||
miner.PledgeSectors(ctx, 9, 0, pledge)
|
miner.PledgeSectors(ctx, 9, 0, pledge)
|
||||||
|
|
||||||
s, err := miner.SectorsList(ctx)
|
s, err := miner.SectorsListNonGenesis(ctx)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
sort.Slice(s, func(i, j int) bool {
|
sort.Slice(s, func(i, j int) bool {
|
||||||
return s[i] < s[j]
|
return s[i] < s[j]
|
||||||
|
@@ -11,6 +11,7 @@ import (
 
 	"github.com/filecoin-project/lotus/itests/kit"
 	"github.com/filecoin-project/lotus/node/config"
+	"github.com/filecoin-project/lotus/storage/paths"
 )
 
 func TestDealsWithFinalizeEarly(t *testing.T) {
@@ -35,8 +36,14 @@ func TestDealsWithFinalizeEarly(t *testing.T) {
 
 	ctx := context.Background()
 
-	miner.AddStorage(ctx, t, 1000000000, true, false)
-	miner.AddStorage(ctx, t, 1000000000, false, true)
+	miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
+		meta.Weight = 1000000000
+		meta.CanSeal = true
+	})
+	miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
+		meta.Weight = 1000000000
+		meta.CanStore = true
+	})
 
 	//stm: @STORAGE_LIST_001
 	sl, err := miner.StorageList(ctx)
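The hunks above replace AddStorage's positional arguments (weight, canSeal, canStore) with a callback over paths.LocalStorageMeta, so call sites stay readable as new knobs such as AllowTypes and DenyTypes are added. A hypothetical combined call under the new shape, with illustrative values:

// One path that both seals and stores, configured in a single callback.
miner.AddStorage(ctx, t, func(meta *paths.LocalStorageMeta) {
	meta.Weight = 1000000000
	meta.CanSeal = true
	meta.CanStore = true
})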
@@ -34,10 +34,10 @@ func TestMakeAvailable(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
+	CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
 
 	miner.PledgeSectors(ctx, 1, 0, nil)
-	sl, err := miner.SectorsList(ctx)
+	sl, err := miner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 	require.Len(t, sl, 1, "expected 1 sector")
 	require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
@@ -48,7 +48,7 @@ func TestMakeAvailable(t *testing.T) {
 	}
 	client.WaitForSectorActive(ctx, t, CCUpgrade, maddr)
 
-	sl, err = miner.SectorsList(ctx)
+	sl, err = miner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 	require.Len(t, sl, 1, "expected 1 sector")
 
@@ -64,7 +64,7 @@ func TestMakeAvailable(t *testing.T) {
 	outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false)
 	kit.AssertFilesEqual(t, inPath, outPath)
 
-	sl, err = miner.SectorsList(ctx)
+	sl, err = miner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 	require.Len(t, sl, 1, "expected 1 sector")
 
@@ -96,7 +96,7 @@ func TestMinerBalanceCollateral(t *testing.T) {
 	}
 
 	// check that sector messages had zero value set
-	sl, err := miner.SectorsList(ctx)
+	sl, err := miner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 
 	for _, number := range sl {
@@ -34,12 +34,12 @@ func TestPreferNoUpgrade(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
-	Sealed := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 2)
+	CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
+	Sealed := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
 
 	{
 		miner.PledgeSectors(ctx, 1, 0, nil)
-		sl, err := miner.SectorsList(ctx)
+		sl, err := miner.SectorsListNonGenesis(ctx)
 		require.NoError(t, err)
 		require.Len(t, sl, 1, "expected 1 sector")
 		require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
@@ -53,7 +53,7 @@ func TestPreferNoUpgrade(t *testing.T) {
 		err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
 		require.NoError(t, err)
 
-		sl, err = miner.SectorsList(ctx)
+		sl, err = miner.SectorsListNonGenesis(ctx)
 		require.NoError(t, err)
 		require.Len(t, sl, 1, "expected 1 sector")
 	}
@@ -68,7 +68,7 @@ func TestPreferNoUpgrade(t *testing.T) {
 		kit.AssertFilesEqual(t, inPath, outPath)
 	}
 
-	sl, err := miner.SectorsList(ctx)
+	sl, err := miner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 	require.Len(t, sl, 2, "expected 2 sectors")
 
@@ -31,11 +31,11 @@ func TestAbortUpgradeAvailable(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1)
+	CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner)
 	fmt.Printf("CCUpgrade: %d\n", CCUpgrade)
 
 	miner.PledgeSectors(ctx, 1, 0, nil)
-	sl, err := miner.SectorsList(ctx)
+	sl, err := miner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 	require.Len(t, sl, 1, "expected 1 sector")
 	require.Equal(t, CCUpgrade, sl[0], "unexpected sector number")
@@ -49,7 +49,7 @@ func TestAbortUpgradeAvailable(t *testing.T) {
 	err = miner.SectorMarkForUpgrade(ctx, sl[0], true)
 	require.NoError(t, err)
 
-	sl, err = miner.SectorsList(ctx)
+	sl, err = miner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 	require.Len(t, sl, 1, "expected 1 sector")
 
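SectorsList is replaced by SectorsListNonGenesis throughout these hunks, and the DefaultPresealsPerBootstrapMiner offsets shrink by one to match: the listing now excludes the genesis preseal sectors, so the first pledged sector is numbered DefaultPresealsPerBootstrapMiner. A sketch of the filtering such a helper plausibly performs, assuming preseals occupy the lowest sector numbers; the real helper lives in itests/kit and may differ:

// nonGenesis keeps only sector numbers at or above the preseal range.
func nonGenesis(all []abi.SectorNumber, preseals int) []abi.SectorNumber {
	var out []abi.SectorNumber
	for _, n := range all {
		if n >= abi.SectorNumber(preseals) {
			out = append(out, n)
		}
	}
	return out
}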
|
408
itests/splitstore_test.go
Normal file
408
itests/splitstore_test.go
Normal file
@ -0,0 +1,408 @@
|
|||||||
|
//stm: #integration
|
||||||
|
package itests
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
ipld "github.com/ipfs/go-ipld-format"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/big"
|
||||||
|
"github.com/filecoin-project/go-state-types/builtin"
|
||||||
|
miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||||
|
"github.com/filecoin-project/go-state-types/exitcode"
|
||||||
|
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
|
||||||
|
power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
lapi "github.com/filecoin-project/lotus/api"
|
||||||
|
"github.com/filecoin-project/lotus/blockstore/splitstore"
|
||||||
|
"github.com/filecoin-project/lotus/build"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
"github.com/filecoin-project/lotus/itests/kit"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Startup a node with hotstore and discard coldstore. Compact once and return
|
||||||
|
func TestHotstoreCompactsOnce(t *testing.T) {
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
// disable sync checking because efficient itests require that the node is out of sync : /
|
||||||
|
splitstore.CheckSyncGap = false
|
||||||
|
opts := []interface{}{kit.MockProofs(), kit.SplitstoreDiscard()}
|
||||||
|
full, genesisMiner, ens := kit.EnsembleMinimal(t, opts...)
|
||||||
|
bm := ens.InterconnectAll().BeginMining(4 * time.Millisecond)[0]
|
||||||
|
_ = full
|
||||||
|
_ = genesisMiner
|
||||||
|
_ = bm
|
||||||
|
|
||||||
|
waitForCompaction(ctx, t, 1, full)
|
||||||
|
require.NoError(t, genesisMiner.Stop(ctx))
|
||||||
|
}
|
||||||
|
|
||||||
|
// create some unreachable state
|
||||||
|
// and check that compaction carries it away
|
||||||
|
func TestHotstoreCompactCleansGarbage(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
// disable sync checking because efficient itests require that the node is out of sync : /
|
||||||
|
splitstore.CheckSyncGap = false
|
||||||
|
opts := []interface{}{kit.MockProofs(), kit.SplitstoreDiscard()}
|
||||||
|
full, genesisMiner, ens := kit.EnsembleMinimal(t, opts...)
|
||||||
|
bm := ens.InterconnectAll().BeginMining(4 * time.Millisecond)[0]
|
||||||
|
_ = full
|
||||||
|
_ = genesisMiner
|
||||||
|
|
||||||
|
// create garbage
|
||||||
|
g := NewGarbager(ctx, t, full)
|
||||||
|
garbage, e := g.Drop(ctx)
|
||||||
|
|
||||||
|
// calculate next compaction where we should actually see cleanup
|
||||||
|
|
||||||
|
// pause, check for compacting and get compaction info
|
||||||
|
// we do this to remove the (very unlikely) race where compaction index
|
||||||
|
// and compaction epoch are in the middle of update, or a whole compaction
|
||||||
|
// runs between the two
|
||||||
|
for {
|
||||||
|
bm.Pause()
|
||||||
|
if splitStoreCompacting(ctx, t, full) {
|
||||||
|
bm.Restart()
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastCompactionEpoch := splitStoreBaseEpoch(ctx, t, full)
|
||||||
|
garbageCompactionIndex := splitStoreCompactionIndex(ctx, t, full) + 1
|
||||||
|
boundary := lastCompactionEpoch + splitstore.CompactionThreshold - splitstore.CompactionBoundary
|
||||||
|
|
||||||
|
for e > boundary {
|
||||||
|
boundary += splitstore.CompactionThreshold - splitstore.CompactionBoundary
|
||||||
|
garbageCompactionIndex++
|
||||||
|
}
|
||||||
|
bm.Restart()
|
||||||
|
|
||||||
|
// wait for compaction to occur
|
||||||
|
waitForCompaction(ctx, t, garbageCompactionIndex, full)
|
||||||
|
|
||||||
|
// check that garbage is cleaned up
|
||||||
|
assert.False(t, g.Exists(ctx, garbage), "Garbage still exists in blockstore")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create unreachable state
|
||||||
|
// Check that it moves to coldstore
|
||||||
|
// Prune coldstore and check that it is deleted
|
||||||
|
func TestColdStorePrune(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
// disable sync checking because efficient itests require that the node is out of sync : /
|
||||||
|
splitstore.CheckSyncGap = false
|
||||||
|
opts := []interface{}{kit.MockProofs(), kit.SplitstoreUniversal(), kit.FsRepo()}
|
||||||
|
full, genesisMiner, ens := kit.EnsembleMinimal(t, opts...)
|
||||||
|
bm := ens.InterconnectAll().BeginMining(4 * time.Millisecond)[0]
|
||||||
|
_ = full
|
||||||
|
_ = genesisMiner
|
||||||
|
|
||||||
|
// create garbage
|
||||||
|
g := NewGarbager(ctx, t, full)
|
||||||
|
garbage, e := g.Drop(ctx)
|
||||||
|
assert.True(g.t, g.Exists(ctx, garbage), "Garbage not found in splitstore")
|
||||||
|
|
||||||
|
// calculate next compaction where we should actually see cleanup
|
||||||
|
|
||||||
|
// pause, check for compacting and get compaction info
|
||||||
|
// we do this to remove the (very unlikely) race where compaction index
|
||||||
|
// and compaction epoch are in the middle of update, or a whole compaction
|
||||||
|
// runs between the two
|
||||||
|
for {
|
||||||
|
bm.Pause()
|
||||||
|
if splitStoreCompacting(ctx, t, full) {
|
||||||
|
bm.Restart()
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastCompactionEpoch := splitStoreBaseEpoch(ctx, t, full)
|
||||||
|
garbageCompactionIndex := splitStoreCompactionIndex(ctx, t, full) + 1
|
||||||
|
boundary := lastCompactionEpoch + splitstore.CompactionThreshold - splitstore.CompactionBoundary
|
||||||
|
|
||||||
|
for e > boundary {
|
||||||
|
boundary += splitstore.CompactionThreshold - splitstore.CompactionBoundary
|
||||||
|
garbageCompactionIndex++
|
||||||
|
}
|
||||||
|
bm.Restart()
|
||||||
|
|
||||||
|
// wait for compaction to occur
|
||||||
|
waitForCompaction(ctx, t, garbageCompactionIndex, full)
|
||||||
|
|
||||||
|
bm.Pause()
|
||||||
|
|
||||||
|
// This data should now be moved to the coldstore.
|
||||||
|
// Access it without hotview to keep it there while checking that it still exists
|
||||||
|
// Only state compute uses hot view so garbager Exists backed by ChainReadObj is all good
|
||||||
|
assert.True(g.t, g.Exists(ctx, garbage), "Garbage not found in splitstore")
|
||||||
|
bm.Restart()
|
||||||
|
|
||||||
|
// wait for compaction to finsih and pause to make sure it doesn't start to avoid racing
|
||||||
|
for {
|
||||||
|
bm.Pause()
|
||||||
|
if splitStoreCompacting(ctx, t, full) {
|
||||||
|
bm.Restart()
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pruneOpts := api.PruneOpts{RetainState: int64(0), MovingGC: false}
|
||||||
|
require.NoError(t, full.ChainPrune(ctx, pruneOpts))
|
||||||
|
bm.Restart()
|
||||||
|
waitForPrune(ctx, t, 1, full)
|
||||||
|
assert.False(g.t, g.Exists(ctx, garbage), "Garbage should be removed from cold store after prune but it's still there")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAutoPrune(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
// disable sync checking because efficient itests require that the node is out of sync : /
|
||||||
|
splitstore.CheckSyncGap = false
|
||||||
|
opts := []interface{}{kit.MockProofs(), kit.SplitstoreUniversal(), kit.SplitstoreAutoPrune(), kit.FsRepo()}
|
||||||
|
full, genesisMiner, ens := kit.EnsembleMinimal(t, opts...)
|
||||||
|
bm := ens.InterconnectAll().BeginMining(4 * time.Millisecond)[0]
|
||||||
|
_ = full
|
||||||
|
_ = genesisMiner
|
||||||
|
|
||||||
|
// create garbage
|
||||||
|
g := NewGarbager(ctx, t, full)
|
||||||
|
garbage, e := g.Drop(ctx)
|
||||||
|
assert.True(g.t, g.Exists(ctx, garbage), "Garbage not found in splitstore")
|
||||||
|
|
||||||
|
// calculate next compaction where we should actually see cleanup
|
||||||
|
|
||||||
|
// pause, check for compacting and get compaction info
|
||||||
|
// we do this to remove the (very unlikely) race where compaction index
|
||||||
|
// and compaction epoch are in the middle of update, or a whole compaction
|
||||||
|
// runs between the two
|
||||||
|
for {
|
||||||
|
bm.Pause()
|
||||||
|
if splitStoreCompacting(ctx, t, full) {
|
||||||
|
bm.Restart()
|
||||||
|
time.Sleep(3 * time.Second)
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastCompactionEpoch := splitStoreBaseEpoch(ctx, t, full)
|
||||||
|
garbageCompactionIndex := splitStoreCompactionIndex(ctx, t, full) + 1
|
||||||
|
boundary := lastCompactionEpoch + splitstore.CompactionThreshold - splitstore.CompactionBoundary
|
||||||
|
|
||||||
|
for e > boundary {
|
||||||
|
boundary += splitstore.CompactionThreshold - splitstore.CompactionBoundary
|
||||||
|
garbageCompactionIndex++
|
||||||
|
}
|
||||||
|
bm.Restart()
|
||||||
|
|
||||||
|
// wait for compaction to occur
|
||||||
|
waitForCompaction(ctx, t, garbageCompactionIndex, full)
|
||||||
|
|
||||||
|
bm.Pause()
|
||||||
|
|
||||||
|
// This data should now be moved to the coldstore.
|
||||||
|
// Access it without hotview to keep it there while checking that it still exists
|
||||||
|
// Only state compute uses hot view so garbager Exists backed by ChainReadObj is all good
|
||||||
|
assert.True(g.t, g.Exists(ctx, garbage), "Garbage not found in splitstore")
|
||||||
|
bm.Restart()
|
||||||
|
waitForPrune(ctx, t, 1, full)
|
||||||
|
assert.False(g.t, g.Exists(ctx, garbage), "Garbage should be removed from cold store through auto prune but it's still there")
|
||||||
|
}
|
||||||
|
|
||||||
|
func waitForCompaction(ctx context.Context, t *testing.T, cIdx int64, n *kit.TestFullNode) {
|
||||||
|
for {
|
||||||
|
if splitStoreCompactionIndex(ctx, t, n) >= cIdx {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func waitForPrune(ctx context.Context, t *testing.T, pIdx int64, n *kit.TestFullNode) {
|
||||||
|
for {
|
||||||
|
if splitStorePruneIndex(ctx, t, n) >= pIdx {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
time.Sleep(1 * time.Second)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func splitStoreCompacting(ctx context.Context, t *testing.T, n *kit.TestFullNode) bool {
|
||||||
|
info, err := n.ChainBlockstoreInfo(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
compactingRaw, ok := info["compacting"]
|
||||||
|
require.True(t, ok, "compactions not on blockstore info")
|
||||||
|
compacting, ok := compactingRaw.(bool)
|
||||||
|
require.True(t, ok, "compacting key on blockstore info wrong type")
|
||||||
|
return compacting
|
||||||
|
}
|
||||||
|
|
||||||
|
func splitStoreBaseEpoch(ctx context.Context, t *testing.T, n *kit.TestFullNode) abi.ChainEpoch {
|
||||||
|
info, err := n.ChainBlockstoreInfo(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
baseRaw, ok := info["base epoch"]
|
||||||
|
require.True(t, ok, "'base epoch' not on blockstore info")
|
||||||
|
base, ok := baseRaw.(abi.ChainEpoch)
|
||||||
|
require.True(t, ok, "base epoch key on blockstore info wrong type")
|
||||||
|
return base
|
||||||
|
}
|
||||||
|
|
||||||
|
func splitStoreCompactionIndex(ctx context.Context, t *testing.T, n *kit.TestFullNode) int64 {
|
||||||
|
info, err := n.ChainBlockstoreInfo(ctx)
|
||||||
|
require.NoError(t, err)
|
||||||
|
compact, ok := info["compactions"]
|
||||||
|
require.True(t, ok, "compactions not on blockstore info")
|
||||||
|
compactionIndex, ok := compact.(int64)
|
||||||
|
require.True(t, ok, "compaction key on blockstore info wrong type")
|
||||||
|
return compactionIndex
|
||||||
|
}
|
||||||
|
|
||||||
|
func splitStorePruneIndex(ctx context.Context, t *testing.T, n *kit.TestFullNode) int64 {
|
||||||
|
info, err := n.ChainBlockstoreInfo(ctx)
|
||||||
|
	require.NoError(t, err)
	prune, ok := info["prunes"]
	require.True(t, ok, "prunes not on blockstore info")
	pruneIndex, ok := prune.(int64)
	require.True(t, ok, "prune key on blockstore info wrong type")
	return pruneIndex
}

// Garbager creates on-chain unreachable garbage for a network to exercise
// splitstore, one garbage cid at a time.
//
// It works by rewriting an internally maintained miner actor's peer ID.
type Garbager struct {
	t      *testing.T
	node   *kit.TestFullNode
	latest trashID

	// internal tracking
	maddr4Data address.Address
}

type trashID uint8

func NewGarbager(ctx context.Context, t *testing.T, n *kit.TestFullNode) *Garbager {
	// create a miner actor for writing garbage
	g := &Garbager{
		t:          t,
		node:       n,
		latest:     0,
		maddr4Data: address.Undef,
	}
	g.createMiner(ctx)
	g.newPeerID(ctx)
	return g
}

// Drop returns the cid referencing the dropped garbage and the chain epoch of the drop.
func (g *Garbager) Drop(ctx context.Context) (cid.Cid, abi.ChainEpoch) {
	// record the existing miner info cid with mInfoCid
	c := g.mInfoCid(ctx)

	// update trashID and create a new peer ID, dropping miner info cid c in the
	// process; wait for the message and return the chain height of the drop
	g.latest++
	return c, g.newPeerID(ctx)
}

// Exists checks whether the cid is reachable through the node.
func (g *Garbager) Exists(ctx context.Context, c cid.Cid) bool {
	// check chain get / blockstore get
	_, err := g.node.ChainReadObj(ctx, c)
	if ipld.IsNotFound(err) {
		return false
	} else if err != nil {
		g.t.Fatalf("ChainReadObj failure on existence check: %s", err)
	}
	return true
}

func (g *Garbager) newPeerID(ctx context.Context) abi.ChainEpoch {
	dataStr := fmt.Sprintf("Garbager-Data-%d", g.latest)
	dataID := []byte(dataStr)
	params, err := actors.SerializeParams(&miner2.ChangePeerIDParams{NewID: dataID})
	require.NoError(g.t, err)

	msg := &types.Message{
		To:     g.maddr4Data,
		From:   g.node.DefaultKey.Address,
		Method: builtin.MethodsMiner.ChangePeerID,
		Params: params,
		Value:  types.NewInt(0),
	}

	signed, err2 := g.node.MpoolPushMessage(ctx, msg, nil)
	require.NoError(g.t, err2)

	mw, err2 := g.node.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
	require.NoError(g.t, err2)
	require.Equal(g.t, exitcode.Ok, mw.Receipt.ExitCode)
	return mw.Height
}

func (g *Garbager) mInfoCid(ctx context.Context) cid.Cid {
	ts, err := g.node.ChainHead(ctx)
	require.NoError(g.t, err)

	act, err := g.node.StateGetActor(ctx, g.maddr4Data, ts.Key())
	require.NoError(g.t, err)
	raw, err := g.node.ChainReadObj(ctx, act.Head)
	require.NoError(g.t, err)
	var mSt miner8.State
	require.NoError(g.t, mSt.UnmarshalCBOR(bytes.NewReader(raw)))

	// return infoCid
	return mSt.Info
}

func (g *Garbager) createMiner(ctx context.Context) {
	require.True(g.t, g.maddr4Data == address.Undef, "garbager miner actor already created")
	owner, err := g.node.WalletDefaultAddress(ctx)
	require.NoError(g.t, err)
	worker := owner

	params, err := actors.SerializeParams(&power6.CreateMinerParams{
		Owner:               owner,
		Worker:              worker,
		WindowPoStProofType: abi.RegisteredPoStProof_StackedDrgWindow32GiBV1,
	})
	require.NoError(g.t, err)

	createStorageMinerMsg := &types.Message{
		To:    power.Address,
		From:  worker,
		Value: big.Zero(),

		Method: power.Methods.CreateMiner,
		Params: params,
	}

	signed, err := g.node.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
	require.NoError(g.t, err)
	mw, err := g.node.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true)
	require.NoError(g.t, err)
	require.True(g.t, mw.Receipt.ExitCode == 0, "garbager's internal create miner message failed")

	var retval power6.CreateMinerReturn
	require.NoError(g.t, retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)))
	g.maddr4Data = retval.IDAddress
}
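For orientation, a splitstore test might drive the Garbager like this: a minimal sketch, assuming the itests kit helpers (the test name, ensemble options, and the compaction wait are illustrative, not the repository's actual splitstore test):

	func TestGarbagerSketch(t *testing.T) {
		ctx := context.Background()
		full, _, ens := kit.EnsembleMinimal(t, kit.MockProofs())
		ens.InterconnectAll().BeginMining(10 * time.Millisecond)

		g := NewGarbager(ctx, t, full)

		// Drop makes the previous miner-info cid unreachable from the chain.
		garbage, dropEpoch := g.Drop(ctx)
		require.True(t, g.Exists(ctx, garbage), "dropped cid should still be present before GC")

		// A real test would now wait for splitstore compaction to advance past
		// dropEpoch and then assert !g.Exists(ctx, garbage).
		_ = dropEpoch
	}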
@@ -94,7 +94,7 @@ func TestWindowPostDispute(t *testing.T) {
 	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)))

 	//stm: @MINER_SECTOR_LIST_001
-	evilSectors, err := evilMiner.SectorsList(ctx)
+	evilSectors, err := evilMiner.SectorsListNonGenesis(ctx)
 	require.NoError(t, err)
 	evilSectorNo := evilSectors[0] // only one.
 	//stm: @CHAIN_STATE_SECTOR_PARTITION_001
itests/wdpost_no_miner_storage_test.go (new file, 66 lines)
@@ -0,0 +1,66 @@
package itests

import (
	"context"
	"testing"
	"time"

	logging "github.com/ipfs/go-log/v2"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/itests/kit"
	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
)

func TestWindowPostNoMinerStorage(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_ = logging.SetLogLevel("storageminer", "INFO")

	sealSectors := 2
	presealSectors := 2*48*2 - sealSectors

	sectors := presealSectors + sealSectors

	var (
		client          kit.TestFullNode
		miner           kit.TestMiner
		wiw, wdw, sealw kit.TestWorker
	)
	ens := kit.NewEnsemble(t, kit.LatestActorsAt(-1)).
		FullNode(&client, kit.ThroughRPC()).
		Miner(&miner, &client, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.PresealSectors(presealSectors), kit.NoStorage()).
		Worker(&miner, &wiw, kit.ThroughRPC(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWinningPoSt})).
		Worker(&miner, &wdw, kit.ThroughRPC(), kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTGenerateWindowPoSt})).
		Worker(&miner, &sealw, kit.ThroughRPC(), kit.WithSealWorkerTasks).
		Start()

	ens.InterconnectAll().BeginMiningMustPost(2 * time.Millisecond)

	miner.PledgeSectors(ctx, sealSectors, 0, nil)

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)
	di = di.NextNotElapsed()

	// wait for the new sectors to become active
	waitUntil := di.Close + di.WPoStChallengeWindow*2 + di.WPoStProvingPeriod
	t.Logf("Wait Height > %d", waitUntil)

	ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil))
	t.Logf("Now Height = %d", ts.Height())

	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	ssz, err := miner.ActorSectorSize(ctx, maddr)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(sectors)))
}
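As a quick check on the sector arithmetic in this test, a standalone sketch (not part of the test file):

	package main

	import "fmt"

	func main() {
		sealSectors := 2
		presealSectors := 2*48*2 - sealSectors // 192 - 2 = 190
		sectors := presealSectors + sealSectors
		fmt.Println(sectors) // 192; the final assertion expects raw-byte power of ssz * 192
	}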
@@ -2,11 +2,13 @@ package itests

 import (
 	"context"
+	"encoding/json"
 	"strings"
 	"sync/atomic"
 	"testing"
 	"time"

+	"github.com/google/uuid"
 	logging "github.com/ipfs/go-log/v2"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/xerrors"
@@ -14,6 +16,7 @@ import (
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"

+	"github.com/filecoin-project/lotus/api"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/itests/kit"
@@ -21,6 +24,7 @@ import (
 	"github.com/filecoin-project/lotus/node/impl"
 	"github.com/filecoin-project/lotus/node/repo"
 	"github.com/filecoin-project/lotus/storage/paths"
+	sealing "github.com/filecoin-project/lotus/storage/pipeline"
 	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
 	"github.com/filecoin-project/lotus/storage/sealer/storiface"
 	"github.com/filecoin-project/lotus/storage/wdpost"
@@ -29,7 +33,7 @@ import (
 func TestWorkerPledge(t *testing.T) {
 	ctx := context.Background()
 	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
-		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, sealtasks.TTUnseal})) // no mock proofs
+		kit.WithSealWorkerTasks) // no mock proofs

 	ens.InterconnectAll().BeginMining(50 * time.Millisecond)

@@ -43,7 +47,7 @@ func TestWorkerPledge(t *testing.T) {
 func TestWorkerPledgeSpread(t *testing.T) {
 	ctx := context.Background()
 	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(),
-		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, sealtasks.TTUnseal}),
+		kit.WithSealWorkerTasks,
 		kit.WithAssigner("spread"),
 	) // no mock proofs

@@ -59,7 +63,7 @@ func TestWorkerPledgeSpread(t *testing.T) {
 func TestWorkerPledgeLocalFin(t *testing.T) {
 	ctx := context.Background()
 	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(),
-		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, sealtasks.TTUnseal}),
+		kit.WithSealWorkerTasks,
 		kit.WithDisallowRemoteFinalize(true),
 	) // no mock proofs
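The recurring change in these hunks swaps the hand-maintained task list for the bundled kit.WithSealWorkerTasks option. A minimal before/after sketch in the itests package, under the assumption that the bundled option covers the same seal-pipeline tasks the old lists enumerated (the test name is hypothetical):

	func TestSealWorkerTasksSketch(t *testing.T) {
		// Before: every seal task spelled out by hand.
		_, _, _, _ = kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(),
			kit.WithTaskTypes([]sealtasks.TaskType{
				sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTAddPiece,
				sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit2, sealtasks.TTUnseal,
			}))

		// After: one option that the kit keeps in sync with the seal pipeline.
		_, _, _, _ = kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithSealWorkerTasks)
	}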
@@ -401,3 +405,112 @@ func TestWindowPostWorkerManualPoSt(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, lastPending, 0)
 }
+
+func TestSchedulerRemoveRequest(t *testing.T) {
+	ctx := context.Background()
+	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithNoLocalSealing(true),
+		kit.WithTaskTypes([]sealtasks.TaskType{sealtasks.TTFetch, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTDataCid, sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit2, sealtasks.TTUnseal})) // no mock proofs
+
+	ens.InterconnectAll().BeginMining(50 * time.Millisecond)
+
+	e, err := worker.Enabled(ctx)
+	require.NoError(t, err)
+	require.True(t, e)
+
+	type info struct {
+		CallToWork struct {
+		} `json:"CallToWork"`
+		EarlyRet     interface{} `json:"EarlyRet"`
+		ReturnedWork interface{} `json:"ReturnedWork"`
+		SchedInfo    struct {
+			OpenWindows []string `json:"OpenWindows"`
+			Requests    []struct {
+				Priority int    `json:"Priority"`
+				SchedID  string `json:"SchedId"`
+				Sector   struct {
+					Miner  int `json:"Miner"`
+					Number int `json:"Number"`
+				} `json:"Sector"`
+				TaskType string `json:"TaskType"`
+			} `json:"Requests"`
+		} `json:"SchedInfo"`
+		Waiting interface{} `json:"Waiting"`
+	}
+
+	tocheck := miner.StartPledge(ctx, 1, 0, nil)
+	var sn abi.SectorNumber
+	for n := range tocheck {
+		sn = n
+	}
+	// Keep checking until the sector state is PC2; the request should get stuck, as the worker cannot process PC2
+	for {
+		st, err := miner.SectorsStatus(ctx, sn, false)
+		require.NoError(t, err)
+		if st.State == api.SectorState(sealing.PreCommit2) {
+			break
+		}
+		time.Sleep(time.Second)
+	}
+
+	// Dump current scheduler info
+	schedb, err := miner.SealingSchedDiag(ctx, false)
+	require.NoError(t, err)
+
+	j, err := json.MarshalIndent(&schedb, "", "  ")
+	require.NoError(t, err)
+
+	var b info
+	err = json.Unmarshal(j, &b)
+	require.NoError(t, err)
+
+	var schedidb uuid.UUID
+
+	// Decode the scheduler info and get the request UUID, then call SealingRemoveRequest()
+	require.Len(t, b.SchedInfo.Requests, 1)
+	require.Equal(t, "seal/v0/precommit/2", b.SchedInfo.Requests[0].TaskType)
+
+	schedidb, err = uuid.Parse(b.SchedInfo.Requests[0].SchedID)
+	require.NoError(t, err)
+
+	err = miner.SealingRemoveRequest(ctx, schedidb)
+	require.NoError(t, err)
+
+	// Dump the scheduler again and check whether a request is still present;
+	// if none is present, the test passes
+	scheda, err := miner.SealingSchedDiag(ctx, false)
+	require.NoError(t, err)
+
+	k, err := json.MarshalIndent(&scheda, "", "  ")
+	require.NoError(t, err)
+
+	var a info
+	err = json.Unmarshal(k, &a)
+	require.NoError(t, err)
+
+	require.Len(t, a.SchedInfo.Requests, 0)
+}
+
+func TestWorkerName(t *testing.T) {
+	name := "thisstringisprobablynotahostnameihope"
+
+	ctx := context.Background()
+	_, miner, worker, ens := kit.EnsembleWorker(t, kit.WithAllSubsystems(), kit.ThroughRPC(), kit.WithWorkerName(name))
+
+	ens.InterconnectAll().BeginMining(50 * time.Millisecond)
+
+	e, err := worker.Info(ctx)
+	require.NoError(t, err)
+	require.Equal(t, name, e.Hostname)
+
+	ws, err := miner.WorkerStats(ctx)
+	require.NoError(t, err)
+
+	var found bool
+	for _, stats := range ws {
+		if stats.Info.Hostname == name {
+			found = true
+		}
+	}
+
+	require.True(t, found)
+}
@@ -160,3 +160,10 @@ func (a *Alerting) GetAlerts() []Alert {

 	return out
 }
+
+func (a *Alerting) IsRaised(at AlertType) bool {
+	a.lk.Lock()
+	defer a.lk.Unlock()
+
+	return a.alerts[at].Active
+}
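A minimal usage sketch for the new accessor, written as if inside the same alerting package; the caller and logger are illustrative:

	// reportIfRaised invokes log when the given alert is active. IsRaised is
	// safe for concurrent use because it takes the Alerting mutex internally.
	func reportIfRaised(a *Alerting, at AlertType, log func(msg string)) {
		if a.IsRaised(at) {
			log("alert currently raised")
		}
	}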
@@ -96,6 +96,7 @@ func DefaultFullNode() *FullNode {
 				MarkSetType: "badger",

 				HotStoreFullGCFrequency:  20,
+				ColdStoreFullGCFrequency: 7,
 			},
 		},
 	}
@@ -844,6 +844,13 @@ This parameter is ONLY applicable if the retrieval pricing policy strategy has b

 			Comment: ``,
 		},
+		{
+			Name: "LocalWorkerName",
+			Type: "string",
+
+			Comment: `LocalWorkerName specifies a custom name for the builtin worker.
+If set to an empty string (the default), the OS hostname will be used`,
+		},
 		{
 			Name: "Assigner",
 			Type: "string",
@@ -1099,6 +1106,30 @@ the compaction boundary; default is 0.`,
 A value of 0 disables, while a value of 1 will do full GC in every compaction.
 Default is 20 (about once a week).`,
 		},
+		{
+			Name: "EnableColdStoreAutoPrune",
+			Type: "bool",
+
+			Comment: `EnableColdStoreAutoPrune turns on compaction of the cold store, i.e. pruning;
+where hotstore compaction occurs every finality, pruning happens every 3 finalities.
+Default is false`,
+		},
+		{
+			Name: "ColdStoreFullGCFrequency",
+			Type: "uint64",
+
+			Comment: `ColdStoreFullGCFrequency specifies how often to perform a full (moving) GC on the coldstore.
+Only applies if auto prune is enabled. A value of 0 disables, while a value of 1 will do
+full GC in every prune.
+Default is 7 (about once a week)`,
+		},
+		{
+			Name: "ColdStoreRetention",
+			Type: "int64",
+
+			Comment: `ColdStoreRetention specifies the retention policy for data reachable from the chain, in
+finalities beyond the compaction boundary; default is 0, and -1 retains everything`,
+		},
 	},
 	"StorageMiner": []DocField{
 		{
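To make the three new knobs concrete, a configuration sketch; the Chainstore.Splitstore field path is assumed from lotus's FullNode config, and the values are illustrative:

	package main

	import "github.com/filecoin-project/lotus/node/config"

	func main() {
		// Sketch: tune cold-store pruning on a default full-node config.
		cfg := config.DefaultFullNode()
		cfg.Chainstore.Splitstore.EnableColdStoreAutoPrune = true // prune every 3 finalities
		cfg.Chainstore.Splitstore.ColdStoreFullGCFrequency = 7    // full (moving) GC about once a week
		cfg.Chainstore.Splitstore.ColdStoreRetention = -1         // keep everything beyond the compaction boundary
	}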
@@ -65,6 +65,8 @@ func (c *StorageMiner) StorageManager() sealer.Config {
 		ResourceFiltering:      c.Storage.ResourceFiltering,
 		DisallowRemoteFinalize: c.Storage.DisallowRemoteFinalize,

+		LocalWorkerName: c.Storage.LocalWorkerName,
+
 		Assigner: c.Storage.Assigner,

 		ParallelCheckLimit: c.Proving.ParallelCheckLimit,
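And a matching sketch for the miner side: Storage.LocalWorkerName flows into sealer.Config via StorageManager() as the hunk above shows (the worker name value is illustrative):

	package main

	import "github.com/filecoin-project/lotus/node/config"

	func main() {
		cfg := config.DefaultStorageMiner()
		cfg.Storage.LocalWorkerName = "sealer-01" // left empty, the OS hostname is used
		mgrCfg := cfg.StorageManager()            // carries LocalWorkerName through to the sealer
		_ = mgrCfg
	}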
Some files were not shown because too many files have changed in this diff.