Merge branch 'release/v1.19.0' into jen/19

commit 8574fa1350

.circleci/config.yml
@@ -23,11 +23,6 @@ executors:

commands:
install-deps:
steps:
- run: |
sudo apt update
sudo apt install python-is-python3
prepare:
parameters:
linux:
@@ -41,9 +36,8 @@ commands:
steps:
- checkout
- git_fetch_all_tags
- checkout
- when:
condition: << parameters.linux >>
condition: <<parameters.linux>>
steps:
- run:
name: Check Go Version
@@ -56,6 +50,8 @@ commands:
fi
- run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
- run: sudo apt-get install python-is-python3

- when:
condition: <<parameters.darwin>>
steps:
@@ -70,21 +66,12 @@ commands:
echo 'export GOPATH="${HOME}/go"' >> $BASH_ENV
- run: go version
- run:
name: Install pkg-config, goreleaser, and sha512sum
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config goreleaser/tap/goreleaser coreutils
name: Install dependencies with Homebrew
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config coreutils jq hwloc
- run:
name: Install Rust
command: |
curl https://sh.rustup.rs -sSf | sh -s -- -y
- run:
name: Install hwloc
command: |
mkdir ~/hwloc
curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
cd ~/hwloc
tar -xvzpf hwloc-2.4.1.tar.gz
cd hwloc-2.4.1
./configure && make && sudo make install
- run: git submodule sync
- run: git submodule update --init
download-params:
@@ -104,28 +91,13 @@ commands:
install_ipfs:
steps:
- run: |
apt update
apt install -y wget
wget https://github.com/ipfs/go-ipfs/releases/download/v0.12.2/go-ipfs_v0.12.2_linux-amd64.tar.gz
wget https://github.com/ipfs/go-ipfs/releases/download/v0.12.2/go-ipfs_v0.12.2_linux-amd64.tar.gz.sha512
if [ "$(sha512sum go-ipfs_v0.12.2_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.12.2_linux-amd64.tar.gz.sha512)" ]
then
echo "ipfs failed checksum check"
exit 1
fi
tar -xf go-ipfs_v0.12.2_linux-amd64.tar.gz
mv go-ipfs/ipfs /usr/local/bin/ipfs
chmod +x /usr/local/bin/ipfs
install_ipfs_macos:
steps:
- run: |
curl -O https://dist.ipfs.io/kubo/v0.14.0/kubo_v0.14.0_darwin-amd64.tar.gz
tar -xvzf kubo_v0.14.0_darwin-amd64.tar.gz
curl -O https://dist.ipfs.tech/kubo/v0.16.0/kubo_v0.16.0_linux-amd64.tar.gz
tar -xvzf kubo_v0.16.0_linux-amd64.tar.gz
pushd kubo
sudo bash install.sh
popd
rm -rf kubo/
rm kubo_v0.14.0_darwin-amd64.tar.gz
rm -rf kubo
rm kubo_v0.16.0_linux-amd64.tar.gz
git_fetch_all_tags:
steps:
- run:
@@ -150,13 +122,12 @@ commands:
- run:
name: "Run a packer build"
command: packer build << parameters.args >> << parameters.template >>
no_output_timeout: 30m
no_output_timeout: 1h

jobs:
mod-tidy-check:
executor: golang
steps:
- install-deps
- prepare
- run: go mod tidy -v
- run:
@@ -164,37 +135,6 @@ jobs:
command: |
git --no-pager diff go.mod go.sum
git --no-pager diff --quiet go.mod go.sum
build-linux:
executor: golang
steps:
- install-deps
- prepare
- run: sudo apt-get update
- run: sudo apt-get install npm
- run:
command: make buildall
- run:
name: check tag and version output match
command: ./scripts/version-check.sh ./lotus
- store_artifacts:
path: lotus
- store_artifacts:
path: lotus-miner
- store_artifacts:
path: lotus-worker
- run: mkdir linux && mv lotus lotus-miner lotus-worker linux/
- persist_to_workspace:
root: "."
paths:
- linux

build-debug:
executor: golang
steps:
- install-deps
- prepare
- run:
command: make debug

test:
description: |
@@ -224,7 +164,6 @@ jobs:
description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run:
command: make deps lotus
@@ -268,7 +207,6 @@ jobs:
submodule is used.
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run:
command: make deps lotus
@@ -311,121 +249,104 @@ jobs:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/conformance-coverage.html
build-ntwk-calibration:
description: |
Compile lotus binaries for the calibration network
parameters:
<<: *test-params
executor: << parameters.executor >>

build-linux-amd64:
executor: golang
steps:
- install-deps
- prepare
- run: make calibnet
- run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet
- run: make lotus lotus-miner lotus-worker
- run:
name: check tag and version output match
command: ./scripts/version-check.sh ./lotus
- run: |
mkdir -p /tmp/workspace/linux_amd64_v1 && \
mv lotus lotus-miner lotus-worker /tmp/workspace/linux_amd64_v1/
- persist_to_workspace:
root: "."
root: /tmp/workspace
paths:
- linux-calibrationnet
build-ntwk-butterfly:
description: |
Compile lotus binaries for the butterfly network
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run: make butterflynet
- run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet
- persist_to_workspace:
root: "."
paths:
- linux-butterflynet
build-lotus-soup:
description: |
Compile `lotus-soup` Testground test plan
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run: cd extern/filecoin-ffi && make
- run:
name: "go get lotus@master"
command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy
- run:
name: "build lotus-soup testplan"
command: pushd testplans/lotus-soup && go build -tags=testground .
trigger-testplans:
description: |
Trigger `lotus-soup` test cases on TaaS
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run:
name: "download testground"
command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli
- run:
name: "prepare .env.toml"
command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
- run:
name: "prepare testground home dir and link test plans"
command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup
- run:
name: "go get lotus@master"
command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
- run:
name: "trigger deals baseline testplan on taas"
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
- run:
name: "trigger payment channel stress testplan on taas"
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
build-macos:
- linux_amd64_v1

build-darwin-amd64:
description: build darwin lotus binary
parameters:
publish:
default: false
description: publish github release and homebrew?
type: boolean
working_directory: ~/go/src/github.com/filecoin-project/lotus
macos:
xcode: "13.4.1"
working_directory: ~/go/src/github.com/filecoin-project/lotus
steps:
- prepare:
linux: false
darwin: true
- install_ipfs_macos
- restore_cache:
name: restore cargo cache
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
- run: make lotus lotus-miner lotus-worker
- run:
name: check tag and version output match
command: ./scripts/version-check.sh ./lotus
- run: |
mkdir -p /tmp/workspace/darwin_amd64_v1 && \
mv lotus lotus-miner lotus-worker /tmp/workspace/darwin_amd64_v1/
- persist_to_workspace:
root: /tmp/workspace
paths:
- darwin_amd64_v1

build-darwin-arm64:
description: self-hosted m1 runner
working_directory: ~/go/src/github.com/filecoin-project/lotus
machine: true
resource_class: filecoin-project/self-hosted-m1
steps:
- run: echo 'export PATH=/opt/homebrew/bin:"$PATH"' >> "$BASH_ENV"
- prepare:
linux: false
darwin: true
- run: |
export CPATH=$(brew --prefix)/include
export LIBRARY_PATH=$(brew --prefix)/lib
make lotus lotus-miner lotus-worker
- run:
name: check tag and version output match
command: ./scripts/version-check.sh ./lotus
- run: |
mkdir -p /tmp/workspace/darwin_arm64 && \
mv lotus lotus-miner lotus-worker /tmp/workspace/darwin_arm64/
- persist_to_workspace:
root: /tmp/workspace
paths:
- darwin_arm64
- run:
command: make clean
when: always
- run:
name: cleanup homebrew
command: HOMEBREW_NO_AUTO_UPDATE=1 brew uninstall pkg-config coreutils jq hwloc
when: always

release:
executor: golang
parameters:
dry-run:
default: false
description: should this release actually publish it's artifacts?
type: boolean
steps:
- checkout
- run: |
echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list
sudo apt update
sudo apt install goreleaser-pro
- install_ipfs
- attach_workspace:
at: /tmp/workspace
- when:
condition: << parameters.publish >>
condition: << parameters.dry-run >>
steps:
- run: goreleaser release --rm-dist
- run: goreleaser release --rm-dist --snapshot --debug
- run: ./scripts/generate-checksums.sh
- run: ./scripts/publish-checksums.sh
- when:
condition:
not: << parameters.publish >>
not: << parameters.dry-run >>
steps:
- run: goreleaser release --rm-dist --snapshot
- run: goreleaser release --rm-dist --debug
- run: ./scripts/generate-checksums.sh
- store_artifacts:
path: dist
- persist_to_workspace:
root: "."
paths:
- dist
- save_cache:
name: save cargo cache
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
paths:
- "~/.rustup"
- "~/.cargo"
- run: ./scripts/publish-checksums.sh

build-appimage:
machine:
@@ -433,7 +354,7 @@ jobs:
steps:
- checkout
- attach_workspace:
at: "."
at: /tmp/workspace
- run:
name: Update Go
command: |
@@ -468,13 +389,11 @@ jobs:
command: |
sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml
make appimage
- run:
name: prepare workspace
command: |
mkdir appimage
mv Lotus-*.AppImage appimage
- run: |
mkdir -p /tmp/workspace/appimage && \
mv Lotus-*.AppImage /tmp/workspace/appimage/
- persist_to_workspace:
root: "."
root: /tmp/workspace
paths:
- appimage

@@ -482,7 +401,6 @@ jobs:
gofmt:
executor: golang
steps:
- install-deps
- prepare
- run:
command: "! go fmt ./... 2>&1 | read"
@@ -490,7 +408,6 @@ jobs:
gen-check:
executor: golang
steps:
- install-deps
- prepare
- run: make deps
- run: go install golang.org/x/tools/cmd/goimports
@@ -505,7 +422,6 @@ jobs:
docs-check:
executor: golang
steps:
- install-deps
- prepare
- run: go install golang.org/x/tools/cmd/goimports
- run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
@@ -542,7 +458,6 @@ jobs:
Arguments to pass to golangci-lint
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run:
command: make deps
@@ -570,13 +485,13 @@ jobs:
steps:
- run:
name: Install git jq curl
command: apt update && apt install -y git jq curl
command: apt update && apt install -y git jq curl sudo
- checkout
- git_fetch_all_tags
- checkout
- install_ipfs
- attach_workspace:
at: "."
at: /tmp/workspace
- when:
condition: << parameters.linux >>
steps:
@@ -751,8 +666,6 @@ jobs:
name: packer
steps:
- checkout
- attach_workspace:
at: "."
- packer_build:
template: tools/packer/lotus-snap.pkr.hcl
publish-dockerhub:
@@ -896,6 +809,11 @@ workflows:
suite: itest-deals_publish
target: "./itests/deals_publish_test.go"

- test:
name: test-itest-deals_remote_retrieval
suite: itest-deals_remote_retrieval
target: "./itests/deals_remote_retrieval_test.go"

- test:
name: test-itest-deals_retry_deal_no_funds
suite: itest-deals_retry_deal_no_funds
@@ -951,6 +869,11 @@ workflows:
suite: itest-mpool_msg_uuid
target: "./itests/mpool_msg_uuid_test.go"

- test:
name: test-itest-mpool_push_with_uuid
suite: itest-mpool_push_with_uuid
target: "./itests/mpool_push_with_uuid_test.go"

- test:
name: test-itest-multisig
suite: itest-multisig
@@ -991,6 +914,11 @@ workflows:
suite: itest-pending_deal_allocation
target: "./itests/pending_deal_allocation_test.go"

- test:
name: test-itest-raft_messagesigner
suite: itest-raft_messagesigner
target: "./itests/raft_messagesigner_test.go"

- test:
name: test-itest-remove_verifreg_datacap
suite: itest-remove_verifreg_datacap
@@ -1130,47 +1058,11 @@ workflows:
suite: conformance-bleeding-edge
target: "./conformance"
vectors-branch: specs-actors-v7
- trigger-testplans:
filters:
branches:
only:
- master
- build-debug
- build-linux:
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-ntwk-calibration:
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-ntwk-butterfly:
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-lotus-soup
- build-macos:
name: publish-macos
publish: true
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+$/
- build-macos:
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
tags:
only:
- /^v\d+\.\d+\.\d+-rc\d+$/
- build-appimage:

release:
jobs:
- build-linux-amd64:
name: "Build ( linux / amd64 )"
filters:
branches:
only:
@@ -1178,11 +1070,30 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish:
name: publish-linux
linux: true
- build-darwin-amd64:
name: "Build ( darwin / amd64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-darwin-arm64:
name: "Build ( darwin / arm64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- release:
name: "Release"
requires:
- build-linux
- "Build ( darwin / amd64 )"
- "Build ( linux / amd64 )"
- "Build ( darwin / arm64 )"
filters:
branches:
ignore:
@@ -1190,11 +1101,31 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- release:
name: "Release (dry-run)"
dry-run: true
requires:
- "Build ( darwin / amd64 )"
- "Build ( linux / amd64 )"
- "Build ( darwin / arm64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- build-appimage:
name: "Build AppImage"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish:
name: publish-appimage
name: "Publish AppImage"
appimage: true
requires:
- build-appimage
- "Build AppImage"
filters:
branches:
ignore:
@@ -1203,14 +1134,14 @@ workflows:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-and-push-image:
name: build-and-push/lotus-all-in-one
name: "Publish ECR (lotus-all-in-one)"
dockerfile: Dockerfile.lotus
path: .
repo: lotus-dev
tag: '${CIRCLE_SHA1:0:8}'
target: lotus-all-in-one
- build-and-push-image:
name: build-and-push/lotus-test
name: "Publish ECR (lotus-test)"
dockerfile: Dockerfile.lotus
path: .
repo: lotus-test
@@ -1261,7 +1192,7 @@ workflows:
only:
- /^v\d+\.\d+\.\d+-rc\d+$/
- publish-dockerhub:
name: publish-dockerhub
name: "Publish Dockerhub (stable)"
tag: stable
filters:
branches:
@@ -1269,7 +1200,17 @@ workflows:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- /^v\d+\.\d+\.\d+$/
- publish-dockerhub:
name: "Publish Dockerhub (candidate)"
tag: candidate
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+-rc\d+$/

nightly:
triggers:
@@ -1291,10 +1232,10 @@ workflows:
- publish-dockerhub:
name: publish-dockerhub-nightly
tag: nightly
monthly:
biweekly:
triggers:
- schedule:
cron: "0 0 1 * *"
cron: "0 0 1,15 * *"
filters:
branches:
only:
.github/pull_request_template.md (vendored, 24 changed lines)
@@ -1,21 +1,23 @@
## Related Issues
<!-- link all issues that this PR might resolve/fix. If an issue doesn't exist, include a brief motivation for the change being made.-->
<!-- Link issues that this PR might resolve/fix. If an issue doesn't exist, include a brief motivation for the change being made -->

## Proposed Changes
<!-- provide a clear list of the changes being made-->
<!-- A clear list of the changes being made -->

## Additional Info
<!-- callouts, links to documentation, and etc-->
<!-- Callouts, links to documentation, and etc -->

## Checklist

Before you mark the PR ready for review, please make sure that:
- [ ] All commits have a clear commit message.
- [ ] The PR title is in the form of `<PR type>: <area>: <change being made>`
  - example: ` fix: mempool: Introduce a cache for valid signatures`
  - `PR type`: _fix_, _feat_, _INTERFACE BREAKING CHANGE_, _CONSENSUS BREAKING_, _build_, _chore_, _ci_, _docs_, _perf_, _refactor_, _revert_, _style_, _test_
  - `area`: _api_, _chain_, _state_, _vm_, _data transfer_, _market_, _mempool_, _message_, _block production_, _multisig_, _networking_, _paychan_, _proving_, _sealing_, _wallet_, _deps_
- [ ] This PR has tests for new functionality or change in behaviour
- [ ] If new user-facing features are introduced, clear usage guidelines and / or documentation updates should be included in https://lotus.filecoin.io or [Discussion Tutorials.](https://github.com/filecoin-project/lotus/discussions/categories/tutorials)

- [ ] Commits have a clear commit message.
- [ ] PR title is in the form of `<PR type>: <area>: <change being made>`
  - example: ` fix: mempool: Introduce a cache for valid signatures`
  - `PR type`: fix, feat, build, chore, ci, docs, perf, refactor, revert, style, test
  - `area`, e.g. api, chain, state, market, mempool, multisig, networking, paych, proving, sealing, wallet, deps
- [ ] New features have usage guidelines and / or documentation updates in
  - [ ] [Lotus Documentation](https://lotus.filecoin.io)
  - [ ] [Discussion Tutorials](https://github.com/filecoin-project/lotus/discussions/categories/tutorials)
- [ ] Tests exist for new functionality or change in behavior
- [ ] CI is green
.goreleaser.yaml (125 changed lines)
@@ -1,119 +1,65 @@
project_name: lotus
before:
hooks:
- go mod tidy
- make deps

universal_binaries:
- id: lotus
replace: true
name_template: lotus
ids:
- lotus_darwin_amd64
- lotus_darwin_arm64
- id: lotus-miner
replace: true
name_template: lotus-miner
ids:
- lotus-miner_darwin_amd64
- lotus-miner_darwin_arm64
- id: lotus-worker
replace: true
name_template: lotus-worker
ids:
- lotus-worker_darwin_amd64
- lotus-worker_darwin_arm64

builds:
- id: lotus_darwin_amd64
main: ./cmd/lotus
- id: lotus
binary: lotus
builder: prebuilt
goos:
- darwin
- linux
goarch:
- amd64
env:
- CGO_ENABLED=1
- FFI_BUILD_FROM_SOURCE=1
ldflags:
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
- id: lotus-miner_darwin_amd64
main: ./cmd/lotus-miner
- arm64
goamd64:
- v1
ignore:
- goos: linux
goarch: arm64
prebuilt:
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus
- id: lotus-miner
binary: lotus-miner
builder: prebuilt
goos:
- darwin
- linux
goarch:
- amd64
env:
- CGO_ENABLED=1
- FFI_BUILD_FROM_SOURCE=1
ldflags:
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
- id: lotus-worker_darwin_amd64
main: ./cmd/lotus-worker
- arm64
goamd64:
- v1
ignore:
- goos: linux
goarch: arm64
prebuilt:
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-miner
- id: lotus-worker
binary: lotus-worker
builder: prebuilt
goos:
- darwin
- linux
goarch:
- amd64
env:
- CGO_ENABLED=1
- FFI_BUILD_FROM_SOURCE=1
ldflags:
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
- id: lotus_darwin_arm64
main: ./cmd/lotus
binary: lotus
goos:
- darwin
goarch:
- arm64
env:
- CGO_ENABLED=1
- FFI_BUILD_FROM_SOURCE=1
- CPATH=/opt/homebrew/include
- LIBRARY_PATH=/opt/homebrew/lib
ldflags:
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
- id: lotus-miner_darwin_arm64
main: ./cmd/lotus-miner
binary: lotus-miner
goos:
- darwin
goarch:
- arm64
env:
- CGO_ENABLED=1
- FFI_BUILD_FROM_SOURCE=1
- CPATH=/opt/homebrew/include
- LIBRARY_PATH=/opt/homebrew/lib
ldflags:
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
- id: lotus-worker_darwin_arm64
main: ./cmd/lotus-worker
binary: lotus-worker
goos:
- darwin
goarch:
- arm64
env:
- CGO_ENABLED=1
- FFI_BUILD_FROM_SOURCE=1
- CPATH=/opt/homebrew/include
- LIBRARY_PATH=/opt/homebrew/lib
ldflags:
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
# - id: linux
# main: ./cmd/lotus
# binary: lotus
# goos:
# - linux
# goarch:
# - amd64
# env:
# - CGO_ENABLED=1
# ldflags:
# - -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
goamd64:
- v1
ignore:
- goos: linux
goarch: arm64
prebuilt:
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-worker

archives:
- id: primary
@@ -129,8 +75,7 @@ release:
owner: filecoin-project
name: lotus
prerelease: auto
name_template: "Release v{{.Version}}"

name_template: "v{{.Version}}"

brews:
- tap:
@@ -151,10 +96,8 @@ brews:
homepage: "https://filecoin.io"
description: "A homebrew cask for installing filecoin-project/lotus on MacOS"
license: MIT
skip_upload: auto
dependencies:
- name: pkg-config
- name: jq
- name: bzr
- name: hwloc

# produced manually so we can include cid checksums
CHANGELOG.md (168 changed lines)
@@ -1,5 +1,171 @@
# Lotus changelog

# 1.19.0 / 2022-12-07

This is an optional feature release of Lotus. It includes the SplitStore beta, the experimental Lotus node cluster feature, and numerous enhancements and bugfixes.

## Highlights
### 🟢 SplitStore v2 (Beta) 🟢

SplitStore aims to reduce the node performance impact caused by Filecoin's very large and continuously growing chain datastore by splitting it into a hot and a cold blockstore. You can find more about the SplitStore implementation [here](https://github.com/filecoin-project/lotus/blob/master/blockstore/splitstore/README.md).
SplitStore has three basic modes for node operators to configure according to your needs:
- `discard`: hotstore only; automatically archives out-of-scope objects that are beyond 4 finalities (3600 epochs).
- `universal`: stores all chain data that's beyond 4 finalities into the coldstore.
- `messages`: only stores on-chain messages into the coldstore.

The `EnableColdStoreAutoPrune=` configuration is being deprecated in this release, as there is only ever one compaction running. We welcome all node operators to try the new feature and let us know [here](https://github.com/filecoin-project/lotus/discussions/9179) if you have any feedback!
There are more configuration options one may set; you can read the full SplitStore v2 documentation here: https://lotus.filecoin.io/lotus/configure/splitstore/.
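For reference, a minimal sketch of what enabling SplitStore looks like in the node's `config.toml`, using the mode names listed above. The `EnableSplitstore` and `ColdStoreType` keys follow the splitstore documentation linked above; treat this as illustrative rather than authoritative for your Lotus version.

```toml
# Node config.toml -- minimal SplitStore sketch (keys per the splitstore docs linked above)
[Chainstore]
  # Turn on the hot/cold blockstore split
  EnableSplitstore = true

  [Chainstore.Splitstore]
    # One of "discard", "universal" or "messages", as described above
    ColdStoreType = "messages"
```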

### 🧪 Node Cluster (*EXPERIMENTAL*) 🧪
The Lotus HA node cluster feature allows you to run multiple Lotus daemons for the same lotus-miner, increasing resiliency. We welcome all Lotus users to join the early testing for this feature and provide your feedback. Please note that this feature is targeted towards more enterprise users of Lotus and requires at least 3 Lotus nodes to be set up in a cluster.
Check out the documentation here: https://lotus.filecoin.io/lotus/configure/clusters/

### ⭐️ SnapDeals Enhancements ⭐️
Numerous SnapDeals-related improvements and fixes made it into this release before the code freeze. Some highlights of the issues that have been fixed in this feature release are:

- *Unable to snap-up a sector again if something went wrong.* - This has now been fixed ✅
- *Error messages on loop during an open deadline.* - This has now been fixed ✅

## New features
- feat:splitstore:single compaction that can handle prune aka two marksets one compaction (#9571) ([filecoin-project/lotus#9571](https://github.com/filecoin-project/lotus/pull/9571))
  - Introduces a new SplitStore mode, `messages`, which will only store on-chain messages. Fixes previous issues with `AutoPrune` not compacting the coldstore. [Link to documentation](https://lotus.filecoin.io/lotus/configure/splitstore/)
- feat: Raft consensus for lotus nodes in a cluster ([filecoin-project/lotus#9294](https://github.com/filecoin-project/lotus/pull/9294))
  - Adds the experimental node cluster feature.
- feat: storage: Force exit GenerateSingleVanillaProof on cancelled context ([filecoin-project/lotus#9613](https://github.com/filecoin-project/lotus/pull/9613))
  - `GenerateSingleVanillaProof` now respects context, which means that it will skip slow-to-read sectors :snail: and return a context error instead of being blocked forever when storage reads are blocked (e.g. a disconnected NFS).
- feat: wdpost: Configurable pre-check timeouts ([filecoin-project/lotus#9680](https://github.com/filecoin-project/lotus/pull/9680))
  - Adds configuration knobs for setting how long a proving pre-check can take before a sector and partition will be skipped; see the configuration sketch after this list. [Link to documentation](https://lotus.filecoin.io/storage-providers/advanced-configurations/proving/#pre-check-sector-timeout)
- feat: chain: future proof the from & to address protocols ([filecoin-project/lotus#9515](https://github.com/filecoin-project/lotus/pull/9515))
  - This lets us add new address protocols to go-address without implicitly accepting them in messages on the network.
- feat: Retrieval into remote blockstores ([filecoin-project/lotus#9565](https://github.com/filecoin-project/lotus/pull/9565))
  - Makes it possible to point retrievals at a network-backed blockstore.
- feat: Add node uptime rpc / output in info command ([filecoin-project/lotus#9436](https://github.com/filecoin-project/lotus/pull/9436))
  - Adds node uptime stats to the `lotus-miner info` and `lotus info` commands
- feat: wdpost: Add ability to only have single partition per msg for partitions with… ([filecoin-project/lotus#9413](https://github.com/filecoin-project/lotus/pull/9413))
  - Adds a configuration option to have a single partition per PoSt message for partitions containing recovering sectors.
- feat: miner paramfetch: Don't fetch param files when not needed ([filecoin-project/lotus#9391](https://github.com/filecoin-project/lotus/pull/9391))
  - A lotus-miner process that has disabled local PoSt / C2 / PR2 does not need the param files. This makes node startup much faster, reducing downtime by a lot when restarts are needed.
- feat: client: Add retrieval deal ID and bytes transferred to retrieval output ([filecoin-project/lotus#9398](https://github.com/filecoin-project/lotus/pull/9398))
  - Appends the retrieval deal ID and bytes transferred to the retrieval output.
- feat: dealpublisher: check for duplicate deals before adding ([filecoin-project/lotus#9365](https://github.com/filecoin-project/lotus/pull/9365))
- feat: Drop active retrieval check (#764) ([filecoin-project/go-fil-markets#764](https://github.com/filecoin-project/go-fil-markets/pull/764))
- feat(retrievalmarkets): expose GetDynamicAsk (#748) ([filecoin-project/go-fil-markets#748](https://github.com/filecoin-project/go-fil-markets/pull/748))
- feat: handle retrieval queries for unindexed identity payload CIDs (#747) ([filecoin-project/go-fil-markets#747](https://github.com/filecoin-project/go-fil-markets/pull/747))
- feat: add a method for validating an address for a network version (#115) ([filecoin-project/go-state-types#115](https://github.com/filecoin-project/go-state-types/pull/115))
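As referenced in the wdpost pre-check item above, a hedged sketch of the new proving pre-check timeouts in the lotus-miner `config.toml`. The field names `SingleCheckTimeout` and `PartitionCheckTimeout` and the durations shown are assumptions based on the linked proving documentation; confirm them against the config generated by your Lotus version.

```toml
# lotus-miner config.toml -- proving pre-check timeouts (field names assumed from
# https://lotus.filecoin.io/storage-providers/advanced-configurations/proving/)
[Proving]
  # Skip a sector whose pre-check read takes longer than this
  SingleCheckTimeout = "10m0s"
  # Skip a whole partition whose pre-check takes longer than this
  PartitionCheckTimeout = "20m0s"
```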
|
||||
|
||||
## Improvements
|
||||
- fix: miner-cli: Fix lotus-miner proving check ([filecoin-project/lotus#9643](https://github.com/filecoin-project/lotus/pull/9643))
|
||||
- Fixes the issue where the `lotus-miner proving check` command always outputted `Error: rg is nil`
|
||||
- fix: sealing pipeline: Clear CreationTime when starting sector upgrade ([filecoin-project/lotus#9642](https://github.com/filecoin-project/lotus/pull/9642))
|
||||
- Fixes the issue where an aborted SnapDeal upgrade could no longer be retried with SnapDeals.
|
||||
- fix:sealing-fsm:wait mutable fsm state for immutable sector upgrade error ([filecoin-project/lotus#9598](https://github.com/filecoin-project/lotus/pull/9598))
|
||||
- Creating a new WaitMutable state - now if the deadline is open and the sectors are trying finalize they will wait on the worker until the deadline has closed. Important to note that they will not finalize as soon as the deadline closes, they will wait 1h before continuing. Fixes the previous issue where upgraded Snap-sectors for an open deadline cause a lot of `error-messages` and `p_aux` issues
|
||||
- fix: cli: add beneficiary info to lotus-miner actor control list ([filecoin-project/lotus#9632](https://github.com/filecoin-project/lotus/pull/9632))
|
||||
- Adds the beneficiary address to the `lotus-miner actor control list` output.
|
||||
- fix: sealing pipeine: Release assigned deals on snapdeals abort ([filecoin-project/lotus#9601](https://github.com/filecoin-project/lotus/pull/9601))
|
||||
- fix: docker: make compatible with arm platform ([filecoin-project/lotus#9363](https://github.com/filecoin-project/lotus/pull/9363))
|
||||
- Makes the `Dockerfile.lotus` compatible with ARM-platforms (e.g Mac M1).
|
||||
- fix: post worker sched: Don't check worker session in a busy loop ([filecoin-project/lotus#9495](https://github.com/filecoin-project/lotus/pull/9495))
|
||||
- Fixes a looping pattern which could result in a flood of requests between `lotus-miner`<->`lotus-worker`, potentially exhausting resources needed to make http requests, that lead to all sorts of random RPC-related issues.
|
||||
- fix: miner: init miner's with 32GiB sectors by default ([filecoin-project/lotus#9364](https://github.com/filecoin-project/lotus/pull/9364))
|
||||
- Makes the `lotus-miner init` defualt to 32GiB sectors.
|
||||
- fix: store identity CIDs in CARs for online deals (#749) ([filecoin-project/go-fil-markets#749](https://github.com/filecoin-project/go-fil-markets/pull/749))
|
||||
- fix: cliutil: Fix URL-based API endpoint parsing
|
||||
|
||||
## Dependencies
|
||||
- deps: upgrade go-merkledag to 0.8.1 (#9717)
|
||||
- deps: Update go-fil-markets to v1.25.0 ([filecoin-project/lotus#9633](https://github.com/filecoin-project/lotus/pull/9633))
|
||||
- deps: upgrade go-merkledag to 0.8.0 ([filecoin-project/lotus#9455](https://github.com/filecoin-project/lotus/pull/9455))
|
||||
|
||||
## Others

- fix/build: Update Zondax/hid to 0.9.1
- refactor: sealing: minor refactor of FinalizeReplicaUpdate ([filecoin-project/lotus#9614](https://github.com/filecoin-project/lotus/pull/9614))
- update ffi to 280c4f8b94fd46dc (#9608) ([filecoin-project/lotus#9608](https://github.com/filecoin-project/lotus/pull/9608))
- fix: tvx: make it work with the FVM ([filecoin-project/lotus#9604](https://github.com/filecoin-project/lotus/pull/9604))
- fix: autobatch: remove potential deadlock when a block is missing ([filecoin-project/lotus#9602](https://github.com/filecoin-project/lotus/pull/9602))
- feat: shed: set control address: add dump bytes option ([filecoin-project/lotus#9572](https://github.com/filecoin-project/lotus/pull/9572))
- feat: shed: Online export-car ([filecoin-project/lotus#9590](https://github.com/filecoin-project/lotus/pull/9590))
- fix: chain: Update chain.go ([filecoin-project/lotus#9373](https://github.com/filecoin-project/lotus/pull/9373))
- fix: fvm: Allow setting local bundles for Debug FVM for av 9+ ([filecoin-project/lotus#9509](https://github.com/filecoin-project/lotus/pull/9509))
- fix: ux: Add outputs to wallet commands ([filecoin-project/lotus#9416](https://github.com/filecoin-project/lotus/pull/9416))
- fix: ux: specify arg in `net ping` cmd ([filecoin-project/lotus#9459](https://github.com/filecoin-project/lotus/pull/9459))
- fix: cli: renew --only-cc with sectorfile ([filecoin-project/lotus#9402](https://github.com/filecoin-project/lotus/pull/9402))
- fix: bstore: Handle codecs correctly in membstore Get ([filecoin-project/lotus#9471](https://github.com/filecoin-project/lotus/pull/9471))
- fix: not multiplied by the number of seconds ([filecoin-project/lotus#9460](https://github.com/filecoin-project/lotus/pull/9460))
- Makefile: Drop rarely used binaries from build-devnets (#9513) ([filecoin-project/lotus#9513](https://github.com/filecoin-project/lotus/pull/9513))
- _ci_: Remove unneeded homebrew deps ([filecoin-project/lotus#9559](https://github.com/filecoin-project/lotus/pull/9559))
- _ci_: Have apt-get wait until it can get a lock in packer builds ([filecoin-project/lotus#9534](https://github.com/filecoin-project/lotus/pull/9534))
- _ci_: Appimage go1.18.1 fix ([filecoin-project/lotus#9496](https://github.com/filecoin-project/lotus/pull/9496))
- _ci_: Fix failing Digital Ocean and Amazon Machine Image release process ([filecoin-project/lotus#9425](https://github.com/filecoin-project/lotus/pull/9425))
- _ci_: Don't publish new homebrew releases for RC builds ([filecoin-project/lotus#9350](https://github.com/filecoin-project/lotus/pull/9350))
- _ci_: Use golang 1.18.1 to build appimage ([filecoin-project/lotus#9386](https://github.com/filecoin-project/lotus/pull/9386))
- _ci_: Refactor release pipeline to better support m1 builds ([filecoin-project/lotus#9600](https://github.com/filecoin-project/lotus/pull/9600))
- _ci_: Rely on local env variable instead of context ([filecoin-project/lotus#9740](https://github.com/filecoin-project/lotus/pull/9740))
- feat: shed: FIP0036 post poll result processing ([filecoin-project/lotus#9387](https://github.com/filecoin-project/lotus/pull/9387))
- misc: github: Cleanup PR template ([filecoin-project/lotus#9472](https://github.com/filecoin-project/lotus/pull/9472))
- docs: release template: Mention codegen in release prep ([filecoin-project/lotus#9430](https://github.com/filecoin-project/lotus/pull/9430))
- chore: merge releases (v1.17.2) into master ([filecoin-project/lotus#9433](https://github.com/filecoin-project/lotus/pull/9433))
- chore: merge release/v1.18.0 into master ([filecoin-project/lotus#9597](https://github.com/filecoin-project/lotus/pull/9597))
- chore: shed: Teach shed/sim to understand --tipset=@nnn notation ([filecoin-project/lotus#9434](https://github.com/filecoin-project/lotus/pull/9434))
- _chore_: Upgrade `hid` ([filecoin-project/lotus#9406](https://github.com/filecoin-project/lotus/pull/9406))
- chore: release: Update `release_issue_template` ([filecoin-project/lotus#9150](https://github.com/filecoin-project/lotus/pull/9150))
- chore: update lotus version to 1.19.0-rc1
- chore: merge release into master ([filecoin-project/lotus#9657](https://github.com/filecoin-project/lotus/pull/9657))
- Backport: #9061 rpc errors ([filecoin-project/lotus#9384](https://github.com/filecoin-project/lotus/pull/9384))
- shed: util: get all msig ([filecoin-project/lotus#9322](https://github.com/filecoin-project/lotus/pull/9322))
- fix: test: simplify TestDeadlineToggling ([filecoin-project/lotus#9356](https://github.com/filecoin-project/lotus/pull/9356))
- fix: build: set PropagationDelaySecs correctly ([filecoin-project/lotus#9358](https://github.com/filecoin-project/lotus/pull/9358))
- fix: test: flaky TestDeadlineToggling around nulls (#9354) ([filecoin-project/lotus#9354](https://github.com/filecoin-project/lotus/pull/9354))
- fix: retrievals: price-per-byte calculation fix ([filecoin-project/lotus#9353](https://github.com/filecoin-project/lotus/pull/9353))
- fix: docs: update Go-badge in readme ([filecoin-project/lotus#9347](https://github.com/filecoin-project/lotus/pull/9347))
- docs: release template: clarify location of version.go ([filecoin-project/lotus#9338](https://github.com/filecoin-project/lotus/pull/9338))
- build: Bump version to v1.17.3-dev ([filecoin-project/lotus#9337](https://github.com/filecoin-project/lotus/pull/9337))
- github.com/filecoin-project/go-fil-markets (v1.24.0-v17 -> v1.25.0):
  - Merge branch 'release/v1.24.3'
  - Update ffi and update markets to v9 (#751) (#761) ([filecoin-project/go-fil-markets#761](https://github.com/filecoin-project/go-fil-markets/pull/761))
  - chore: extract Provider piece logic to separate file (#750) ([filecoin-project/go-fil-markets#750](https://github.com/filecoin-project/go-fil-markets/pull/750))
  - Merge branch 'release/v1.24.2'
  - Feat/support custom metadata (#759) ([filecoin-project/go-fil-markets#759](https://github.com/filecoin-project/go-fil-markets/pull/759))
  - Revert "Update ffi and update markets to v9 (#751)" (#755) ([filecoin-project/go-fil-markets#755](https://github.com/filecoin-project/go-fil-markets/pull/755))
  - Update ffi and update markets to v9 (#751) ([filecoin-project/go-fil-markets#751](https://github.com/filecoin-project/go-fil-markets/pull/751))
  - release: v1.24.0 ([filecoin-project/go-fil-markets#745](https://github.com/filecoin-project/go-fil-markets/pull/745))
- github.com/filecoin-project/go-state-types (v0.9.8 -> v0.9.9):

## lotus-market EOL notice

As mentioned in the [lotus v1.17.0 release notes](https://github.com/filecoin-project/lotus/releases/tag/v1.17.0), markets-related features, enhancements and fixes are now a lower priority for Lotus. We recommend that our users migrate to other deal-making focused software, like [boost](https://boost.filecoin.io/), as soon as possible. That being said:

- The Lotus maintainers will stop supporting the lotus-market subcomponent / **storage** deal making related issues or enhancements on Jan 31, 2023.
- In Q2 2023, we will deprecate/remove lotus-market related code from this repository.

If you have any questions or concerns, please raise them in [Lotus discussion](https://github.com/filecoin-project/lotus/discussions/categories/market)!

## Contributors

| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Geoff Stuart | 69 | +4745/-19478 | 405 |
| Shrenuj Bansal | 39 | +5257/-2183 | 243 |
| Łukasz Magiera | 32 | +2763/-730 | 169 |
| Aayush | 47 | +1439/-1138 | 157 |
| Ian Davis | 21 | +556/-1065 | 41 |
| Rod Vagg | 5 | +657/-320 | 18 |
| jennijuju | 4 | +632/-317 | 6 |
| Aayush Rajasekaran | 13 | +700/-135 | 18 |
| Jennifer Wang | 14 | +740/-54 | 25 |
| ZenGround0 | 1 | +193/-195 | 14 |
| Hannah Howard | 4 | +138/-122 | 52 |
| Steven Allen | 4 | +105/-24 | 11 |
| zenground0 | 9 | +109/-16 | 14 |
| Peter Rabbitson | 1 | +27/-23 | 3 |
| hannahhoward | 2 | +49/-0 | 2 |
| Airenas Vaičiūnas | 2 | +31/-16 | 2 |
| simlecode | 6 | +19/-10 | 12 |
| Phi | 5 | +16/-10 | 7 |
| sectrgt | 2 | +18/-0 | 2 |
| Jiaying Wang | 2 | +4/-4 | 3 |
| Rob Quist | 1 | +3/-1 | 1 |
| Jakub Sztandera | 1 | +1/-1 | 1 |

# 1.18.2 / 2022-12-10

This is an OPTIONAL patch release that fixes a recently reported bug where the miner process crashes due to a panic during AddPiece. More details can be found [here](https://github.com/filecoin-project/lotus/pull/9822).

@ -150,8 +316,6 @@ If you have any questions or concerns, please raise them in [Lotus discussion](h
| Peter Rabbitson | 1 | +3/-0 | 1 |
| Jakub Sztandera | 1 | +1/-1 | 1 |

# v1.17.2 / 2022-10-05

This is an OPTIONAL release of Lotus. This feature release introduces new sector number management APIs in Lotus that enable all the Sealing-as-a-Service and Lotus interactions needed to function. The default propagation delay setting for storage providers has also been changed, along with numerous other features and enhancements. Check out the sub-bullet points in the feature and enhancement section for a short description of each feature and enhancement.

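The release notes above mention a changed default propagation delay for storage providers. The snippet below is only a hedged sketch of how an operator-facing override of such a setting could look; the `PROPAGATION_DELAY_SECS` variable name and the 10-second value are assumptions for illustration, not a definitive description of the lotus implementation:

```go
package config // hypothetical helper, not lotus source

import (
	"os"
	"strconv"
	"time"
)

// propagationDelay returns an assumed default block propagation delay for
// storage providers, optionally overridden through an environment variable.
func propagationDelay() time.Duration {
	delay := 10 * time.Second // assumed default for this illustration
	if v := os.Getenv("PROPAGATION_DELAY_SECS"); v != "" {
		if secs, err := strconv.Atoi(v); err == nil && secs > 0 {
			delay = time.Duration(secs) * time.Second
		}
	}
	return delay
}
```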
@ -3,28 +3,43 @@ MAINTAINER Lotus Development Team
|
||||
|
||||
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
|
||||
|
||||
ARG RUST_VERSION=nightly
|
||||
ENV XDG_CACHE_HOME="/tmp"
|
||||
|
||||
### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile
|
||||
ENV RUSTUP_HOME=/usr/local/rustup \
|
||||
CARGO_HOME=/usr/local/cargo \
|
||||
PATH=/usr/local/cargo/bin:$PATH
|
||||
PATH=/usr/local/cargo/bin:$PATH \
|
||||
RUST_VERSION=1.63.0
|
||||
|
||||
RUN wget "https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init"; \
|
||||
RUN set -eux; \
|
||||
dpkgArch="$(dpkg --print-architecture)"; \
|
||||
case "${dpkgArch##*-}" in \
|
||||
amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \
|
||||
arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \
|
||||
*) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \
|
||||
esac; \
|
||||
url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \
|
||||
wget "$url"; \
|
||||
echo "${rustupSha256} *rustup-init" | sha256sum -c -; \
|
||||
chmod +x rustup-init; \
|
||||
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION; \
|
||||
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \
|
||||
rm rustup-init; \
|
||||
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
|
||||
rustup --version; \
|
||||
cargo --version; \
|
||||
rustc --version;
|
||||
|
||||
### end rust
|
||||
|
||||
FROM builder-deps AS builder-local
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
COPY ./ /opt/filecoin
|
||||
WORKDIR /opt/filecoin
|
||||
|
||||
### make configurable filecoin-ffi build
|
||||
ARG FFI_BUILD_FROM_SOURCE=0
|
||||
ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE}
|
||||
|
||||
RUN make clean deps
|
||||
|
||||
|
||||
@ -52,14 +67,14 @@ MAINTAINER Lotus Development Team
|
||||
|
||||
# Base resources
|
||||
COPY --from=builder /etc/ssl/certs /etc/ssl/certs
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/libdl.so.2 /lib/
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/librt.so.1 /lib/
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/libutil.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libltdl.so.7 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/
|
||||
COPY --from=builder /lib/*/libdl.so.2 /lib/
|
||||
COPY --from=builder /lib/*/librt.so.1 /lib/
|
||||
COPY --from=builder /lib/*/libgcc_s.so.1 /lib/
|
||||
COPY --from=builder /lib/*/libutil.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/*/libltdl.so.7 /lib/
|
||||
COPY --from=builder /usr/lib/*/libnuma.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/*/libhwloc.so.5 /lib/
|
||||
COPY --from=builder /usr/lib/*/libOpenCL.so.1 /lib/
|
||||
|
||||
RUN useradd -r -u 532 -U fc \
|
||||
&& mkdir -p /etc/OpenCL/vendors \
|
||||
|
2
Makefile
2
Makefile
@ -66,7 +66,7 @@ CLEAN+=build/.update-modules
|
||||
deps: $(BUILD_DEPS)
|
||||
.PHONY: deps
|
||||
|
||||
build-devnets: build lotus-seed lotus-shed lotus-wallet lotus-gateway lotus-fountain lotus-stats
|
||||
build-devnets: build lotus-seed lotus-shed
|
||||
.PHONY: build-devnets
|
||||
|
||||
debug: GOFLAGS+=-tags=debug
|
||||
|
@ -10,7 +10,7 @@
|
||||
<a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
|
||||
<a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
|
||||
<a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
|
||||
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.17-blue.svg" /></a>
|
||||
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.18.1-blue.svg" /></a>
|
||||
<br>
|
||||
</p>
|
||||
|
||||
|
@ -3,6 +3,7 @@ package api
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
@ -49,6 +50,9 @@ type Common interface {
|
||||
// trigger graceful shutdown
|
||||
Shutdown(context.Context) error //perm:admin
|
||||
|
||||
// StartTime returns node start time
|
||||
StartTime(context.Context) (time.Time, error) //perm:read
|
||||
|
||||
// Session returns a random UUID of api provider session
|
||||
Session(context.Context) (uuid.UUID, error) //perm:read
|
||||
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@ -763,6 +764,9 @@ type FullNode interface {
|
||||
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
|
||||
// the path specified when calling CreateBackup is within the base path
|
||||
CreateBackup(ctx context.Context, fpath string) error //perm:admin
|
||||
|
||||
RaftState(ctx context.Context) (*RaftStateData, error) //perm:read
|
||||
RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
|
||||
}
|
||||
|
||||
type StorageAsk struct {
|
||||
@ -1012,8 +1016,12 @@ type RetrievalOrder struct {
|
||||
Client address.Address
|
||||
Miner address.Address
|
||||
MinerPeer *retrievalmarket.RetrievalPeer
|
||||
|
||||
RemoteStore *RemoteStoreID `json:"RemoteStore,omitempty"`
|
||||
}
|
||||
|
||||
type RemoteStoreID = uuid.UUID
|
||||
|
||||
type InvocResult struct {
|
||||
MsgCid cid.Cid
|
||||
Msg *types.Message
|
||||
|
@ -320,7 +320,7 @@ type StorageMiner interface {
|
||||
// the path specified when calling CreateBackup is within the base path
|
||||
CreateBackup(ctx context.Context, fpath string) error //perm:admin
|
||||
|
||||
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
|
||||
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef) (map[abi.SectorNumber]string, error) //perm:admin
|
||||
|
||||
ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
|
||||
|
||||
|
@ -349,6 +349,10 @@ func init() {
|
||||
addExample(map[string]bitfield.BitField{
|
||||
"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
|
||||
})
|
||||
addExample(&api.RaftStateData{
|
||||
NonceMap: make(map[address.Address]uint64),
|
||||
MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
|
||||
})
|
||||
|
||||
addExample(http.Header{
|
||||
"Authorization": []string{"Bearer ey.."},
|
||||
@ -361,6 +365,7 @@ func init() {
|
||||
Headers: nil,
|
||||
},
|
||||
})
|
||||
addExample(&uuid.UUID{})
|
||||
}
|
||||
|
||||
func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
|
||||
|
@ -2244,6 +2244,36 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, a
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
|
||||
}
|
||||
|
||||
// RaftLeader mocks base method.
|
||||
func (m *MockFullNode) RaftLeader(arg0 context.Context) (peer.ID, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "RaftLeader", arg0)
|
||||
ret0, _ := ret[0].(peer.ID)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// RaftLeader indicates an expected call of RaftLeader.
|
||||
func (mr *MockFullNodeMockRecorder) RaftLeader(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftLeader", reflect.TypeOf((*MockFullNode)(nil).RaftLeader), arg0)
|
||||
}
|
||||
|
||||
// RaftState mocks base method.
|
||||
func (m *MockFullNode) RaftState(arg0 context.Context) (*api.RaftStateData, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "RaftState", arg0)
|
||||
ret0, _ := ret[0].(*api.RaftStateData)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// RaftState indicates an expected call of RaftState.
|
||||
func (mr *MockFullNodeMockRecorder) RaftState(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftState", reflect.TypeOf((*MockFullNode)(nil).RaftState), arg0)
|
||||
}
|
||||
|
||||
// Session mocks base method.
|
||||
func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -2273,6 +2303,21 @@ func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0)
|
||||
}
|
||||
|
||||
// StartTime mocks base method.
|
||||
func (m *MockFullNode) StartTime(arg0 context.Context) (time.Time, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StartTime", arg0)
|
||||
ret0, _ := ret[0].(time.Time)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StartTime indicates an expected call of StartTime.
|
||||
func (mr *MockFullNodeMockRecorder) StartTime(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockFullNode)(nil).StartTime), arg0)
|
||||
}
|
||||
|
||||
// StateAccountKey mocks base method.
|
||||
func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
@ -80,6 +80,8 @@ type CommonStruct struct {
|
||||
|
||||
Shutdown func(p0 context.Context) error `perm:"admin"`
|
||||
|
||||
StartTime func(p0 context.Context) (time.Time, error) `perm:"read"`
|
||||
|
||||
Version func(p0 context.Context) (APIVersion, error) `perm:"read"`
|
||||
}
|
||||
}
|
||||
@ -340,6 +342,10 @@ type FullNodeStruct struct {
|
||||
|
||||
PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
RaftLeader func(p0 context.Context) (peer.ID, error) `perm:"read"`
|
||||
|
||||
RaftState func(p0 context.Context) (*RaftStateData, error) `perm:"read"`
|
||||
|
||||
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
|
||||
|
||||
StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"`
|
||||
@ -677,7 +683,7 @@ type StorageMinerStruct struct {
|
||||
|
||||
BeneficiaryWithdrawBalance func(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) `perm:"admin"`
|
||||
|
||||
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
|
||||
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef) (map[abi.SectorNumber]string, error) `perm:"admin"`
|
||||
|
||||
ComputeDataCid func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data) (abi.PieceInfo, error) `perm:"admin"`
|
||||
|
||||
@ -1173,6 +1179,17 @@ func (s *CommonStub) Shutdown(p0 context.Context) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *CommonStruct) StartTime(p0 context.Context) (time.Time, error) {
|
||||
if s.Internal.StartTime == nil {
|
||||
return *new(time.Time), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StartTime(p0)
|
||||
}
|
||||
|
||||
func (s *CommonStub) StartTime(p0 context.Context) (time.Time, error) {
|
||||
return *new(time.Time), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *CommonStruct) Version(p0 context.Context) (APIVersion, error) {
|
||||
if s.Internal.Version == nil {
|
||||
return *new(APIVersion), ErrNotSupported
|
||||
@ -2460,6 +2477,28 @@ func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address
|
||||
return *new(cid.Cid), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) RaftLeader(p0 context.Context) (peer.ID, error) {
|
||||
if s.Internal.RaftLeader == nil {
|
||||
return *new(peer.ID), ErrNotSupported
|
||||
}
|
||||
return s.Internal.RaftLeader(p0)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) RaftLeader(p0 context.Context) (peer.ID, error) {
|
||||
return *new(peer.ID), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) RaftState(p0 context.Context) (*RaftStateData, error) {
|
||||
if s.Internal.RaftState == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.RaftState(p0)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) RaftState(p0 context.Context) (*RaftStateData, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
|
||||
if s.Internal.StateAccountKey == nil {
|
||||
return *new(address.Address), ErrNotSupported
|
||||
@ -4121,14 +4160,14 @@ func (s *StorageMinerStub) BeneficiaryWithdrawBalance(p0 context.Context, p1 abi
|
||||
return *new(cid.Cid), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
|
||||
func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef) (map[abi.SectorNumber]string, error) {
|
||||
if s.Internal.CheckProvable == nil {
|
||||
return *new(map[abi.SectorNumber]string), ErrNotSupported
|
||||
}
|
||||
return s.Internal.CheckProvable(p0, p1, p2, p3)
|
||||
return s.Internal.CheckProvable(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
|
||||
func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef) (map[abi.SectorNumber]string, error) {
|
||||
return *new(map[abi.SectorNumber]string), ErrNotSupported
|
||||
}
|
||||
|
||||
|
63
api/types.go
63
api/types.go
@ -59,6 +59,11 @@ type MessageSendSpec struct {
|
||||
MsgUuid uuid.UUID
|
||||
}
|
||||
|
||||
type MpoolMessageWhole struct {
|
||||
Msg *types.Message
|
||||
Spec *MessageSendSpec
|
||||
}
|
||||
|
||||
// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
|
||||
type GraphSyncDataTransfer struct {
|
||||
// GraphSync request id for this transfer
|
||||
@ -334,3 +339,61 @@ type ForkUpgradeParams struct {
|
||||
UpgradeSkyrHeight abi.ChainEpoch
|
||||
UpgradeSharkHeight abi.ChainEpoch
|
||||
}
|
||||
|
||||
type NonceMapType map[address.Address]uint64
|
||||
type MsgUuidMapType map[uuid.UUID]*types.SignedMessage
|
||||
|
||||
type RaftStateData struct {
|
||||
NonceMap NonceMapType
|
||||
MsgUuids MsgUuidMapType
|
||||
}
|
||||
|
||||
func (n *NonceMapType) MarshalJSON() ([]byte, error) {
|
||||
marshalled := make(map[string]uint64)
|
||||
for a, n := range *n {
|
||||
marshalled[a.String()] = n
|
||||
}
|
||||
return json.Marshal(marshalled)
|
||||
}
|
||||
|
||||
func (n *NonceMapType) UnmarshalJSON(b []byte) error {
|
||||
unmarshalled := make(map[string]uint64)
|
||||
err := json.Unmarshal(b, &unmarshalled)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*n = make(map[address.Address]uint64)
|
||||
for saddr, nonce := range unmarshalled {
|
||||
a, err := address.NewFromString(saddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
(*n)[a] = nonce
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) {
|
||||
marshalled := make(map[string]*types.SignedMessage)
|
||||
for u, msg := range *m {
|
||||
marshalled[u.String()] = msg
|
||||
}
|
||||
return json.Marshal(marshalled)
|
||||
}
|
||||
|
||||
func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
|
||||
unmarshalled := make(map[string]*types.SignedMessage)
|
||||
err := json.Unmarshal(b, &unmarshalled)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*m = make(map[uuid.UUID]*types.SignedMessage)
|
||||
for suid, msg := range unmarshalled {
|
||||
u, err := uuid.Parse(suid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
(*m)[u] = msg
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -2158,6 +2158,21 @@ func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0)
|
||||
}
|
||||
|
||||
// StartTime mocks base method.
|
||||
func (m *MockFullNode) StartTime(arg0 context.Context) (time.Time, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StartTime", arg0)
|
||||
ret0, _ := ret[0].(time.Time)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StartTime indicates an expected call of StartTime.
|
||||
func (mr *MockFullNodeMockRecorder) StartTime(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockFullNode)(nil).StartTime), arg0)
|
||||
}
|
||||
|
||||
// StateAccountKey mocks base method.
|
||||
func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
441
blockstore/cbor_gen.go
Normal file
441
blockstore/cbor_gen.go
Normal file
@ -0,0 +1,441 @@
|
||||
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
|
||||
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
cid "github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
xerrors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
var lengthBufNetRpcReq = []byte{132}
|
||||
|
||||
func (t *NetRpcReq) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
|
||||
cw := cbg.NewCborWriter(w)
|
||||
|
||||
if _, err := cw.Write(lengthBufNetRpcReq); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCReqType) (uint8)
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.ID (uint64) (uint64)
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Cid ([]cid.Cid) (slice)
|
||||
if len(t.Cid) > cbg.MaxLength {
|
||||
return xerrors.Errorf("Slice value in field t.Cid was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Cid))); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, v := range t.Cid {
|
||||
if err := cbg.WriteCid(w, v); err != nil {
|
||||
return xerrors.Errorf("failed writing cid field t.Cid: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// t.Data ([][]uint8) (slice)
|
||||
if len(t.Data) > cbg.MaxLength {
|
||||
return xerrors.Errorf("Slice value in field t.Data was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Data))); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, v := range t.Data {
|
||||
if len(v) > cbg.ByteArrayMaxLen {
|
||||
return xerrors.Errorf("Byte array in field v was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(v))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := cw.Write(v[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
*t = NetRpcReq{}
|
||||
|
||||
cr := cbg.NewCborReader(r)
|
||||
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("cbor input should be of type array")
|
||||
}
|
||||
|
||||
if extra != 4 {
|
||||
return fmt.Errorf("cbor input had wrong number of fields")
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCReqType) (uint8)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint8 field")
|
||||
}
|
||||
if extra > math.MaxUint8 {
|
||||
return fmt.Errorf("integer in input was too large for uint8 field")
|
||||
}
|
||||
t.Type = NetRPCReqType(extra)
|
||||
// t.ID (uint64) (uint64)
|
||||
|
||||
{
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.ID = uint64(extra)
|
||||
|
||||
}
|
||||
// t.Cid ([]cid.Cid) (slice)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("t.Cid: array too large (%d)", extra)
|
||||
}
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("expected cbor array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Cid = make([]cid.Cid, extra)
|
||||
}
|
||||
|
||||
for i := 0; i < int(extra); i++ {
|
||||
|
||||
c, err := cbg.ReadCid(cr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("reading cid field t.Cid failed: %w", err)
|
||||
}
|
||||
t.Cid[i] = c
|
||||
}
|
||||
|
||||
// t.Data ([][]uint8) (slice)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("t.Data: array too large (%d)", extra)
|
||||
}
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("expected cbor array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Data = make([][]uint8, extra)
|
||||
}
|
||||
|
||||
for i := 0; i < int(extra); i++ {
|
||||
{
|
||||
var maj byte
|
||||
var extra uint64
|
||||
var err error
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.ByteArrayMaxLen {
|
||||
return fmt.Errorf("t.Data[i]: byte array too large (%d)", extra)
|
||||
}
|
||||
if maj != cbg.MajByteString {
|
||||
return fmt.Errorf("expected byte array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Data[i] = make([]uint8, extra)
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(cr, t.Data[i][:]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var lengthBufNetRpcResp = []byte{131}
|
||||
|
||||
func (t *NetRpcResp) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
|
||||
cw := cbg.NewCborWriter(w)
|
||||
|
||||
if _, err := cw.Write(lengthBufNetRpcResp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCRespType) (uint8)
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.ID (uint64) (uint64)
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Data ([]uint8) (slice)
|
||||
if len(t.Data) > cbg.ByteArrayMaxLen {
|
||||
return xerrors.Errorf("Byte array in field t.Data was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Data))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := cw.Write(t.Data[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *NetRpcResp) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
*t = NetRpcResp{}
|
||||
|
||||
cr := cbg.NewCborReader(r)
|
||||
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("cbor input should be of type array")
|
||||
}
|
||||
|
||||
if extra != 3 {
|
||||
return fmt.Errorf("cbor input had wrong number of fields")
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCRespType) (uint8)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint8 field")
|
||||
}
|
||||
if extra > math.MaxUint8 {
|
||||
return fmt.Errorf("integer in input was too large for uint8 field")
|
||||
}
|
||||
t.Type = NetRPCRespType(extra)
|
||||
// t.ID (uint64) (uint64)
|
||||
|
||||
{
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.ID = uint64(extra)
|
||||
|
||||
}
|
||||
// t.Data ([]uint8) (slice)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.ByteArrayMaxLen {
|
||||
return fmt.Errorf("t.Data: byte array too large (%d)", extra)
|
||||
}
|
||||
if maj != cbg.MajByteString {
|
||||
return fmt.Errorf("expected byte array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Data = make([]uint8, extra)
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(cr, t.Data[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var lengthBufNetRpcErr = []byte{131}
|
||||
|
||||
func (t *NetRpcErr) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
|
||||
cw := cbg.NewCborWriter(w)
|
||||
|
||||
if _, err := cw.Write(lengthBufNetRpcErr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCErrType) (uint8)
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Msg (string) (string)
|
||||
if len(t.Msg) > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field t.Msg was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Msg))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string(t.Msg)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Cid (cid.Cid) (struct)
|
||||
|
||||
if t.Cid == nil {
|
||||
if _, err := cw.Write(cbg.CborNull); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteCid(cw, *t.Cid); err != nil {
|
||||
return xerrors.Errorf("failed to write cid field t.Cid: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *NetRpcErr) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
*t = NetRpcErr{}
|
||||
|
||||
cr := cbg.NewCborReader(r)
|
||||
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("cbor input should be of type array")
|
||||
}
|
||||
|
||||
if extra != 3 {
|
||||
return fmt.Errorf("cbor input had wrong number of fields")
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCErrType) (uint8)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint8 field")
|
||||
}
|
||||
if extra > math.MaxUint8 {
|
||||
return fmt.Errorf("integer in input was too large for uint8 field")
|
||||
}
|
||||
t.Type = NetRPCErrType(extra)
|
||||
// t.Msg (string) (string)
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadString(cr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.Msg = string(sval)
|
||||
}
|
||||
// t.Cid (cid.Cid) (struct)
|
||||
|
||||
{
|
||||
|
||||
b, err := cr.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b != cbg.CborNull[0] {
|
||||
if err := cr.UnreadByte(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := cbg.ReadCid(cr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read cid field t.Cid: %w", err)
|
||||
}
|
||||
|
||||
t.Cid = &c
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
@ -47,6 +47,9 @@ func (m MemBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error)
|
||||
if !ok {
|
||||
return nil, ipld.ErrNotFound{Cid: k}
|
||||
}
|
||||
if b.Cid().Prefix().Codec != k.Prefix().Codec {
|
||||
return blocks.NewBlockWithCid(b.RawData(), k)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
|
45
blockstore/mem_test.go
Normal file
45
blockstore/mem_test.go
Normal file
@ -0,0 +1,45 @@
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMemGetCodec(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
bs := NewMemory()
|
||||
|
||||
cborArr := []byte{0x82, 1, 2}
|
||||
|
||||
h, err := mh.Sum(cborArr, mh.SHA2_256, -1)
|
||||
require.NoError(t, err)
|
||||
|
||||
rawCid := cid.NewCidV1(cid.Raw, h)
|
||||
rawBlk, err := blocks.NewBlockWithCid(cborArr, rawCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = bs.Put(ctx, rawBlk)
|
||||
require.NoError(t, err)
|
||||
|
||||
cborCid := cid.NewCidV1(cid.DagCBOR, h)
|
||||
|
||||
cborBlk, err := bs.Get(ctx, cborCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, cborCid.Prefix(), cborBlk.Cid().Prefix())
|
||||
require.EqualValues(t, cborArr, cborBlk.RawData())
|
||||
|
||||
// was allocated
|
||||
require.NotEqual(t, cborBlk, rawBlk)
|
||||
|
||||
gotRawBlk, err := bs.Get(ctx, rawCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
// not allocated
|
||||
require.Equal(t, rawBlk, gotRawBlk)
|
||||
}
|
424
blockstore/net.go
Normal file
424
blockstore/net.go
Normal file
@ -0,0 +1,424 @@
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
"github.com/libp2p/go-msgio"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type NetRPCReqType byte
|
||||
|
||||
const (
|
||||
NRpcHas NetRPCReqType = iota
|
||||
NRpcGet
|
||||
NRpcGetSize
|
||||
NRpcPut
|
||||
NRpcDelete
|
||||
|
||||
// todo cancel req
|
||||
)
|
||||
|
||||
type NetRPCRespType byte
|
||||
|
||||
const (
|
||||
NRpcOK NetRPCRespType = iota
|
||||
NRpcErr
|
||||
NRpcMore
|
||||
)
|
||||
|
||||
type NetRPCErrType byte
|
||||
|
||||
const (
|
||||
NRpcErrGeneric NetRPCErrType = iota
|
||||
NRpcErrNotFound
|
||||
)
|
||||
|
||||
type NetRpcReq struct {
|
||||
Type NetRPCReqType
|
||||
ID uint64
|
||||
|
||||
Cid []cid.Cid // todo maxsize?
|
||||
Data [][]byte // todo maxsize?
|
||||
}
|
||||
|
||||
type NetRpcResp struct {
|
||||
Type NetRPCRespType
|
||||
ID uint64
|
||||
|
||||
// error or cids in allkeys
|
||||
Data []byte // todo maxsize?
|
||||
|
||||
next <-chan NetRpcResp
|
||||
}
|
||||
|
||||
type NetRpcErr struct {
|
||||
Type NetRPCErrType
|
||||
|
||||
Msg string
|
||||
|
||||
// in case of NRpcErrNotFound
|
||||
Cid *cid.Cid
|
||||
}
|
||||
|
||||
type NetworkStore struct {
|
||||
// note: writer is thread-safe
|
||||
msgStream msgio.ReadWriteCloser
|
||||
|
||||
// atomic
|
||||
reqCount uint64
|
||||
|
||||
respLk sync.Mutex
|
||||
|
||||
// respMap is nil after store closes
|
||||
respMap map[uint64]chan<- NetRpcResp
|
||||
|
||||
closing chan struct{}
|
||||
closed chan struct{}
|
||||
|
||||
closeLk sync.Mutex
|
||||
onClose []func()
|
||||
}
|
||||
|
||||
func NewNetworkStore(mss msgio.ReadWriteCloser) *NetworkStore {
|
||||
ns := &NetworkStore{
|
||||
msgStream: mss,
|
||||
|
||||
respMap: map[uint64]chan<- NetRpcResp{},
|
||||
|
||||
closing: make(chan struct{}),
|
||||
closed: make(chan struct{}),
|
||||
}
|
||||
|
||||
go ns.receive()
|
||||
|
||||
return ns
|
||||
}
|
||||
|
||||
func (n *NetworkStore) shutdown(msg string) {
|
||||
if err := n.msgStream.Close(); err != nil {
|
||||
log.Errorw("closing netstore msg stream", "error", err)
|
||||
}
|
||||
|
||||
nerr := NetRpcErr{
|
||||
Type: NRpcErrGeneric,
|
||||
Msg: msg,
|
||||
Cid: nil,
|
||||
}
|
||||
|
||||
var errb bytes.Buffer
|
||||
if err := nerr.MarshalCBOR(&errb); err != nil {
|
||||
log.Errorw("netstore shutdown: error marshaling error", "err", err)
|
||||
}
|
||||
|
||||
n.respLk.Lock()
|
||||
for id, resps := range n.respMap {
|
||||
resps <- NetRpcResp{
|
||||
Type: NRpcErr,
|
||||
ID: id,
|
||||
Data: errb.Bytes(),
|
||||
}
|
||||
}
|
||||
|
||||
n.respMap = nil
|
||||
|
||||
n.respLk.Unlock()
|
||||
}
|
||||
|
||||
func (n *NetworkStore) OnClose(cb func()) {
|
||||
n.closeLk.Lock()
|
||||
defer n.closeLk.Unlock()
|
||||
|
||||
select {
|
||||
case <-n.closed:
|
||||
cb()
|
||||
default:
|
||||
n.onClose = append(n.onClose, cb)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NetworkStore) receive() {
|
||||
defer func() {
|
||||
n.closeLk.Lock()
|
||||
defer n.closeLk.Unlock()
|
||||
|
||||
close(n.closed)
|
||||
if n.onClose != nil {
|
||||
for _, f := range n.onClose {
|
||||
f()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-n.closing:
|
||||
n.shutdown("netstore stopping")
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
msg, err := n.msgStream.ReadMsg()
|
||||
if err != nil {
|
||||
n.shutdown(fmt.Sprintf("netstore ReadMsg: %s", err))
|
||||
return
|
||||
}
|
||||
|
||||
var resp NetRpcResp
|
||||
if err := resp.UnmarshalCBOR(bytes.NewReader(msg)); err != nil {
|
||||
n.shutdown(fmt.Sprintf("unmarshaling netstore response: %s", err))
|
||||
return
|
||||
}
|
||||
|
||||
n.msgStream.ReleaseMsg(msg)
|
||||
|
||||
n.respLk.Lock()
|
||||
if ch, ok := n.respMap[resp.ID]; ok {
|
||||
if resp.Type == NRpcMore {
|
||||
nch := make(chan NetRpcResp, 1)
|
||||
resp.next = nch
|
||||
n.respMap[resp.ID] = nch
|
||||
} else {
|
||||
delete(n.respMap, resp.ID)
|
||||
}
|
||||
|
||||
ch <- resp
|
||||
}
|
||||
n.respLk.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NetworkStore) sendRpc(rt NetRPCReqType, cids []cid.Cid, data [][]byte) (uint64, <-chan NetRpcResp, error) {
|
||||
rid := atomic.AddUint64(&n.reqCount, 1)
|
||||
|
||||
respCh := make(chan NetRpcResp, 1) // todo pool?
|
||||
|
||||
n.respLk.Lock()
|
||||
if n.respMap == nil {
|
||||
n.respLk.Unlock()
|
||||
return 0, nil, xerrors.Errorf("netstore closed")
|
||||
}
|
||||
n.respMap[rid] = respCh
|
||||
n.respLk.Unlock()
|
||||
|
||||
req := NetRpcReq{
|
||||
Type: rt,
|
||||
ID: rid,
|
||||
Cid: cids,
|
||||
Data: data,
|
||||
}
|
||||
|
||||
var rbuf bytes.Buffer // todo buffer pool
|
||||
if err := req.MarshalCBOR(&rbuf); err != nil {
|
||||
n.respLk.Lock()
|
||||
defer n.respLk.Unlock()
|
||||
|
||||
if n.respMap == nil {
|
||||
return 0, nil, xerrors.Errorf("netstore closed")
|
||||
}
|
||||
delete(n.respMap, rid)
|
||||
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
if err := n.msgStream.WriteMsg(rbuf.Bytes()); err != nil {
|
||||
n.respLk.Lock()
|
||||
defer n.respLk.Unlock()
|
||||
|
||||
if n.respMap == nil {
|
||||
return 0, nil, xerrors.Errorf("netstore closed")
|
||||
}
|
||||
delete(n.respMap, rid)
|
||||
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
return rid, respCh, nil
|
||||
}
|
||||
|
||||
func (n *NetworkStore) waitResp(ctx context.Context, rch <-chan NetRpcResp, rid uint64) (NetRpcResp, error) {
|
||||
select {
|
||||
case resp := <-rch:
|
||||
if resp.Type == NRpcErr {
|
||||
var e NetRpcErr
|
||||
if err := e.UnmarshalCBOR(bytes.NewReader(resp.Data)); err != nil {
|
||||
return NetRpcResp{}, xerrors.Errorf("unmarshaling error data: %w", err)
|
||||
}
|
||||
|
||||
var err error
|
||||
switch e.Type {
|
||||
case NRpcErrNotFound:
|
||||
if e.Cid != nil {
|
||||
err = ipld.ErrNotFound{
|
||||
Cid: *e.Cid,
|
||||
}
|
||||
} else {
|
||||
err = xerrors.Errorf("block not found, but cid was null")
|
||||
}
|
||||
case NRpcErrGeneric:
|
||||
err = xerrors.Errorf("generic error")
|
||||
default:
|
||||
err = xerrors.Errorf("unknown error type")
|
||||
}
|
||||
|
||||
return NetRpcResp{}, xerrors.Errorf("netstore error response: %s (%w)", e.Msg, err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
case <-ctx.Done():
|
||||
// todo send cancel req
|
||||
|
||||
n.respLk.Lock()
|
||||
if n.respMap != nil {
|
||||
delete(n.respMap, rid)
|
||||
}
|
||||
n.respLk.Unlock()
|
||||
|
||||
return NetRpcResp{}, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NetworkStore) Has(ctx context.Context, c cid.Cid) (bool, error) {
|
||||
req, rch, err := n.sendRpc(NRpcHas, []cid.Cid{c}, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
resp, err := n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(resp.Data) != 1 {
|
||||
return false, xerrors.Errorf("expected reposnse length to be 1 byte")
|
||||
}
|
||||
switch resp.Data[0] {
|
||||
case cbg.CborBoolTrue[0]:
|
||||
return true, nil
|
||||
case cbg.CborBoolFalse[0]:
|
||||
return false, nil
|
||||
default:
|
||||
return false, xerrors.Errorf("has: bad response: %x", resp.Data[0])
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NetworkStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
|
||||
req, rch, err := n.sendRpc(NRpcGet, []cid.Cid{c}, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return blocks.NewBlockWithCid(resp.Data, c)
|
||||
}
|
||||
|
||||
func (n *NetworkStore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error {
|
||||
req, rch, err := n.sendRpc(NRpcGet, []cid.Cid{c}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return callback(resp.Data) // todo return buf to pool
|
||||
}
|
||||
|
||||
func (n *NetworkStore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
|
||||
req, rch, err := n.sendRpc(NRpcGetSize, []cid.Cid{c}, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
resp, err := n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if len(resp.Data) != 4 {
|
||||
return 0, xerrors.Errorf("expected getsize response to be 4 bytes, was %d", resp.Data)
|
||||
}
|
||||
|
||||
return int(binary.LittleEndian.Uint32(resp.Data)), nil
|
||||
}
|
||||
|
||||
func (n *NetworkStore) Put(ctx context.Context, block blocks.Block) error {
|
||||
return n.PutMany(ctx, []blocks.Block{block})
|
||||
}
|
||||
|
||||
func (n *NetworkStore) PutMany(ctx context.Context, blocks []blocks.Block) error {
|
||||
// todo pool
|
||||
cids := make([]cid.Cid, len(blocks))
|
||||
blkDatas := make([][]byte, len(blocks))
|
||||
for i, block := range blocks {
|
||||
cids[i] = block.Cid()
|
||||
blkDatas[i] = block.RawData()
|
||||
}
|
||||
|
||||
req, rch, err := n.sendRpc(NRpcPut, cids, blkDatas)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NetworkStore) DeleteBlock(ctx context.Context, c cid.Cid) error {
|
||||
return n.DeleteMany(ctx, []cid.Cid{c})
|
||||
}
|
||||
|
||||
func (n *NetworkStore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
|
||||
req, rch, err := n.sendRpc(NRpcDelete, cids, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NetworkStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
|
||||
return nil, xerrors.Errorf("not supported")
|
||||
}
|
||||
|
||||
func (n *NetworkStore) HashOnRead(enabled bool) {
|
||||
// todo
|
||||
return
|
||||
}
|
||||
|
||||
func (n *NetworkStore) Stop(ctx context.Context) error {
|
||||
close(n.closing)
|
||||
|
||||
select {
|
||||
case <-n.closed:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
var _ Blockstore = &NetworkStore{}
|
237
blockstore/net_serve.go
Normal file
237
blockstore/net_serve.go
Normal file
@ -0,0 +1,237 @@
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
"github.com/libp2p/go-msgio"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type NetworkStoreHandler struct {
|
||||
msgStream msgio.ReadWriteCloser
|
||||
|
||||
bs Blockstore
|
||||
}
|
||||
|
||||
// NOTE: This code isn't yet hardened to accept untrusted input. See TODOs here and in net.go
|
||||
func HandleNetBstoreStream(ctx context.Context, bs Blockstore, mss msgio.ReadWriteCloser) *NetworkStoreHandler {
|
||||
ns := &NetworkStoreHandler{
|
||||
msgStream: mss,
|
||||
bs: bs,
|
||||
}
|
||||
|
||||
go ns.handle(ctx)
|
||||
|
||||
return ns
|
||||
}
|
||||
|
||||
func (h *NetworkStoreHandler) handle(ctx context.Context) {
|
||||
defer func() {
|
||||
if err := h.msgStream.Close(); err != nil {
|
||||
log.Errorw("error closing blockstore stream", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
var req NetRpcReq
|
||||
|
||||
ms, err := h.msgStream.ReadMsg()
|
||||
if err != nil {
|
||||
log.Warnw("bstore stream err", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := req.UnmarshalCBOR(bytes.NewReader(ms)); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
h.msgStream.ReleaseMsg(ms)
|
||||
|
||||
switch req.Type {
|
||||
case NRpcHas:
|
||||
if len(req.Cid) != 1 {
|
||||
if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
res, err := h.bs.Has(ctx, req.Cid[0])
|
||||
if err != nil {
|
||||
if err := h.respondError(req.ID, err, req.Cid[0]); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var resData [1]byte
|
||||
if res {
|
||||
resData[0] = cbg.CborBoolTrue[0]
|
||||
} else {
|
||||
resData[0] = cbg.CborBoolFalse[0]
|
||||
}
|
||||
|
||||
if err := h.respond(req.ID, NRpcOK, resData[:]); err != nil {
|
||||
log.Warnw("writing response", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
case NRpcGet:
|
||||
if len(req.Cid) != 1 {
|
||||
if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err := h.bs.View(ctx, req.Cid[0], func(bdata []byte) error {
|
||||
return h.respond(req.ID, NRpcOK, bdata)
|
||||
})
|
||||
if err != nil {
|
||||
if err := h.respondError(req.ID, err, req.Cid[0]); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
case NRpcGetSize:
|
||||
if len(req.Cid) != 1 {
|
||||
if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
sz, err := h.bs.GetSize(ctx, req.Cid[0])
|
||||
if err != nil {
|
||||
if err := h.respondError(req.ID, err, req.Cid[0]); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var resData [4]byte
|
||||
binary.LittleEndian.PutUint32(resData[:], uint32(sz))
|
||||
|
||||
if err := h.respond(req.ID, NRpcOK, resData[:]); err != nil {
|
||||
log.Warnw("writing response", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
case NRpcPut:
|
||||
blocks := make([]block.Block, len(req.Cid))
|
||||
|
||||
if len(req.Cid) != len(req.Data) {
|
||||
if err := h.respondError(req.ID, xerrors.New("cid count didn't match data count"), cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for i := range req.Cid {
|
||||
blocks[i], err = block.NewBlockWithCid(req.Data[i], req.Cid[i])
|
||||
if err != nil {
|
||||
log.Warnw("make block", "error", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err := h.bs.PutMany(ctx, blocks)
|
||||
if err != nil {
|
||||
if err := h.respondError(req.ID, err, cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err := h.respond(req.ID, NRpcOK, []byte{}); err != nil {
|
||||
log.Warnw("writing response", "error", err)
|
||||
return
|
||||
}
|
||||
case NRpcDelete:
|
||||
err := h.bs.DeleteMany(ctx, req.Cid)
|
||||
if err != nil {
|
||||
if err := h.respondError(req.ID, err, cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err := h.respond(req.ID, NRpcOK, []byte{}); err != nil {
|
||||
log.Warnw("writing response", "error", err)
|
||||
return
|
||||
}
|
||||
default:
|
||||
if err := h.respondError(req.ID, xerrors.New("unsupported request type"), cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *NetworkStoreHandler) respondError(req uint64, uerr error, c cid.Cid) error {
|
||||
var resp NetRpcResp
|
||||
resp.ID = req
|
||||
resp.Type = NRpcErr
|
||||
|
||||
nerr := NetRpcErr{
|
||||
Type: NRpcErrGeneric,
|
||||
Msg: uerr.Error(),
|
||||
}
|
||||
if ipld.IsNotFound(uerr) {
|
||||
nerr.Type = NRpcErrNotFound
|
||||
nerr.Cid = &c
|
||||
}
|
||||
|
||||
var edata bytes.Buffer
|
||||
if err := nerr.MarshalCBOR(&edata); err != nil {
|
||||
return xerrors.Errorf("marshaling error data: %w", err)
|
||||
}
|
||||
|
||||
resp.Data = edata.Bytes()
|
||||
|
||||
var msg bytes.Buffer
|
||||
if err := resp.MarshalCBOR(&msg); err != nil {
|
||||
return xerrors.Errorf("marshaling error response: %w", err)
|
||||
}
|
||||
|
||||
if err := h.msgStream.WriteMsg(msg.Bytes()); err != nil {
|
||||
return xerrors.Errorf("write error response: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *NetworkStoreHandler) respond(req uint64, rt NetRPCRespType, data []byte) error {
|
||||
var resp NetRpcResp
|
||||
resp.ID = req
|
||||
resp.Type = rt
|
||||
resp.Data = data
|
||||
|
||||
var msg bytes.Buffer
|
||||
if err := resp.MarshalCBOR(&msg); err != nil {
|
||||
return xerrors.Errorf("marshaling response: %w", err)
|
||||
}
|
||||
|
||||
if err := h.msgStream.WriteMsg(msg.Bytes()); err != nil {
|
||||
return xerrors.Errorf("write response: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
63 blockstore/net_test.go Normal file
@ -0,0 +1,63 @@
package blockstore

import (
	"context"
	"fmt"
	"io"
	"testing"

	block "github.com/ipfs/go-block-format"
	ipld "github.com/ipfs/go-ipld-format"
	"github.com/libp2p/go-msgio"
	"github.com/stretchr/testify/require"
)

func TestNetBstore(t *testing.T) {
	ctx := context.Background()

	cr, sw := io.Pipe()
	sr, cw := io.Pipe()

	cm := msgio.Combine(msgio.NewWriter(cw), msgio.NewReader(cr))
	sm := msgio.Combine(msgio.NewWriter(sw), msgio.NewReader(sr))

	bbs := NewMemorySync()
	_ = HandleNetBstoreStream(ctx, bbs, sm)

	nbs := NewNetworkStore(cm)

	tb1 := block.NewBlock([]byte("aoeu"))

	h, err := nbs.Has(ctx, tb1.Cid())
	require.NoError(t, err)
	require.False(t, h)

	err = nbs.Put(ctx, tb1)
	require.NoError(t, err)

	h, err = nbs.Has(ctx, tb1.Cid())
	require.NoError(t, err)
	require.True(t, h)

	sz, err := nbs.GetSize(ctx, tb1.Cid())
	require.NoError(t, err)
	require.Equal(t, 4, sz)

	err = nbs.DeleteBlock(ctx, tb1.Cid())
	require.NoError(t, err)

	h, err = nbs.Has(ctx, tb1.Cid())
	require.NoError(t, err)
	require.False(t, h)

	_, err = nbs.Get(ctx, tb1.Cid())
	fmt.Println(err)
	require.True(t, ipld.IsNotFound(err))

	err = nbs.Put(ctx, tb1)
	require.NoError(t, err)

	b, err := nbs.Get(ctx, tb1.Cid())
	require.NoError(t, err)
	require.Equal(t, "aoeu", string(b.RawData()))
}
100 blockstore/net_ws.go Normal file
@ -0,0 +1,100 @@
package blockstore

import (
	"bytes"
	"context"

	"github.com/gorilla/websocket"
	"github.com/libp2p/go-msgio"
	"golang.org/x/xerrors"
)

type wsWrapper struct {
	wc *websocket.Conn

	nextMsg []byte
}

func (w *wsWrapper) Read(b []byte) (int, error) {
	return 0, xerrors.New("read unsupported")
}

func (w *wsWrapper) ReadMsg() ([]byte, error) {
	if w.nextMsg != nil {
		nm := w.nextMsg
		w.nextMsg = nil
		return nm, nil
	}

	mt, r, err := w.wc.NextReader()
	if err != nil {
		return nil, err
	}

	switch mt {
	case websocket.BinaryMessage, websocket.TextMessage:
	default:
		return nil, xerrors.Errorf("unexpected message type")
	}

	// todo pool
	// todo limit sizes
	var mbuf bytes.Buffer
	if _, err := mbuf.ReadFrom(r); err != nil {
		return nil, err
	}

	return mbuf.Bytes(), nil
}

func (w *wsWrapper) ReleaseMsg(bytes []byte) {
	// todo use a pool
}

func (w *wsWrapper) NextMsgLen() (int, error) {
	if w.nextMsg != nil {
		return len(w.nextMsg), nil
	}

	mt, msg, err := w.wc.ReadMessage()
	if err != nil {
		return 0, err
	}

	switch mt {
	case websocket.BinaryMessage, websocket.TextMessage:
	default:
		return 0, xerrors.Errorf("unexpected message type")
	}

	w.nextMsg = msg
	return len(w.nextMsg), nil
}

func (w *wsWrapper) Write(bytes []byte) (int, error) {
	return 0, xerrors.New("write unsupported")
}

func (w *wsWrapper) WriteMsg(bytes []byte) error {
	return w.wc.WriteMessage(websocket.BinaryMessage, bytes)
}

func (w *wsWrapper) Close() error {
	return w.wc.Close()
}

var _ msgio.ReadWriteCloser = &wsWrapper{}

func wsConnToMio(wc *websocket.Conn) msgio.ReadWriteCloser {
	return &wsWrapper{
		wc: wc,
	}
}

func HandleNetBstoreWS(ctx context.Context, bs Blockstore, wc *websocket.Conn) *NetworkStoreHandler {
	return HandleNetBstoreStream(ctx, bs, wsConnToMio(wc))
}

func NewNetworkStoreWS(wc *websocket.Conn) *NetworkStore {
	return NewNetworkStore(wsConnToMio(wc))
}
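For orientation, a minimal sketch of wiring the websocket entry points added above end to end. The HTTP route, listen address and upgrader settings are illustrative assumptions; only HandleNetBstoreWS and NewNetworkStoreWS come from this change.

// Sketch only: serve a blockstore over a websocket and dial it back as a NetworkStore.
package example

import (
	"context"
	"net/http"

	"github.com/gorilla/websocket"

	"github.com/filecoin-project/lotus/blockstore"
)

var upgrader = websocket.Upgrader{} // default options; assumed, not part of this diff

func serve(bs blockstore.Blockstore) error {
	http.HandleFunc("/bstore", func(w http.ResponseWriter, r *http.Request) {
		wc, err := upgrader.Upgrade(w, r, nil)
		if err != nil {
			return
		}
		// starts the handler goroutine that answers NetRpc requests on this socket
		_ = blockstore.HandleNetBstoreWS(r.Context(), bs, wc)
	})
	return http.ListenAndServe("127.0.0.1:3456", nil) // illustrative address
}

func dial(ctx context.Context) (*blockstore.NetworkStore, error) {
	wc, _, err := websocket.DefaultDialer.DialContext(ctx, "ws://127.0.0.1:3456/bstore", nil)
	if err != nil {
		return nil, err
	}
	return blockstore.NewNetworkStoreWS(wc), nil
}

The wsWrapper above adapts *websocket.Conn to msgio, so the same HandleNetBstoreStream path serves both raw message streams and websockets.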
@ -98,6 +98,10 @@ type Config struct {
|
||||
// and directly purges cold blocks.
|
||||
DiscardColdBlocks bool
|
||||
|
||||
// UniversalColdBlocks indicates whether all blocks being garbage collected and purged
|
||||
// from the hotstore should be written to the cold store
|
||||
UniversalColdBlocks bool
|
||||
|
||||
// HotstoreMessageRetention indicates the hotstore retention policy for messages.
|
||||
// It has the following semantics:
|
||||
// - a value of 0 will only retain messages within the compaction boundary (4 finalities)
|
||||
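Read together with the comments above, the two cold-block policies roughly correspond to configurations like the following (a sketch; the field names are the ones in this hunk, the surrounding wiring is assumed):

package example

import "github.com/filecoin-project/lotus/blockstore/splitstore"

// Illustrative only: a "discard" node purges cold blocks outright, while a
// "universal" node copies everything leaving the hotstore into the coldstore.
var (
	discardCfg   = &splitstore.Config{MarkSetType: "badger", DiscardColdBlocks: true}
	universalCfg = &splitstore.Config{MarkSetType: "badger", UniversalColdBlocks: true}
)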
@ -111,21 +115,6 @@ type Config struct {
|
||||
// A positive value is the number of compactions before a full GC is performed;
|
||||
// a value of 1 will perform full GC in every compaction.
|
||||
HotStoreFullGCFrequency uint64
|
||||
|
||||
// EnableColdStoreAutoPrune turns on compaction of the cold store i.e. pruning
|
||||
// where hotstore compaction occurs every finality epochs pruning happens every 3 finalities
|
||||
// Default is false
|
||||
EnableColdStoreAutoPrune bool
|
||||
|
||||
// ColdStoreFullGCFrequency specifies how often to perform a full (moving) GC on the coldstore.
|
||||
// Only applies if auto prune is enabled. A value of 0 disables while a value of 1 will do
|
||||
// full GC in every prune.
|
||||
// Default is 7 (about once a week)
|
||||
ColdStoreFullGCFrequency uint64
|
||||
|
||||
// ColdStoreRetention specifies the retention policy for data reachable from the chain, in
|
||||
// finalities beyond the compaction boundary, default is 0, -1 retains everything
|
||||
ColdStoreRetention int64
|
||||
}
|
||||
|
||||
// ChainAccessor allows the Splitstore to access the chain. It will most likely
|
||||
|
@ -125,7 +125,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}, func(cid.Cid) error { return nil })
|
||||
|
||||
if err != nil {
|
||||
err = xerrors.Errorf("error walking chain: %w", err)
|
||||
|
@ -20,7 +20,6 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
bstore "github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/metrics"
|
||||
@ -134,39 +133,6 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
|
||||
log.Infow("compaction done", "took", time.Since(start))
|
||||
}()
|
||||
// only prune if auto prune is enabled and after at least one compaction
|
||||
} else if s.cfg.EnableColdStoreAutoPrune && epoch-s.pruneEpoch > PruneThreshold && s.compactionIndex > 0 {
|
||||
s.beginTxnProtect()
|
||||
s.compactType = cold
|
||||
go func() {
|
||||
defer atomic.StoreInt32(&s.compacting, 0)
|
||||
defer s.endTxnProtect()
|
||||
|
||||
log.Info("pruning splitstore")
|
||||
start := time.Now()
|
||||
|
||||
var retainP func(int64) bool
|
||||
switch {
|
||||
case s.cfg.ColdStoreRetention > int64(0):
|
||||
retainP = func(depth int64) bool {
|
||||
return depth <= int64(CompactionBoundary)+s.cfg.ColdStoreRetention*int64(build.Finality)
|
||||
}
|
||||
case s.cfg.ColdStoreRetention < 0:
|
||||
retainP = func(_ int64) bool { return true }
|
||||
default:
|
||||
retainP = func(depth int64) bool {
|
||||
return depth <= int64(CompactionBoundary)
|
||||
}
|
||||
}
|
||||
movingGC := s.cfg.ColdStoreFullGCFrequency > 0 && s.pruneIndex%int64(s.cfg.ColdStoreFullGCFrequency) == 0
|
||||
var gcOpts []bstore.BlockstoreGCOption
|
||||
if movingGC {
|
||||
gcOpts = append(gcOpts, bstore.WithFullGC(true))
|
||||
}
|
||||
doGC := func() error { return s.gcBlockstore(s.cold, gcOpts) }
|
||||
|
||||
s.prune(curTs, retainP, doGC)
|
||||
log.Infow("prune done", "took", time.Since(start))
|
||||
}()
|
||||
} else {
|
||||
// no compaction necessary
|
||||
atomic.StoreInt32(&s.compacting, 0)
|
||||
@ -562,6 +528,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
defer markSet.Close() //nolint:errcheck
|
||||
defer s.debug.Flush()
|
||||
|
||||
coldSet, err := s.markSetEnv.New("cold", s.markSetSize)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error creating cold mark set: %w", err)
|
||||
}
|
||||
defer coldSet.Close() //nolint:errcheck
|
||||
|
||||
if err := s.checkClosing(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -580,24 +552,52 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
startMark := time.Now()
|
||||
|
||||
count := new(int64)
|
||||
err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{},
|
||||
func(c cid.Cid) error {
|
||||
if isUnitaryObject(c) {
|
||||
return errStopWalk
|
||||
}
|
||||
|
||||
visit, err := markSet.Visit(c)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error visiting object: %w", err)
|
||||
}
|
||||
|
||||
if !visit {
|
||||
return errStopWalk
|
||||
}
|
||||
|
||||
atomic.AddInt64(count, 1)
|
||||
coldCount := new(int64)
|
||||
fCold := func(c cid.Cid) error {
|
||||
// Writes to cold set optimized away in universal and discard mode
|
||||
//
|
||||
// Nothing gets written to cold store in discard mode so no cold objects to write
|
||||
// Everything not marked hot gets written to cold store in universal mode so no need to track cold objects separately
|
||||
if s.cfg.DiscardColdBlocks || s.cfg.UniversalColdBlocks {
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if isUnitaryObject(c) {
|
||||
return errStopWalk
|
||||
}
|
||||
|
||||
visit, err := coldSet.Visit(c)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error visiting object: %w", err)
|
||||
}
|
||||
|
||||
if !visit {
|
||||
return errStopWalk
|
||||
}
|
||||
|
||||
atomic.AddInt64(coldCount, 1)
|
||||
return nil
|
||||
}
|
||||
fHot := func(c cid.Cid) error {
|
||||
if isUnitaryObject(c) {
|
||||
return errStopWalk
|
||||
}
|
||||
|
||||
visit, err := markSet.Visit(c)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error visiting object: %w", err)
|
||||
}
|
||||
|
||||
if !visit {
|
||||
return errStopWalk
|
||||
}
|
||||
|
||||
atomic.AddInt64(count, 1)
|
||||
return nil
|
||||
}
|
||||
|
||||
err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{}, fHot, fCold)
|
||||
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error marking: %w", err)
|
||||
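The marking pass above now feeds two callbacks through walkChain. As a condensed illustration of the fCold short-circuit described in the comments (names borrowed from the hunk, everything else simplified):

package example

import (
	"sync/atomic"

	"github.com/ipfs/go-cid"
)

// In discard mode nothing is ever written to the coldstore, and in universal mode
// every unmarked block is written anyway, so a separate cold mark set is only
// needed when some (but not all) cold chain data is retained.
func makeColdVisitor(discard, universal bool, visit func(cid.Cid) (bool, error), coldCount *int64) func(cid.Cid) error {
	return func(c cid.Cid) error {
		if discard || universal {
			return nil // cold set optimized away, as in the hunk above
		}
		ok, err := visit(c)
		if err != nil || !ok {
			return err
		}
		atomic.AddInt64(coldCount, 1)
		return nil
	}
}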
@ -631,8 +631,14 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
}
|
||||
defer coldw.Close() //nolint:errcheck
|
||||
|
||||
purgew, err := NewColdSetWriter(s.discardSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error creating deadset: %w", err)
|
||||
}
|
||||
defer purgew.Close() //nolint:errcheck
|
||||
|
||||
// some stats for logging
|
||||
var hotCnt, coldCnt int
|
||||
var hotCnt, coldCnt, purgeCnt int
|
||||
err = s.hot.ForEachKey(func(c cid.Cid) error {
|
||||
// was it marked?
|
||||
mark, err := markSet.Has(c)
|
||||
@ -645,9 +651,27 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// it's cold, mark it as candidate for move
|
||||
// it needs to be removed from hot store, mark it as candidate for purge
|
||||
if err := purgew.Write(c); err != nil {
|
||||
return xerrors.Errorf("error writing cid to purge set: %w", err)
|
||||
}
|
||||
purgeCnt++
|
||||
|
||||
coldMark, err := coldSet.Has(c)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error checking cold mark set for %s: %w", c, err)
|
||||
}
|
||||
|
||||
// Discard mode: coldMark == false, s.cfg.UniversalColdBlocks == false, always return here, no writes to cold store
|
||||
// Universal mode: coldMark == false, s.cfg.UniversalColdBlocks == true, never stop here, all writes to cold store
|
||||
// Otherwise: s.cfg.UniversalColdBlocks == false, if !coldMark stop here and don't write to cold store, if coldMark continue and write to cold store
|
||||
if !coldMark && !s.cfg.UniversalColdBlocks { // universal mode means mark everything as cold
|
||||
return nil
|
||||
}
|
||||
|
||||
// it's cold, mark as candidate for move
|
||||
if err := coldw.Write(c); err != nil {
|
||||
return xerrors.Errorf("error writing cid to coldstore: %w", err)
|
||||
return xerrors.Errorf("error writing cid to cold set")
|
||||
}
|
||||
coldCnt++
|
||||
|
||||
@ -656,7 +680,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error collecting cold objects: %w", err)
|
||||
}
|
||||
|
||||
if err := purgew.Close(); err != nil {
|
||||
return xerrors.Errorf("erroring closing purgeset: %w", err)
|
||||
}
|
||||
if err := coldw.Close(); err != nil {
|
||||
return xerrors.Errorf("error closing coldset: %w", err)
|
||||
}
|
||||
@ -705,6 +731,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
}
|
||||
}
|
||||
|
||||
purger, err := NewColdSetReader(s.discardSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error opening coldset: %w", err)
|
||||
}
|
||||
defer purger.Close() //nolint:errcheck
|
||||
|
||||
// 4. Purge cold objects with checkpointing for recovery.
|
||||
// This is the critical section of compaction, whereby any cold object not in the markSet is
|
||||
// considered already deleted.
|
||||
@ -736,7 +768,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
|
||||
// 5. purge cold objects from the hotstore, taking protected references into account
|
||||
log.Info("purging cold objects from the hotstore")
|
||||
startPurge := time.Now()
|
||||
err = s.purge(coldr, checkpoint, markSet)
|
||||
err = s.purge(purger, checkpoint, markSet)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error purging cold objects: %w", err)
|
||||
}
|
||||
@ -864,7 +896,7 @@ func (s *SplitStore) endCriticalSection() {
|
||||
}
|
||||
|
||||
func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch,
|
||||
visitor ObjectVisitor, f func(cid.Cid) error) error {
|
||||
visitor ObjectVisitor, fHot, fCold func(cid.Cid) error) error {
|
||||
var walked ObjectVisitor
|
||||
var mx sync.Mutex
|
||||
// we copy the tipset first into a new slice, which allows us to reuse it in every epoch.
|
||||
@ -886,7 +918,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
|
||||
|
||||
atomic.AddInt64(walkCnt, 1)
|
||||
|
||||
if err := f(c); err != nil {
|
||||
if err := fHot(c); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -904,27 +936,37 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
|
||||
if inclMsgs < inclState {
|
||||
// we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we
|
||||
// synced from snapshot and have a long HotStoreMessageRetentionPolicy.
|
||||
if err := s.walkObjectIncomplete(hdr.Messages, visitor, f, stopWalk); err != nil {
|
||||
if err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk); err != nil {
|
||||
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
|
||||
}
|
||||
|
||||
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, f, stopWalk); err != nil {
|
||||
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil {
|
||||
return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
|
||||
}
|
||||
} else {
|
||||
if err := s.walkObject(hdr.Messages, visitor, f); err != nil {
|
||||
if err := s.walkObject(hdr.Messages, visitor, fHot); err != nil {
|
||||
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
|
||||
}
|
||||
|
||||
if err := s.walkObject(hdr.ParentMessageReceipts, visitor, f); err != nil {
|
||||
if err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot); err != nil {
|
||||
return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// messages and receipts outside of inclMsgs are included in the cold store
|
||||
if hdr.Height < inclMsgs && hdr.Height > 0 {
|
||||
if err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk); err != nil {
|
||||
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
|
||||
}
|
||||
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk); err != nil {
|
||||
return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
|
||||
}
|
||||
}
|
||||
|
||||
// state is only retained if within the inclState boundary, with the exception of genesis
|
||||
if hdr.Height >= inclState || hdr.Height == 0 {
|
||||
if err := s.walkObject(hdr.ParentStateRoot, visitor, f); err != nil {
|
||||
if err := s.walkObject(hdr.ParentStateRoot, visitor, fHot); err != nil {
|
||||
return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
|
||||
}
|
||||
atomic.AddInt64(scanCnt, 1)
|
||||
@ -1296,7 +1338,7 @@ func (s *SplitStore) coldSetPath() string {
|
||||
return filepath.Join(s.path, "coldset")
|
||||
}
|
||||
|
||||
func (s *SplitStore) deadSetPath() string {
|
||||
func (s *SplitStore) discardSetPath() string {
|
||||
return filepath.Join(s.path, "deadset")
|
||||
}
|
||||
|
||||
|
@ -208,7 +208,7 @@ func (s *SplitStore) doPrune(curTs *types.TipSet, retainStateP func(int64) bool,
|
||||
log.Info("collecting dead objects")
|
||||
startCollect := time.Now()
|
||||
|
||||
deadw, err := NewColdSetWriter(s.deadSetPath())
|
||||
deadw, err := NewColdSetWriter(s.discardSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error creating coldset: %w", err)
|
||||
}
|
||||
@ -267,7 +267,7 @@ func (s *SplitStore) doPrune(curTs *types.TipSet, retainStateP func(int64) bool,
|
||||
return err
|
||||
}
|
||||
|
||||
deadr, err := NewColdSetReader(s.deadSetPath())
|
||||
deadr, err := NewColdSetReader(s.discardSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error opening deadset: %w", err)
|
||||
}
|
||||
@ -311,10 +311,10 @@ func (s *SplitStore) doPrune(curTs *types.TipSet, retainStateP func(int64) bool,
|
||||
log.Warnf("error removing checkpoint: %s", err)
|
||||
}
|
||||
if err := deadr.Close(); err != nil {
|
||||
log.Warnf("error closing deadset: %s", err)
|
||||
log.Warnf("error closing discard set: %s", err)
|
||||
}
|
||||
if err := os.Remove(s.deadSetPath()); err != nil {
|
||||
log.Warnf("error removing deadset: %s", err)
|
||||
if err := os.Remove(s.discardSetPath()); err != nil {
|
||||
log.Warnf("error removing discard set: %s", err)
|
||||
}
|
||||
|
||||
// we are done; do some housekeeping
|
||||
@ -344,7 +344,7 @@ func (s *SplitStore) completePrune() error {
|
||||
}
|
||||
defer checkpoint.Close() //nolint:errcheck
|
||||
|
||||
deadr, err := NewColdSetReader(s.deadSetPath())
|
||||
deadr, err := NewColdSetReader(s.discardSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error opening deadset: %w", err)
|
||||
}
|
||||
@ -378,7 +378,7 @@ func (s *SplitStore) completePrune() error {
|
||||
if err := deadr.Close(); err != nil {
|
||||
log.Warnf("error closing deadset: %s", err)
|
||||
}
|
||||
if err := os.Remove(s.deadSetPath()); err != nil {
|
||||
if err := os.Remove(s.discardSetPath()); err != nil {
|
||||
log.Warnf("error removing deadset: %s", err)
|
||||
}
|
||||
|
||||
|
@ -38,6 +38,7 @@ func init() {
|
||||
func testSplitStore(t *testing.T, cfg *Config) {
|
||||
ctx := context.Background()
|
||||
chain := &mockChain{t: t}
|
||||
fmt.Printf("Config: %v\n", cfg)
|
||||
|
||||
// the myriads of stores
|
||||
ds := dssync.MutexWrap(datastore.NewMapDatastore())
|
||||
@ -225,7 +226,7 @@ func TestSplitStoreCompaction(t *testing.T) {
|
||||
//stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
testSplitStore(t, &Config{MarkSetType: "map"})
|
||||
testSplitStore(t, &Config{MarkSetType: "map", UniversalColdBlocks: true})
|
||||
}
|
||||
|
||||
func TestSplitStoreCompactionWithBadger(t *testing.T) {
|
||||
@ -237,7 +238,7 @@ func TestSplitStoreCompactionWithBadger(t *testing.T) {
|
||||
t.Cleanup(func() {
|
||||
badgerMarkSetBatchSize = bs
|
||||
})
|
||||
testSplitStore(t, &Config{MarkSetType: "badger"})
|
||||
testSplitStore(t, &Config{MarkSetType: "badger", UniversalColdBlocks: true})
|
||||
}
|
||||
|
||||
func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
|
||||
@ -283,7 +284,7 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
|
||||
path := t.TempDir()
|
||||
|
||||
// open the splitstore
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map", UniversalColdBlocks: true})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -422,7 +423,7 @@ func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore.
|
||||
|
||||
path := t.TempDir()
|
||||
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map", UniversalColdBlocks: true})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -522,7 +523,7 @@ func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blocks
|
||||
|
||||
path := t.TempDir()
|
||||
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map", UniversalColdBlocks: true})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -110,7 +110,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
|
||||
mx.Unlock()
|
||||
|
||||
return nil
|
||||
})
|
||||
}, func(cid.Cid) error { return nil })
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -21,16 +21,18 @@ func TestEmbeddedMetadata(t *testing.T) {
|
||||
|
||||
// Test that we're registering the manifest correctly.
|
||||
func TestRegistration(t *testing.T) {
|
||||
manifestCid, found := actors.GetManifest(actorstypes.Version8)
|
||||
require.True(t, found)
|
||||
require.True(t, manifestCid.Defined())
|
||||
for _, av := range []actorstypes.Version{actorstypes.Version8, actorstypes.Version9} {
|
||||
manifestCid, found := actors.GetManifest(av)
|
||||
require.True(t, found)
|
||||
require.True(t, manifestCid.Defined())
|
||||
|
||||
for _, key := range actors.GetBuiltinActorsKeys(actorstypes.Version8) {
|
||||
actorCid, found := actors.GetActorCodeID(actorstypes.Version8, key)
|
||||
require.True(t, found)
|
||||
name, version, found := actors.GetActorMetaByCode(actorCid)
|
||||
require.True(t, found)
|
||||
require.Equal(t, actorstypes.Version8, version)
|
||||
require.Equal(t, key, name)
|
||||
for _, key := range actors.GetBuiltinActorsKeys(av) {
|
||||
actorCid, found := actors.GetActorCodeID(av, key)
|
||||
require.True(t, found)
|
||||
name, version, found := actors.GetActorMetaByCode(actorCid)
|
||||
require.True(t, found)
|
||||
require.Equal(t, av, version)
|
||||
require.Equal(t, key, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
Binary file not shown.
Binary file not shown.
@ -37,7 +37,7 @@ func BuildTypeString() string {
|
||||
}
|
||||
|
||||
// BuildVersion is the local build version
|
||||
const BuildVersion = "1.18.2"
|
||||
const BuildVersion = "1.19.0"
|
||||
|
||||
func UserVersion() string {
|
||||
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
|
||||
|
@ -15,8 +15,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
Address = builtin9.DatacapActorAddr
|
||||
Methods = builtin9.MethodsDatacap
|
||||
Address = builtin{{.latestVersion}}.DatacapActorAddr
|
||||
Methods = builtin{{.latestVersion}}.MethodsDatacap
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
|
@ -82,6 +82,7 @@ var (
|
||||
ErrRBFTooLowPremium = errors.New("replace by fee has too low GasPremium")
|
||||
ErrTooManyPendingMessages = errors.New("too many pending messages for actor")
|
||||
ErrNonceGap = errors.New("unfulfilled nonce gap")
|
||||
ErrExistingNonce = errors.New("message with nonce already exists")
|
||||
)
|
||||
|
||||
const (
|
||||
@ -112,7 +113,7 @@ type MessagePoolEvtMessage struct {
|
||||
|
||||
func init() {
|
||||
// if the republish interval is too short compared to the pubsub timecache, adjust it
|
||||
minInterval := pubsub.TimeCacheDuration + time.Duration(build.PropagationDelaySecs)
|
||||
minInterval := pubsub.TimeCacheDuration + time.Duration(build.PropagationDelaySecs)*time.Second
|
||||
if RepublishInterval < minInterval {
|
||||
RepublishInterval = minInterval
|
||||
}
|
||||
@ -276,7 +277,7 @@ func (ms *msgSet) add(m *types.SignedMessage, mp *MessagePool, strict, untrusted
|
||||
}
|
||||
} else {
|
||||
return false, xerrors.Errorf("message from %s with nonce %d already in mpool: %w",
|
||||
m.Message.From, m.Message.Nonce, ErrSoftValidationFailure)
|
||||
m.Message.From, m.Message.Nonce, ErrExistingNonce)
|
||||
}
|
||||
|
||||
ms.requiredFunds.Sub(ms.requiredFunds, exms.Message.RequiredFunds().Int)
|
||||
@ -667,7 +668,9 @@ func (mp *MessagePool) verifyMsgBeforeAdd(ctx context.Context, m *types.SignedMe
|
||||
return publish, nil
|
||||
}
|
||||
|
||||
func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Cid, error) {
|
||||
// Push checks the signed message for any violations, adds the message to the message pool and
|
||||
// publishes the message if the publish flag is set
|
||||
func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage, publish bool) (cid.Cid, error) {
|
||||
done := metrics.Timer(ctx, metrics.MpoolPushDuration)
|
||||
defer done()
|
||||
|
||||
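With the new signature documented above, callers now decide whether the pool also gossips the message. A hedged usage sketch:

package example

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/messagepool"
	"github.com/filecoin-project/lotus/chain/types"
)

// publish=true keeps the previous behaviour (validate, add to the pool, gossip on
// pubsub); publish=false only adds the message locally, leaving propagation to the caller.
func pushMaybePublish(ctx context.Context, mp *messagepool.MessagePool, m *types.SignedMessage, publish bool) (cid.Cid, error) {
	return mp.Push(ctx, m, publish)
}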
@ -683,14 +686,14 @@ func (mp *MessagePool) Push(ctx context.Context, m *types.SignedMessage) (cid.Ci
|
||||
}()
|
||||
|
||||
mp.curTsLk.Lock()
|
||||
publish, err := mp.addTs(ctx, m, mp.curTs, true, false)
|
||||
ok, err := mp.addTs(ctx, m, mp.curTs, true, false)
|
||||
if err != nil {
|
||||
mp.curTsLk.Unlock()
|
||||
return cid.Undef, err
|
||||
}
|
||||
mp.curTsLk.Unlock()
|
||||
|
||||
if publish {
|
||||
if ok && publish {
|
||||
msgb, err := m.Serialize()
|
||||
if err != nil {
|
||||
return cid.Undef, xerrors.Errorf("error serializing message: %w", err)
|
||||
@ -1583,3 +1586,8 @@ func getBaseFeeLowerBound(baseFee, factor types.BigInt) types.BigInt {
|
||||
|
||||
return baseFeeLowerBound
|
||||
}
|
||||
|
||||
type MpoolNonceAPI interface {
|
||||
GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error)
|
||||
GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
|
||||
}
|
||||
|
@ -545,7 +545,7 @@ func TestLoadLocal(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||
//stm: @CHAIN_MEMPOOL_PUSH_001
|
||||
cid, err := mp.Push(context.TODO(), m)
|
||||
cid, err := mp.Push(context.TODO(), m, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -618,7 +618,7 @@ func TestClearAll(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||
//stm: @CHAIN_MEMPOOL_PUSH_001
|
||||
_, err := mp.Push(context.TODO(), m)
|
||||
_, err := mp.Push(context.TODO(), m, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -676,7 +676,7 @@ func TestClearNonLocal(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||
//stm: @CHAIN_MEMPOOL_PUSH_001
|
||||
_, err := mp.Push(context.TODO(), m)
|
||||
_, err := mp.Push(context.TODO(), m, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -749,7 +749,7 @@ func TestUpdates(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||
//stm: @CHAIN_MEMPOOL_PUSH_001
|
||||
_, err := mp.Push(context.TODO(), m)
|
||||
_, err := mp.Push(context.TODO(), m, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -12,7 +12,6 @@ import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/messagesigner"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -42,7 +41,7 @@ type mpoolProvider struct {
|
||||
sm *stmgr.StateManager
|
||||
ps *pubsub.PubSub
|
||||
|
||||
lite messagesigner.MpoolNonceAPI
|
||||
lite MpoolNonceAPI
|
||||
}
|
||||
|
||||
var _ Provider = (*mpoolProvider)(nil)
|
||||
@ -51,7 +50,7 @@ func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider {
|
||||
return &mpoolProvider{sm: sm, ps: ps}
|
||||
}
|
||||
|
||||
func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer messagesigner.MpoolNonceAPI) Provider {
|
||||
func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer MpoolNonceAPI) Provider {
|
||||
return &mpoolProvider{sm: sm, ps: ps, lite: noncer}
|
||||
}
|
||||
|
||||
|
@ -60,7 +60,7 @@ func TestRepubMessages(t *testing.T) {
|
||||
for i := 0; i < 10; i++ {
|
||||
m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1))
|
||||
//stm: @CHAIN_MEMPOOL_PUSH_001
|
||||
_, err := mp.Push(context.TODO(), m)
|
||||
_, err := mp.Push(context.TODO(), m, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
@ -24,9 +25,12 @@ const dsKeyMsgUUIDSet = "MsgUuidSet"
|
||||
|
||||
var log = logging.Logger("messagesigner")
|
||||
|
||||
type MpoolNonceAPI interface {
|
||||
GetNonce(context.Context, address.Address, types.TipSetKey) (uint64, error)
|
||||
GetActor(context.Context, address.Address, types.TipSetKey) (*types.Actor, error)
|
||||
type MsgSigner interface {
|
||||
SignMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, cb func(*types.SignedMessage) error) (*types.SignedMessage, error)
|
||||
GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error)
|
||||
StoreSignedMessage(ctx context.Context, uuid uuid.UUID, message *types.SignedMessage) error
|
||||
NextNonce(ctx context.Context, addr address.Address) (uint64, error)
|
||||
SaveNonce(ctx context.Context, addr address.Address, nonce uint64) error
|
||||
}
|
||||
|
||||
// MessageSigner keeps track of nonces per address, and increments the nonce
|
||||
@ -34,11 +38,11 @@ type MpoolNonceAPI interface {
|
||||
type MessageSigner struct {
|
||||
wallet api.Wallet
|
||||
lk sync.Mutex
|
||||
mpool MpoolNonceAPI
|
||||
mpool messagepool.MpoolNonceAPI
|
||||
ds datastore.Batching
|
||||
}
|
||||
|
||||
func NewMessageSigner(wallet api.Wallet, mpool MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
|
||||
func NewMessageSigner(wallet api.Wallet, mpool messagepool.MpoolNonceAPI, ds dtypes.MetadataDS) *MessageSigner {
|
||||
ds = namespace.Wrap(ds, datastore.NewKey("/message-signer/"))
|
||||
return &MessageSigner{
|
||||
wallet: wallet,
|
||||
@ -49,12 +53,12 @@ func NewMessageSigner(wallet api.Wallet, mpool MpoolNonceAPI, ds dtypes.Metadata
|
||||
|
||||
// SignMessage increments the nonce for the message From address, and signs
|
||||
// the message
|
||||
func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
|
||||
func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
|
||||
ms.lk.Lock()
|
||||
defer ms.lk.Unlock()
|
||||
|
||||
// Get the next message nonce
|
||||
nonce, err := ms.nextNonce(ctx, msg.From)
|
||||
nonce, err := ms.NextNonce(ctx, msg.From)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to create nonce: %w", err)
|
||||
}
|
||||
@ -72,7 +76,7 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb
|
||||
Extra: mb.RawData(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to sign message: %w", err)
|
||||
return nil, xerrors.Errorf("failed to sign message: %w, addr=%s", err, msg.From)
|
||||
}
|
||||
|
||||
// Callback with the signed message
|
||||
@ -80,13 +84,14 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb
|
||||
Message: *msg,
|
||||
Signature: *sig,
|
||||
}
|
||||
|
||||
err = cb(smsg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the callback executed successfully, write the nonce to the datastore
|
||||
if err := ms.saveNonce(ctx, msg.From, nonce); err != nil {
|
||||
if err := ms.SaveNonce(ctx, msg.From, nonce); err != nil {
|
||||
return nil, xerrors.Errorf("failed to save nonce: %w", err)
|
||||
}
|
||||
|
||||
@ -113,9 +118,9 @@ func (ms *MessageSigner) StoreSignedMessage(ctx context.Context, uuid uuid.UUID,
|
||||
return ms.ds.Put(ctx, key, serializedMsg)
|
||||
}
|
||||
|
||||
// nextNonce gets the next nonce for the given address.
|
||||
// NextNonce gets the next nonce for the given address.
|
||||
// If there is no nonce in the datastore, gets the nonce from the message pool.
|
||||
func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (uint64, error) {
|
||||
func (ms *MessageSigner) NextNonce(ctx context.Context, addr address.Address) (uint64, error) {
|
||||
// Nonces used to be created by the mempool and we need to support nodes
|
||||
// that have mempool nonces, so first check the mempool for a nonce for
|
||||
// this address. Note that the mempool returns the actor state's nonce
|
||||
@ -159,9 +164,9 @@ func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (u
|
||||
}
|
||||
}
|
||||
|
||||
// saveNonce increments the nonce for this address and writes it to the
|
||||
// SaveNonce increments the nonce for this address and writes it to the
|
||||
// datastore
|
||||
func (ms *MessageSigner) saveNonce(ctx context.Context, addr address.Address, nonce uint64) error {
|
||||
func (ms *MessageSigner) SaveNonce(ctx context.Context, addr address.Address, nonce uint64) error {
|
||||
// Increment the nonce
|
||||
nonce++
|
||||
|
||||
|
98 chain/messagesigner/messagesigner_consensus.go Normal file
@ -0,0 +1,98 @@
|
||||
package messagesigner
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/ipfs/go-datastore"
|
||||
"github.com/ipfs/go-datastore/namespace"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
|
||||
type MessageSignerConsensus struct {
|
||||
MsgSigner
|
||||
Consensus *consensus.Consensus
|
||||
}
|
||||
|
||||
func NewMessageSignerConsensus(
|
||||
wallet api.Wallet,
|
||||
mpool messagepool.MpoolNonceAPI,
|
||||
ds dtypes.MetadataDS,
|
||||
consensus *consensus.Consensus) *MessageSignerConsensus {
|
||||
|
||||
ds = namespace.Wrap(ds, datastore.NewKey("/message-signer-consensus/"))
|
||||
return &MessageSignerConsensus{
|
||||
MsgSigner: &MessageSigner{
|
||||
wallet: wallet,
|
||||
mpool: mpool,
|
||||
ds: ds,
|
||||
},
|
||||
Consensus: consensus,
|
||||
}
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) IsLeader(ctx context.Context) bool {
|
||||
return ms.Consensus.IsLeader(ctx)
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) {
|
||||
ok, err := ms.Consensus.RedirectToLeader(method, arg, ret.(*types.SignedMessage))
|
||||
if err != nil {
|
||||
return ok, err
|
||||
}
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) SignMessage(
|
||||
ctx context.Context,
|
||||
msg *types.Message,
|
||||
spec *api.MessageSendSpec,
|
||||
cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
|
||||
|
||||
signedMsg, err := ms.MsgSigner.SignMessage(ctx, msg, spec, cb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
op := &consensus.ConsensusOp{
|
||||
Nonce: signedMsg.Message.Nonce,
|
||||
Uuid: spec.MsgUuid,
|
||||
Addr: signedMsg.Message.From,
|
||||
SignedMsg: signedMsg,
|
||||
}
|
||||
err = ms.Consensus.Commit(ctx, op)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return signedMsg, nil
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error) {
|
||||
cstate, err := ms.Consensus.State(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//cstate := state.(Consensus.RaftState)
|
||||
msg, ok := cstate.MsgUuids[uuid]
|
||||
if !ok {
|
||||
return nil, xerrors.Errorf("Msg with Uuid %s not available", uuid)
|
||||
}
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) GetRaftState(ctx context.Context) (*consensus.RaftState, error) {
|
||||
return ms.Consensus.State(ctx)
|
||||
}
|
||||
|
||||
func (ms *MessageSignerConsensus) Leader(ctx context.Context) (peer.ID, error) {
|
||||
return ms.Consensus.Leader(ctx)
|
||||
}
|
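A sketch of how the new MsgSigner interface lets callers swap the plain signer for the raft-backed one. The constructor signatures are taken from the code above; the selection logic itself is illustrative.

package example

import (
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/messagepool"
	"github.com/filecoin-project/lotus/chain/messagesigner"
	consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

// newSigner picks an implementation behind the shared MsgSigner interface: the
// raft-backed signer additionally commits every signed message to the consensus
// log so any node in the cluster can later serve it by UUID.
func newSigner(w api.Wallet, mp messagepool.MpoolNonceAPI, ds dtypes.MetadataDS, c *consensus.Consensus) messagesigner.MsgSigner {
	if c != nil {
		return messagesigner.NewMessageSignerConsensus(w, mp, ds, c)
	}
	return messagesigner.NewMessageSigner(w, mp, ds)
}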
@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/messagepool"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/wallet"
|
||||
)
|
||||
@ -22,7 +23,7 @@ type mockMpool struct {
|
||||
nonces map[address.Address]uint64
|
||||
}
|
||||
|
||||
var _ MpoolNonceAPI = (*mockMpool)(nil)
|
||||
var _ messagepool.MpoolNonceAPI = (*mockMpool)(nil)
|
||||
|
||||
func newMockMpool() *mockMpool {
|
||||
return &mockMpool{nonces: make(map[address.Address]uint64)}
|
||||
@ -187,7 +188,7 @@ func TestMessageSignerSignMessage(t *testing.T) {
|
||||
mpool.setNonce(m.msg.From, m.mpoolNonce[0])
|
||||
}
|
||||
merr := m.cbErr
|
||||
smsg, err := ms.SignMessage(ctx, m.msg, func(message *types.SignedMessage) error {
|
||||
smsg, err := ms.SignMessage(ctx, m.msg, nil, func(message *types.SignedMessage) error {
|
||||
return merr
|
||||
})
|
||||
|
||||
|
@ -159,10 +159,18 @@ func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version)
|
||||
return xerrors.New("invalid 'To' address")
|
||||
}
|
||||
|
||||
if !abi.AddressValidForNetworkVersion(m.To, version) {
|
||||
return xerrors.New("'To' address protocol unsupported for network version")
|
||||
}
|
||||
|
||||
if m.From == address.Undef {
|
||||
return xerrors.New("'From' address cannot be empty")
|
||||
}
|
||||
|
||||
if !abi.AddressValidForNetworkVersion(m.From, version) {
|
||||
return xerrors.New("'From' address protocol unsupported for network version")
|
||||
}
|
||||
|
||||
if m.Value.Int == nil {
|
||||
return xerrors.New("'Value' cannot be nil")
|
||||
}
|
||||
|
@ -105,10 +105,20 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime7.Conse
|
||||
return nil, xerrors.Errorf("cannot decode first block header: %w", decodeErr)
|
||||
}
|
||||
|
||||
// A _valid_ block must use an ID address, but that's not what we're checking here. We're
|
||||
// just making sure that adding additional address protocols won't lead to consensus issues.
|
||||
if !abi.AddressValidForNetworkVersion(blockA.Miner, ss.networkVersion) {
|
||||
return nil, xerrors.Errorf("address protocol unsupported in current network version: %d", blockA.Miner.Protocol())
|
||||
}
|
||||
|
||||
if decodeErr := blockB.UnmarshalCBOR(bytes.NewReader(b)); decodeErr != nil {
|
||||
return nil, xerrors.Errorf("cannot decode second block header: %f", decodeErr)
|
||||
}
|
||||
|
||||
if !abi.AddressValidForNetworkVersion(blockB.Miner, ss.networkVersion) {
|
||||
return nil, xerrors.Errorf("address protocol unsupported in current network version: %d", blockB.Miner.Protocol())
|
||||
}
|
||||
|
||||
// workaround chain halt
|
||||
if build.IsNearUpgrade(blockA.Height, build.UpgradeOrangeHeight) {
|
||||
return nil, xerrors.Errorf("consensus reporting disabled around Upgrade Orange")
|
||||
@ -170,6 +180,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime7.Conse
|
||||
return nil, xerrors.Errorf("cannot decode extra: %w", decodeErr)
|
||||
}
|
||||
|
||||
if !abi.AddressValidForNetworkVersion(blockC.Miner, ss.networkVersion) {
|
||||
return nil, xerrors.Errorf("address protocol unsupported in current network version: %d", blockC.Miner.Protocol())
|
||||
}
|
||||
|
||||
if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
|
||||
types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
|
||||
consensusFault = &runtime7.ConsensusFault{
|
||||
|
@ -227,6 +227,10 @@ type VMOpts struct {
|
||||
}
|
||||
|
||||
func NewLegacyVM(ctx context.Context, opts *VMOpts) (*LegacyVM, error) {
|
||||
if opts.NetworkVersion >= network.Version16 {
|
||||
return nil, xerrors.Errorf("the legacy VM does not support network versions 16+")
|
||||
}
|
||||
|
||||
buf := blockstore.NewBuffered(opts.Bstore)
|
||||
cst := cbor.NewCborStore(buf)
|
||||
state, err := state.LoadStateTree(cst, opts.StateBase)
|
||||
|
@ -62,6 +62,7 @@ var ChainCmd = &cli.Command{
|
||||
ChainDecodeCmd,
|
||||
ChainEncodeCmd,
|
||||
ChainDisputeSetCmd,
|
||||
ChainPruneCmd,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -3,14 +3,9 @@ package cli
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@ -29,8 +24,6 @@ import (
|
||||
"github.com/ipld/go-ipld-prime/traversal/selector/builder"
|
||||
selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse"
|
||||
textselector "github.com/ipld/go-ipld-selector-text-lite"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@ -40,6 +33,7 @@ import (
|
||||
|
||||
lapi "github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
"github.com/filecoin-project/lotus/markets/utils"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
@ -181,12 +175,14 @@ func retrieve(ctx context.Context, cctx *cli.Context, fapi lapi.FullNode, sel *l
|
||||
event = retrievalmarket.ClientEvents[*evt.Event]
|
||||
}
|
||||
|
||||
printf("Recv %s, Paid %s, %s (%s), %s\n",
|
||||
printf("Recv %s, Paid %s, %s (%s), %s [%d|%d]\n",
|
||||
types.SizeStr(types.NewInt(evt.BytesReceived)),
|
||||
types.FIL(evt.TotalPaid),
|
||||
strings.TrimPrefix(event, "ClientEvent"),
|
||||
strings.TrimPrefix(retrievalmarket.DealStatuses[evt.Status], "DealStatus"),
|
||||
time.Now().Sub(start).Truncate(time.Millisecond),
|
||||
evt.ID,
|
||||
types.NewInt(evt.BytesReceived),
|
||||
)
|
||||
|
||||
switch evt.Status {
|
||||
@ -335,60 +331,6 @@ Examples:
|
||||
},
|
||||
}
|
||||
|
||||
func ClientExportStream(apiAddr string, apiAuth http.Header, eref lapi.ExportRef, car bool) (io.ReadCloser, error) {
|
||||
rj, err := json.Marshal(eref)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("marshaling export ref: %w", err)
|
||||
}
|
||||
|
||||
ma, err := multiaddr.NewMultiaddr(apiAddr)
|
||||
if err == nil {
|
||||
_, addr, err := manet.DialArgs(ma)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// todo: make cliutil helpers for this
|
||||
apiAddr = "http://" + addr
|
||||
}
|
||||
|
||||
aa, err := url.Parse(apiAddr)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parsing api address: %w", err)
|
||||
}
|
||||
switch aa.Scheme {
|
||||
case "ws":
|
||||
aa.Scheme = "http"
|
||||
case "wss":
|
||||
aa.Scheme = "https"
|
||||
}
|
||||
|
||||
aa.Path = path.Join(aa.Path, "rest/v0/export")
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("%s?car=%t&export=%s", aa, car, url.QueryEscape(string(rj))), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header = apiAuth
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
em, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("reading error body: %w", err)
|
||||
}
|
||||
|
||||
resp.Body.Close() // nolint
|
||||
return nil, xerrors.Errorf("getting root car: http %d: %s", resp.StatusCode, string(em))
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
var clientRetrieveCatCmd = &cli.Command{
|
||||
Name: "cat",
|
||||
Usage: "Show data from network",
|
||||
@ -438,7 +380,7 @@ var clientRetrieveCatCmd = &cli.Command{
|
||||
eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: &sel})
|
||||
}
|
||||
|
||||
rc, err := ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, false)
|
||||
rc, err := cliutil.ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -526,7 +468,7 @@ var clientRetrieveLsCmd = &cli.Command{
|
||||
DataSelector: &dataSelector,
|
||||
})
|
||||
|
||||
rc, err := ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, true)
|
||||
rc, err := cliutil.ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, true)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("export: %w", err)
|
||||
}
|
||||
@ -581,6 +523,7 @@ var clientRetrieveLsCmd = &cli.Command{
|
||||
dserv,
|
||||
roots[0],
|
||||
sel,
|
||||
nil,
|
||||
func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error {
|
||||
if r == traversal.VisitReason_SelectionMatch {
|
||||
fmt.Println(p.Path)
|
||||
|
@ -41,7 +41,13 @@ func infoCmdAct(cctx *cli.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
start, err := fullapi.StartTime(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Network: %s\n", network.NetworkName)
|
||||
fmt.Printf("StartTime: %s (started at %s)\n", time.Now().Sub(start).Truncate(time.Second), start.Truncate(time.Second))
|
||||
fmt.Print("Chain: ")
|
||||
err = SyncBasefeeCheck(ctx, fullapi)
|
||||
if err != nil {
|
||||
|
@ -124,8 +124,9 @@ var NetPeers = &cli.Command{
|
||||
}
|
||||
|
||||
var NetPing = &cli.Command{
|
||||
Name: "ping",
|
||||
Usage: "Ping peers",
|
||||
Name: "ping",
|
||||
Usage: "Ping peers",
|
||||
ArgsUsage: "[peerMultiaddr]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.IntFlag{
|
||||
Name: "count",
|
||||
|
30 cli/state.go
@ -44,7 +44,9 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
"github.com/filecoin-project/lotus/chain/stmgr"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
)
|
||||
|
||||
var StateCmd = &cli.Command{
|
||||
@ -229,7 +231,7 @@ var StateMinerInfo = &cli.Command{
|
||||
return xerrors.Errorf("getting miner info: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Proving Period Start:\t%s\n", EpochTime(cd.CurrentEpoch, cd.PeriodStart))
|
||||
fmt.Printf("Proving Period Start:\t%s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.PeriodStart))
|
||||
|
||||
return nil
|
||||
},
|
||||
@ -294,6 +296,28 @@ func ParseTipSetRef(ctx context.Context, api v0api.FullNode, tss string) (*types
|
||||
return ts, nil
|
||||
}
|
||||
|
||||
func ParseTipSetRefOffline(ctx context.Context, cs *store.ChainStore, tss string) (*types.TipSet, error) {
|
||||
switch {
|
||||
|
||||
case tss == "" || tss == "@head":
|
||||
return cs.GetHeaviestTipSet(), nil
|
||||
|
||||
case tss[0] != '@':
|
||||
cids, err := ParseTipSetString(tss)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to parse tipset (%q): %w", tss, err)
|
||||
}
|
||||
return cs.LoadTipSet(ctx, types.NewTipSetKey(cids...))
|
||||
|
||||
default:
|
||||
var h uint64
|
||||
if _, err := fmt.Sscanf(tss, "@%d", &h); err != nil {
|
||||
return nil, xerrors.Errorf("parsing height tipset ref: %w", err)
|
||||
}
|
||||
return cs.GetTipsetByHeight(ctx, abi.ChainEpoch(h), cs.GetHeaviestTipSet(), true)
|
||||
}
|
||||
}
|
||||
|
||||
var StatePowerCmd = &cli.Command{
|
||||
Name: "power",
|
||||
Usage: "Query network or miner power",
|
||||
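A hedged example of the reference forms the new offline parser above accepts (the ChainStore handle is assumed to be opened elsewhere):

package example

import (
	"context"

	lcli "github.com/filecoin-project/lotus/cli"
	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

// ParseTipSetRefOffline accepts "" or "@head" for the heaviest tipset, "@<height>"
// for a lookup by epoch, and otherwise a list of block CIDs (via ParseTipSetString).
func headAndEpoch(ctx context.Context, cs *store.ChainStore) (*types.TipSet, *types.TipSet, error) {
	head, err := lcli.ParseTipSetRefOffline(ctx, cs, "@head")
	if err != nil {
		return nil, nil, err
	}
	at, err := lcli.ParseTipSetRefOffline(ctx, cs, "@123456")
	if err != nil {
		return nil, nil, err
	}
	return head, at, nil
}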
@ -1793,8 +1817,8 @@ var StateSectorCmd = &cli.Command{
|
||||
}
|
||||
fmt.Println("DealIDs: ", si.DealIDs)
|
||||
fmt.Println()
|
||||
fmt.Println("Activation: ", EpochTimeTs(ts.Height(), si.Activation, ts))
|
||||
fmt.Println("Expiration: ", EpochTimeTs(ts.Height(), si.Expiration, ts))
|
||||
fmt.Println("Activation: ", cliutil.EpochTimeTs(ts.Height(), si.Activation, ts))
|
||||
fmt.Println("Expiration: ", cliutil.EpochTimeTs(ts.Height(), si.Expiration, ts))
|
||||
fmt.Println()
|
||||
fmt.Println("DealWeight: ", si.DealWeight)
|
||||
fmt.Println("VerifiedDealWeight: ", si.VerifiedDealWeight)
|
||||
|
39 cli/util.go
@ -2,19 +2,13 @@ package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/fatih/color"
|
||||
"github.com/hako/durafmt"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/mattn/go-isatty"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/api/v0api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
@ -43,36 +37,3 @@ func parseTipSet(ctx context.Context, api v0api.FullNode, vals []string) (*types
|
||||
|
||||
return types.NewTipSet(headers)
|
||||
}
|
||||
|
||||
func EpochTime(curr, e abi.ChainEpoch) string {
|
||||
switch {
|
||||
case curr > e:
|
||||
return fmt.Sprintf("%d (%s ago)", e, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e))).LimitFirstN(2))
|
||||
case curr == e:
|
||||
return fmt.Sprintf("%d (now)", e)
|
||||
case curr < e:
|
||||
return fmt.Sprintf("%d (in %s)", e, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr))).LimitFirstN(2))
|
||||
}
|
||||
|
||||
panic("math broke")
|
||||
}
|
||||
|
||||
// EpochTimeTs is like EpochTime, but also outputs absolute time. `ts` is only
|
||||
// used to provide a timestamp at some epoch to calculate time from. It can be
|
||||
// a genesis tipset.
|
||||
//
|
||||
// Example output: `1944975 (01 Jul 22 08:07 CEST, 10 hours 29 minutes ago)`
|
||||
func EpochTimeTs(curr, e abi.ChainEpoch, ts *types.TipSet) string {
|
||||
timeStr := time.Unix(int64(ts.MinTimestamp()+(uint64(e-ts.Height())*build.BlockDelaySecs)), 0).Format(time.RFC822)
|
||||
|
||||
switch {
|
||||
case curr > e:
|
||||
return fmt.Sprintf("%d (%s, %s ago)", e, timeStr, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e))).LimitFirstN(2))
|
||||
case curr == e:
|
||||
return fmt.Sprintf("%d (%s, now)", e, timeStr)
|
||||
case curr < e:
|
||||
return fmt.Sprintf("%d (%s, in %s)", e, timeStr, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr))).LimitFirstN(2))
|
||||
}
|
||||
|
||||
panic("math broke")
|
||||
}
|
||||
|
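These helpers are removed here and referenced elsewhere in this diff as cliutil.EpochTime / cliutil.EpochTimeTs. For reference, the arithmetic they perform: the age is (curr - e) * BlockDelaySecs seconds, so with mainnet's 30-second epochs a tipset 120 epochs back prints as "<epoch> (1 hour ago)"; EpochTimeTs additionally derives the absolute time as ts.MinTimestamp() + (e - ts.Height()) * BlockDelaySecs.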
203 cli/util/api.go
@ -8,8 +8,10 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"reflect"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/urfave/cli/v2"
|
||||
@ -21,6 +23,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/api/client"
|
||||
"github.com/filecoin-project/lotus/api/v0api"
|
||||
"github.com/filecoin-project/lotus/api/v1api"
|
||||
"github.com/filecoin-project/lotus/lib/retry"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
@ -36,7 +39,7 @@ const (
|
||||
// 2. *_API_INFO environment variables
|
||||
// 3. deprecated *_API_INFO environment variables
|
||||
// 4. *-repo command line flags.
|
||||
func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
func GetAPIInfoMulti(ctx *cli.Context, t repo.RepoType) ([]APIInfo, error) {
|
||||
// Check if there was a flag passed with the listen address of the API
|
||||
// server (only used by the tests)
|
||||
for _, f := range t.APIFlags() {
|
||||
@ -46,7 +49,7 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
strma := ctx.String(f)
|
||||
strma = strings.TrimSpace(strma)
|
||||
|
||||
return APIInfo{Addr: strma}, nil
|
||||
return []APIInfo{{Addr: strma}}, nil
|
||||
}
|
||||
|
||||
//
|
||||
@ -56,14 +59,14 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
primaryEnv, fallbacksEnvs, deprecatedEnvs := t.APIInfoEnvVars()
|
||||
env, ok := os.LookupEnv(primaryEnv)
|
||||
if ok {
|
||||
return ParseApiInfo(env), nil
|
||||
return ParseApiInfoMulti(env), nil
|
||||
}
|
||||
|
||||
for _, env := range deprecatedEnvs {
|
||||
env, ok := os.LookupEnv(env)
|
||||
if ok {
|
||||
log.Warnf("Using deprecated env(%s) value, please use env(%s) instead.", env, primaryEnv)
|
||||
return ParseApiInfo(env), nil
|
||||
return ParseApiInfoMulti(env), nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -76,26 +79,26 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
|
||||
p, err := homedir.Expand(path)
|
||||
if err != nil {
|
||||
return APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", f, err)
|
||||
return []APIInfo{}, xerrors.Errorf("could not expand home dir (%s): %w", f, err)
|
||||
}
|
||||
|
||||
r, err := repo.NewFS(p)
|
||||
if err != nil {
|
||||
return APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err)
|
||||
return []APIInfo{}, xerrors.Errorf("could not open repo at path: %s; %w", p, err)
|
||||
}
|
||||
|
||||
exists, err := r.Exists()
|
||||
if err != nil {
|
||||
return APIInfo{}, xerrors.Errorf("repo.Exists returned an error: %w", err)
|
||||
return []APIInfo{}, xerrors.Errorf("repo.Exists returned an error: %w", err)
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return APIInfo{}, errors.New("repo directory does not exist. Make sure your configuration is correct")
|
||||
return []APIInfo{}, errors.New("repo directory does not exist. Make sure your configuration is correct")
|
||||
}
|
||||
|
||||
ma, err := r.APIEndpoint()
|
||||
if err != nil {
|
||||
return APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err)
|
||||
return []APIInfo{}, xerrors.Errorf("could not get api endpoint: %w", err)
|
||||
}
|
||||
|
||||
token, err := r.APIToken()
|
||||
@ -103,38 +106,75 @@ func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
log.Warnf("Couldn't load CLI token, capabilities may be limited: %v", err)
|
||||
}
|
||||
|
||||
return APIInfo{
|
||||
return []APIInfo{{
|
||||
Addr: ma.String(),
|
||||
Token: token,
|
||||
}, nil
|
||||
}}, nil
|
||||
}
|
||||
|
||||
for _, env := range fallbacksEnvs {
|
||||
env, ok := os.LookupEnv(env)
|
||||
if ok {
|
||||
return ParseApiInfo(env), nil
|
||||
return ParseApiInfoMulti(env), nil
|
||||
}
|
||||
}
|
||||
|
||||
return APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t.Type())
|
||||
return []APIInfo{}, fmt.Errorf("could not determine API endpoint for node type: %v", t.Type())
|
||||
}
|
||||
|
||||
func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) {
|
||||
ainfo, err := GetAPIInfo(ctx, t)
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("could not get API info for %s: %w", t.Type(), err)
|
||||
func GetAPIInfo(ctx *cli.Context, t repo.RepoType) (APIInfo, error) {
|
||||
ainfos, err := GetAPIInfoMulti(ctx, t)
|
||||
if err != nil || len(ainfos) == 0 {
|
||||
return APIInfo{}, err
|
||||
}
|
||||
|
||||
addr, err := ainfo.DialArgs(version)
|
||||
if err != nil {
|
||||
return "", nil, xerrors.Errorf("could not get DialArgs: %w", err)
|
||||
if len(ainfos) > 1 {
|
||||
log.Warn("multiple API infos received when only one was expected")
|
||||
}
|
||||
|
||||
return ainfos[0], nil
|
||||
|
||||
}
|
||||
|
||||
type HttpHead struct {
|
||||
addr string
|
||||
header http.Header
|
||||
}
|
||||
|
||||
func GetRawAPIMulti(ctx *cli.Context, t repo.RepoType, version string) ([]HttpHead, error) {
|
||||
|
||||
var httpHeads []HttpHead
|
||||
ainfos, err := GetAPIInfoMulti(ctx, t)
|
||||
if err != nil || len(ainfos) == 0 {
|
||||
return httpHeads, xerrors.Errorf("could not get API info for %s: %w", t.Type(), err)
|
||||
}
|
||||
|
||||
for _, ainfo := range ainfos {
|
||||
addr, err := ainfo.DialArgs(version)
|
||||
if err != nil {
|
||||
return httpHeads, xerrors.Errorf("could not get DialArgs: %w", err)
|
||||
}
|
||||
httpHeads = append(httpHeads, HttpHead{addr: addr, header: ainfo.AuthHeader()})
|
||||
}
|
||||
|
||||
if IsVeryVerbose {
|
||||
_, _ = fmt.Fprintf(ctx.App.Writer, "using raw API %s endpoint: %s\n", version, addr)
|
||||
_, _ = fmt.Fprintf(ctx.App.Writer, "using raw API %s endpoint: %s\n", version, httpHeads[0].addr)
|
||||
}
|
||||
|
||||
return addr, ainfo.AuthHeader(), nil
|
||||
return httpHeads, nil
|
||||
}
|
||||
|
||||
func GetRawAPI(ctx *cli.Context, t repo.RepoType, version string) (string, http.Header, error) {
|
||||
heads, err := GetRawAPIMulti(ctx, t, version)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
if len(heads) > 1 {
|
||||
log.Warnf("More than 1 header received when expecting only one")
|
||||
}
|
||||
|
||||
return heads[0].addr, heads[0].header, nil
|
||||
}
|
||||
|
||||
func GetCommonAPI(ctx *cli.Context) (api.CommonNet, jsonrpc.ClientCloser, error) {
|
||||
@ -185,7 +225,72 @@ func GetFullNodeAPI(ctx *cli.Context) (v0api.FullNode, jsonrpc.ClientCloser, err
|
||||
return client.NewFullNodeRPCV0(ctx.Context, addr, headers)
|
||||
}
|
||||
|
||||
func GetFullNodeAPIV1(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
type contextKey string
|
||||
|
||||
// Not thread safe
|
||||
func OnSingleNode(ctx context.Context) context.Context {
|
||||
return context.WithValue(ctx, contextKey("retry-node"), new(*int))
|
||||
}
|
||||
|
||||
func FullNodeProxy[T api.FullNode](ins []T, outstr *api.FullNodeStruct) {
|
||||
outs := api.GetInternalStructs(outstr)
|
||||
|
||||
var rins []reflect.Value
|
||||
for _, in := range ins {
|
||||
rins = append(rins, reflect.ValueOf(in))
|
||||
}
|
||||
|
||||
for _, out := range outs {
|
||||
rProxyInternal := reflect.ValueOf(out).Elem()
|
||||
|
||||
for f := 0; f < rProxyInternal.NumField(); f++ {
|
||||
field := rProxyInternal.Type().Field(f)
|
||||
|
||||
var fns []reflect.Value
|
||||
for _, rin := range rins {
|
||||
fns = append(fns, rin.MethodByName(field.Name))
|
||||
}
|
||||
|
||||
rProxyInternal.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) {
|
||||
errorsToRetry := []error{&jsonrpc.RPCConnectionError{}, &jsonrpc.ErrClient{}}
|
||||
initialBackoff, err := time.ParseDuration("1s")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx := args[0].Interface().(context.Context)
|
||||
|
||||
curr := -1
|
||||
|
||||
// for calls that need to be performed on the same node
|
||||
// primarily for miner when calling create block and submit block subsequently
|
||||
key := contextKey("retry-node")
|
||||
if ctx.Value(key) != nil {
|
||||
if (*ctx.Value(key).(**int)) == nil {
|
||||
*ctx.Value(key).(**int) = &curr
|
||||
} else {
|
||||
curr = **ctx.Value(key).(**int) - 1
|
||||
}
|
||||
}
|
||||
|
||||
total := len(rins)
|
||||
result, err := retry.Retry(ctx, 5, initialBackoff, errorsToRetry, func() (results []reflect.Value, err2 error) {
|
||||
curr = (curr + 1) % total
|
||||
|
||||
result := fns[curr].Call(args)
|
||||
if result[len(result)-1].IsNil() {
|
||||
return result, nil
|
||||
}
|
||||
e := result[len(result)-1].Interface().(error)
|
||||
return result, e
|
||||
})
|
||||
return result
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
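// Hedged sketch, not part of the diff: one way the retry proxy above might be
// wired up, and how OnSingleNode pins a call sequence to a single backend.
// nodeA, nodeB, ctx and the block template bt are illustrative assumptions;
// MinerCreateBlock and SyncSubmitBlock are existing api.FullNode methods.
//
//	var proxied api.FullNodeStruct
//	FullNodeProxy([]api.FullNode{nodeA, nodeB}, &proxied)
//
//	// Pin block creation and submission to the same backend node, as the
//	// comment inside the proxy describes.
//	sctx := OnSingleNode(ctx)
//	blk, err := proxied.MinerCreateBlock(sctx, bt)
//	if err != nil {
//		return err
//	}
//	return proxied.SyncSubmitBlock(sctx, blk)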
|
||||
|
||||
func GetFullNodeAPIV1Single(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
|
||||
return tn.(v1api.FullNode), func() {}, nil
|
||||
}
|
||||
@ -214,6 +319,58 @@ func GetFullNodeAPIV1(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, e
|
||||
return v1API, closer, nil
|
||||
}
|
||||
|
||||
func GetFullNodeAPIV1(ctx *cli.Context) (v1api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
if tn, ok := ctx.App.Metadata["testnode-full"]; ok {
|
||||
return tn.(v1api.FullNode), func() {}, nil
|
||||
}
|
||||
|
||||
heads, err := GetRawAPIMulti(ctx, repo.FullNode, "v1")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if IsVeryVerbose {
|
||||
_, _ = fmt.Fprintln(ctx.App.Writer, "using full node API v1 endpoint:", heads[0].addr)
|
||||
}
|
||||
|
||||
var fullNodes []api.FullNode
|
||||
var closers []jsonrpc.ClientCloser
|
||||
|
||||
for _, head := range heads {
|
||||
v1api, closer, err := client.NewFullNodeRPCV1(ctx.Context, head.addr, head.header)
|
||||
if err != nil {
|
||||
log.Warnf("Not able to establish connection to node with addr: ", head.addr)
|
||||
continue
|
||||
}
|
||||
fullNodes = append(fullNodes, v1api)
|
||||
closers = append(closers, closer)
|
||||
}
|
||||
|
||||
// When running in cluster mode and trying to establish connections to multiple nodes, fail
|
||||
// if less than 2 lotus nodes are actually running
|
||||
if len(heads) > 1 && len(fullNodes) < 2 {
|
||||
return nil, nil, xerrors.Errorf("Not able to establish connection to more than a single node")
|
||||
}
|
||||
|
||||
finalCloser := func() {
|
||||
for _, c := range closers {
|
||||
c()
|
||||
}
|
||||
}
|
||||
|
||||
var v1API api.FullNodeStruct
|
||||
FullNodeProxy(fullNodes, &v1API)
|
||||
|
||||
v, err := v1API.Version(ctx.Context)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if !v.APIVersion.EqMajorMinor(api.FullAPIVersion1) {
|
||||
return nil, nil, xerrors.Errorf("Remote API version didn't match (expected %s, remote %s)", api.FullAPIVersion1, v.APIVersion)
|
||||
}
|
||||
return &v1API, finalCloser, nil
|
||||
}
|
||||
|
||||
type GetStorageMinerOptions struct {
|
||||
PreferHttp bool
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ type APIInfo struct {
|
||||
|
||||
func ParseApiInfo(s string) APIInfo {
|
||||
var tok []byte
|
||||
|
||||
if infoWithToken.Match([]byte(s)) {
|
||||
sp := strings.SplitN(s, ":", 2)
|
||||
tok = []byte(sp[0])
|
||||
@ -36,6 +37,18 @@ func ParseApiInfo(s string) APIInfo {
|
||||
}
|
||||
}
|
||||
|
||||
func ParseApiInfoMulti(s string) []APIInfo {
|
||||
var apiInfos []APIInfo
|
||||
|
||||
allAddrs := strings.SplitN(s, ",", -1)
|
||||
|
||||
for _, addr := range allAddrs {
|
||||
apiInfos = append(apiInfos, ParseApiInfo(addr))
|
||||
}
|
||||
|
||||
return apiInfos
|
||||
}
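// Hedged example, not part of the diff: ParseApiInfoMulti accepts the same
// "token:multiaddr" form as ParseApiInfo, comma separated, which is how an
// API info env value listing several nodes would be parsed. The tokens and
// addresses below are made up.
//
//	infos := ParseApiInfoMulti(
//		"eyJhbGci...A:/ip4/10.0.0.1/tcp/1234/http,eyJhbGci...B:/ip4/10.0.0.2/tcp/1234/http")
//	// len(infos) == 2, infos[1].Addr == "/ip4/10.0.0.2/tcp/1234/http"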
|
||||
|
||||
func (a APIInfo) DialArgs(version string) (string, error) {
|
||||
ma, err := multiaddr.NewMultiaddr(a.Addr)
|
||||
if err == nil {
|
||||
|
cli/util/epoch.go (new file, 46 lines)
@ -0,0 +1,46 @@
|
||||
package cliutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/hako/durafmt"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
func EpochTime(curr, e abi.ChainEpoch) string {
|
||||
switch {
|
||||
case curr > e:
|
||||
return fmt.Sprintf("%d (%s ago)", e, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e))).LimitFirstN(2))
|
||||
case curr == e:
|
||||
return fmt.Sprintf("%d (now)", e)
|
||||
case curr < e:
|
||||
return fmt.Sprintf("%d (in %s)", e, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr))).LimitFirstN(2))
|
||||
}
|
||||
|
||||
panic("math broke")
|
||||
}
|
||||
|
||||
// EpochTimeTs is like EpochTime, but also outputs absolute time. `ts` is only
|
||||
// used to provide a timestamp at some epoch to calculate time from. It can be
|
||||
// a genesis tipset.
|
||||
//
|
||||
// Example output: `1944975 (01 Jul 22 08:07 CEST, 10 hours 29 minutes ago)`
|
||||
func EpochTimeTs(curr, e abi.ChainEpoch, ts *types.TipSet) string {
|
||||
timeStr := time.Unix(int64(ts.MinTimestamp()+(uint64(e-ts.Height())*build.BlockDelaySecs)), 0).Format(time.RFC822)
|
||||
|
||||
switch {
|
||||
case curr > e:
|
||||
return fmt.Sprintf("%d (%s, %s ago)", e, timeStr, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(curr-e))).LimitFirstN(2))
|
||||
case curr == e:
|
||||
return fmt.Sprintf("%d (%s, now)", e, timeStr)
|
||||
case curr < e:
|
||||
return fmt.Sprintf("%d (%s, in %s)", e, timeStr, durafmt.Parse(time.Second*time.Duration(int64(build.BlockDelaySecs)*int64(e-curr))).LimitFirstN(2))
|
||||
}
|
||||
|
||||
panic("math broke")
|
||||
}
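// Hedged usage sketch, not part of the diff: formatting epochs relative to the
// current head. fullapi, expiration and periodStart are illustrative
// assumptions; ChainHead and ChainGetGenesis are existing FullNode methods.
//
//	head, _ := fullapi.ChainHead(ctx)
//	fmt.Println("Expiration:", EpochTime(head.Height(), expiration))
//	// e.g. "2262760 (in 2 weeks 2 days)"
//
//	genesis, _ := fullapi.ChainGetGenesis(ctx)
//	fmt.Println("Period Start:", EpochTimeTs(head.Height(), periodStart, genesis))
//	// e.g. "1944975 (01 Jul 22 08:07 CEST, 10 hours 29 minutes ago)"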
|
cli/util/retrieval.go (new file, 78 lines)
@ -0,0 +1,78 @@
|
||||
package cliutil
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
manet "github.com/multiformats/go-multiaddr/net"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
)
|
||||
|
||||
func ApiAddrToUrl(apiAddr string) (*url.URL, error) {
|
||||
ma, err := multiaddr.NewMultiaddr(apiAddr)
|
||||
if err == nil {
|
||||
_, addr, err := manet.DialArgs(ma)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// todo: make cliutil helpers for this
|
||||
apiAddr = "http://" + addr
|
||||
}
|
||||
aa, err := url.Parse(apiAddr)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("parsing api address: %w", err)
|
||||
}
|
||||
switch aa.Scheme {
|
||||
case "ws":
|
||||
aa.Scheme = "http"
|
||||
case "wss":
|
||||
aa.Scheme = "https"
|
||||
}
|
||||
|
||||
return aa, nil
|
||||
}
|
||||
|
||||
func ClientExportStream(apiAddr string, apiAuth http.Header, eref api.ExportRef, car bool) (io.ReadCloser, error) {
|
||||
rj, err := json.Marshal(eref)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("marshaling export ref: %w", err)
|
||||
}
|
||||
|
||||
aa, err := ApiAddrToUrl(apiAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
aa.Path = path.Join(aa.Path, "rest/v0/export")
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("%s?car=%t&export=%s", aa, car, url.QueryEscape(string(rj))), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header = apiAuth
|
||||
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
em, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("reading error body: %w", err)
|
||||
}
|
||||
|
||||
resp.Body.Close() // nolint
|
||||
return nil, xerrors.Errorf("getting root car: http %d: %s", resp.StatusCode, string(em))
|
||||
}
|
||||
|
||||
return resp.Body, nil
|
||||
}
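// Hedged usage sketch, not part of the diff: writing an export to a local CAR
// file through the REST endpoint above. apiAddr, apiAuth and eref are
// illustrative assumptions.
//
//	rc, err := ClientExportStream(apiAddr, apiAuth, eref, true)
//	if err != nil {
//		return err
//	}
//	defer rc.Close() //nolint:errcheck
//
//	f, err := os.Create("retrieved.car")
//	if err != nil {
//		return err
//	}
//	defer f.Close() //nolint:errcheck
//
//	_, err = io.Copy(f, rc)
//	return err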
|
@ -260,6 +260,7 @@ var walletSetDefault = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Default address set to:", addr)
|
||||
return api.WalletSetDefault(ctx, addr)
|
||||
},
|
||||
}
|
||||
@ -517,6 +518,8 @@ var walletDelete = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Soft deleting address:", addr)
|
||||
fmt.Println("Hard deletion of the address in `~/.lotus/keystore` is needed for permanent removal")
|
||||
return api.WalletDelete(ctx, addr)
|
||||
},
|
||||
}
|
||||
|
@ -595,6 +595,7 @@ var actorControlList = &cli.Command{
|
||||
|
||||
printKey("owner", mi.Owner)
|
||||
printKey("worker", mi.Worker)
|
||||
printKey("beneficiary", mi.Beneficiary)
|
||||
for i, ca := range mi.ControlAddresses {
|
||||
printKey(fmt.Sprintf("control-%d", i), ca)
|
||||
}
|
||||
|
@ -33,6 +33,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/reward"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
"github.com/filecoin-project/lotus/journal/alerting"
|
||||
sealing "github.com/filecoin-project/lotus/storage/pipeline"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
|
||||
@ -92,6 +93,12 @@ func infoCmdAct(cctx *cli.Context) error {
|
||||
|
||||
fmt.Println("Enabled subsystems (from markets API):", subsystems)
|
||||
|
||||
start, err := fullapi.StartTime(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("StartTime: %s (started at %s)\n", time.Now().Sub(start).Truncate(time.Second), start.Truncate(time.Second))
|
||||
|
||||
fmt.Print("Chain: ")
|
||||
|
||||
err = lcli.SyncBasefeeCheck(ctx, fullapi)
|
||||
@ -528,6 +535,7 @@ var stateList = []stateMeta{
|
||||
{col: color.FgYellow, state: sealing.ProveReplicaUpdate},
|
||||
{col: color.FgYellow, state: sealing.SubmitReplicaUpdate},
|
||||
{col: color.FgYellow, state: sealing.ReplicaUpdateWait},
|
||||
{col: color.FgYellow, state: sealing.WaitMutable},
|
||||
{col: color.FgYellow, state: sealing.FinalizeReplicaUpdate},
|
||||
{col: color.FgYellow, state: sealing.ReleaseSectorKey},
|
||||
|
||||
@ -658,7 +666,7 @@ func producedBlocks(ctx context.Context, count int, maddr address.Address, napi
|
||||
fmt.Printf("%8d | %s | %s\n", ts.Height(), bh.Cid(), types.FIL(minerReward))
|
||||
count--
|
||||
} else if tty && bh.Height%120 == 0 {
|
||||
_, _ = fmt.Fprintf(os.Stderr, "\r\x1b[0KChecking epoch %s", lcli.EpochTime(head.Height(), bh.Height))
|
||||
_, _ = fmt.Fprintf(os.Stderr, "\r\x1b[0KChecking epoch %s", cliutil.EpochTime(head.Height(), bh.Height))
|
||||
}
|
||||
}
|
||||
tsk = ts.Parents()
|
||||
|
@ -49,6 +49,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/journal"
|
||||
"github.com/filecoin-project/lotus/journal/fsjournal"
|
||||
storageminer "github.com/filecoin-project/lotus/miner"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/modules"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
@ -218,7 +219,7 @@ var initCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
var localPaths []paths.LocalPath
|
||||
var localPaths []storiface.LocalPath
|
||||
|
||||
if pssb := cctx.StringSlice("pre-sealed-sectors"); len(pssb) != 0 {
|
||||
log.Infof("Setting up storage config with presealed sectors: %v", pssb)
|
||||
@ -228,14 +229,14 @@ var initCmd = &cli.Command{
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
localPaths = append(localPaths, paths.LocalPath{
|
||||
localPaths = append(localPaths, storiface.LocalPath{
|
||||
Path: psp,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if !cctx.Bool("no-local-storage") {
|
||||
b, err := json.MarshalIndent(&paths.LocalStorageMeta{
|
||||
b, err := json.MarshalIndent(&storiface.LocalStorageMeta{
|
||||
ID: storiface.ID(uuid.New().String()),
|
||||
Weight: 10,
|
||||
CanSeal: true,
|
||||
@ -249,12 +250,12 @@ var initCmd = &cli.Command{
|
||||
return xerrors.Errorf("persisting storage metadata (%s): %w", filepath.Join(lr.Path(), "sectorstore.json"), err)
|
||||
}
|
||||
|
||||
localPaths = append(localPaths, paths.LocalPath{
|
||||
localPaths = append(localPaths, storiface.LocalPath{
|
||||
Path: lr.Path(),
|
||||
})
|
||||
}
|
||||
|
||||
if err := lr.SetStorage(func(sc *paths.StorageConfig) {
|
||||
if err := lr.SetStorage(func(sc *storiface.StorageConfig) {
|
||||
sc.StoragePaths = append(sc.StoragePaths, localPaths...)
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("set storage config: %w", err)
|
||||
@ -471,7 +472,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
|
||||
}
|
||||
stor := paths.NewRemote(lstor, si, http.Header(sa), 10, &paths.DefaultPartialFileHandler{})
|
||||
|
||||
smgr, err := sealer.New(ctx, lstor, stor, lr, si, sealer.Config{
|
||||
smgr, err := sealer.New(ctx, lstor, stor, lr, si, config.SealerConfig{
|
||||
ParallelFetchLimit: 10,
|
||||
AllowAddPiece: true,
|
||||
AllowPreCommit1: true,
|
||||
@ -481,7 +482,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode
|
||||
AllowReplicaUpdate: true,
|
||||
AllowProveReplicaUpdate2: true,
|
||||
AllowRegenSectorKey: true,
|
||||
}, wsts, smsts)
|
||||
}, config.ProvingConfig{}, wsts, smsts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -27,7 +27,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/lib/backupds"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
"github.com/filecoin-project/lotus/storage/paths"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
var restoreCmd = &cli.Command{
|
||||
@ -52,7 +52,7 @@ var restoreCmd = &cli.Command{
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
log.Info("Initializing lotus miner using a backup")
|
||||
|
||||
var storageCfg *paths.StorageConfig
|
||||
var storageCfg *storiface.StorageConfig
|
||||
if cctx.IsSet("storage-config") {
|
||||
cf, err := homedir.Expand(cctx.String("storage-config"))
|
||||
if err != nil {
|
||||
@ -64,7 +64,7 @@ var restoreCmd = &cli.Command{
|
||||
return xerrors.Errorf("reading storage config: %w", err)
|
||||
}
|
||||
|
||||
storageCfg = &paths.StorageConfig{}
|
||||
storageCfg = &storiface.StorageConfig{}
|
||||
err = json.Unmarshal(cfb, storageCfg)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("cannot unmarshal json for storage config: %w", err)
|
||||
@ -95,7 +95,7 @@ var restoreCmd = &cli.Command{
|
||||
},
|
||||
}
|
||||
|
||||
func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfig *paths.StorageConfig, manageConfig func(*config.StorageMiner) error, after func(api lapi.FullNode, addr address.Address, peerid peer.ID, mi api.MinerInfo) error) error {
|
||||
func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfig *storiface.StorageConfig, manageConfig func(*config.StorageMiner) error, after func(api lapi.FullNode, addr address.Address, peerid peer.ID, mi api.MinerInfo) error) error {
|
||||
if cctx.NArg() != 1 {
|
||||
return lcli.IncorrectNumArgs(cctx)
|
||||
}
|
||||
@ -214,7 +214,7 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi
|
||||
if strConfig != nil {
|
||||
log.Info("Restoring storage path config")
|
||||
|
||||
err = lr.SetStorage(func(scfg *paths.StorageConfig) {
|
||||
err = lr.SetStorage(func(scfg *storiface.StorageConfig) {
|
||||
*scfg = *strConfig
|
||||
})
|
||||
if err != nil {
|
||||
@ -223,8 +223,8 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi
|
||||
} else {
|
||||
log.Warn("--storage-config NOT SET. NO SECTOR PATHS WILL BE CONFIGURED")
|
||||
// setting empty config to allow miner to be started
|
||||
if err := lr.SetStorage(func(sc *paths.StorageConfig) {
|
||||
sc.StoragePaths = append(sc.StoragePaths, paths.LocalPath{})
|
||||
if err := lr.SetStorage(func(sc *storiface.StorageConfig) {
|
||||
sc.StoragePaths = append(sc.StoragePaths, storiface.LocalPath{})
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("set storage config: %w", err)
|
||||
}
|
||||
|
@ -17,7 +17,7 @@ import (
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
"github.com/filecoin-project/lotus/node/config"
|
||||
"github.com/filecoin-project/lotus/storage/paths"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -78,7 +78,7 @@ var serviceCmd = &cli.Command{
|
||||
return xerrors.Errorf("please provide Lotus markets repo path via flag %s", FlagMarketsRepo)
|
||||
}
|
||||
|
||||
if err := restore(ctx, cctx, repoPath, &paths.StorageConfig{}, func(cfg *config.StorageMiner) error {
|
||||
if err := restore(ctx, cctx, repoPath, &storiface.StorageConfig{}, func(cfg *config.StorageMiner) error {
|
||||
cfg.Subsystems.EnableMarkets = es.Contains(MarketsService)
|
||||
cfg.Subsystems.EnableMining = false
|
||||
cfg.Subsystems.EnableSealing = false
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
@ -185,18 +186,18 @@ var provingInfoCmd = &cli.Command{
|
||||
fmt.Printf("Current Epoch: %d\n", cd.CurrentEpoch)
|
||||
|
||||
fmt.Printf("Proving Period Boundary: %d\n", cd.PeriodStart%cd.WPoStProvingPeriod)
|
||||
fmt.Printf("Proving Period Start: %s\n", lcli.EpochTimeTs(cd.CurrentEpoch, cd.PeriodStart, head))
|
||||
fmt.Printf("Next Period Start: %s\n\n", lcli.EpochTimeTs(cd.CurrentEpoch, cd.PeriodStart+cd.WPoStProvingPeriod, head))
|
||||
fmt.Printf("Proving Period Start: %s\n", cliutil.EpochTimeTs(cd.CurrentEpoch, cd.PeriodStart, head))
|
||||
fmt.Printf("Next Period Start: %s\n\n", cliutil.EpochTimeTs(cd.CurrentEpoch, cd.PeriodStart+cd.WPoStProvingPeriod, head))
|
||||
|
||||
fmt.Printf("Faults: %d (%.2f%%)\n", faults, faultPerc)
|
||||
fmt.Printf("Recovering: %d\n", recovering)
|
||||
|
||||
fmt.Printf("Deadline Index: %d\n", cd.Index)
|
||||
fmt.Printf("Deadline Sectors: %d\n", curDeadlineSectors)
|
||||
fmt.Printf("Deadline Open: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Open))
|
||||
fmt.Printf("Deadline Close: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Close))
|
||||
fmt.Printf("Deadline Challenge: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.Challenge))
|
||||
fmt.Printf("Deadline FaultCutoff: %s\n", lcli.EpochTime(cd.CurrentEpoch, cd.FaultCutoff))
|
||||
fmt.Printf("Deadline Open: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.Open))
|
||||
fmt.Printf("Deadline Close: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.Close))
|
||||
fmt.Printf("Deadline Challenge: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.Challenge))
|
||||
fmt.Printf("Deadline FaultCutoff: %s\n", cliutil.EpochTime(cd.CurrentEpoch, cd.FaultCutoff))
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@ -582,7 +583,7 @@ var provingCheckProvableCmd = &cli.Command{
|
||||
})
|
||||
}
|
||||
|
||||
bad, err := minerApi.CheckProvable(ctx, info.WindowPoStProofType, tocheck, cctx.Bool("slow"))
|
||||
bad, err := minerApi.CheckProvable(ctx, info.WindowPoStProofType, tocheck)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -32,6 +32,7 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
cliutil "github.com/filecoin-project/lotus/cli/util"
|
||||
"github.com/filecoin-project/lotus/lib/strle"
|
||||
"github.com/filecoin-project/lotus/lib/tablewriter"
|
||||
sealing "github.com/filecoin-project/lotus/storage/pipeline"
|
||||
@ -485,9 +486,9 @@ var sectorsListCmd = &cli.Command{
|
||||
if !inSSet {
|
||||
m["Expiration"] = "n/a"
|
||||
} else {
|
||||
m["Expiration"] = lcli.EpochTime(head.Height(), exp)
|
||||
m["Expiration"] = cliutil.EpochTime(head.Height(), exp)
|
||||
if st.Early > 0 {
|
||||
m["RecoveryTimeout"] = color.YellowString(lcli.EpochTime(head.Height(), st.Early))
|
||||
m["RecoveryTimeout"] = color.YellowString(cliutil.EpochTime(head.Height(), st.Early))
|
||||
}
|
||||
}
|
||||
if inSSet && cctx.Bool("initial-pledge") {
|
||||
@ -666,10 +667,10 @@ var sectorsCheckExpireCmd = &cli.Command{
|
||||
"ID": sector.SectorNumber,
|
||||
"SealProof": sector.SealProof,
|
||||
"InitialPledge": types.FIL(sector.InitialPledge).Short(),
|
||||
"Activation": lcli.EpochTime(currEpoch, sector.Activation),
|
||||
"Expiration": lcli.EpochTime(currEpoch, sector.Expiration),
|
||||
"MaxExpiration": lcli.EpochTime(currEpoch, MaxExpiration),
|
||||
"MaxExtendNow": lcli.EpochTime(currEpoch, MaxExtendNow),
|
||||
"Activation": cliutil.EpochTime(currEpoch, sector.Activation),
|
||||
"Expiration": cliutil.EpochTime(currEpoch, sector.Expiration),
|
||||
"MaxExpiration": cliutil.EpochTime(currEpoch, MaxExpiration),
|
||||
"MaxExtendNow": cliutil.EpochTime(currEpoch, MaxExtendNow),
|
||||
})
|
||||
}
|
||||
|
||||
@ -1909,7 +1910,7 @@ var sectorsExpiredCmd = &cli.Command{
|
||||
toRemove = append(toRemove, s)
|
||||
}
|
||||
|
||||
fmt.Printf("%d%s\t%s\t%s\n", s, rmMsg, st.State, lcli.EpochTime(head.Height(), st.Expiration))
|
||||
fmt.Printf("%d%s\t%s\t%s\n", s, rmMsg, st.State, cliutil.EpochTime(head.Height(), st.Expiration))
|
||||
|
||||
return nil
|
||||
})
|
||||
|
@ -29,7 +29,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/lib/tablewriter"
|
||||
"github.com/filecoin-project/lotus/storage/paths"
|
||||
sealing "github.com/filecoin-project/lotus/storage/pipeline"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
@ -148,7 +147,7 @@ over time
|
||||
}
|
||||
}
|
||||
|
||||
cfg := &paths.LocalStorageMeta{
|
||||
cfg := &storiface.LocalStorageMeta{
|
||||
ID: storiface.ID(uuid.New().String()),
|
||||
Weight: cctx.Uint64("weight"),
|
||||
CanSeal: cctx.Bool("seal"),
|
||||
|
@ -27,7 +27,6 @@ import (
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/wallet/key"
|
||||
"github.com/filecoin-project/lotus/genesis"
|
||||
"github.com/filecoin-project/lotus/storage/paths"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/ffiwrapper/basicfs"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
@ -126,7 +125,7 @@ func PreSeal(maddr address.Address, spt abi.RegisteredSealProof, offset abi.Sect
|
||||
}
|
||||
|
||||
{
|
||||
b, err := json.MarshalIndent(&paths.LocalStorageMeta{
|
||||
b, err := json.MarshalIndent(&storiface.LocalStorageMeta{
|
||||
ID: storiface.ID(uuid.New().String()),
|
||||
Weight: 0, // read-only
|
||||
CanSeal: false,
|
||||
|
@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
@ -420,6 +421,11 @@ var actorControlSet = &cli.Command{
|
||||
Name: "actor",
|
||||
Usage: "specify the address of miner actor",
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "dump-bytes",
|
||||
Usage: "Dumps the bytes of the message that would propose this change",
|
||||
Value: false,
|
||||
},
|
||||
&cli.BoolFlag{
|
||||
Name: "really-do-it",
|
||||
Usage: "Actually send transaction performing the action",
|
||||
@ -427,11 +433,6 @@ var actorControlSet = &cli.Command{
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if !cctx.Bool("really-do-it") {
|
||||
fmt.Println("Pass --really-do-it to actually execute this action")
|
||||
return nil
|
||||
}
|
||||
|
||||
var maddr address.Address
|
||||
if act := cctx.String("actor"); act != "" {
|
||||
var err error
|
||||
@ -521,14 +522,36 @@ var actorControlSet = &cli.Command{
|
||||
return xerrors.Errorf("serializing params: %w", err)
|
||||
}
|
||||
|
||||
smsg, err := nodeAPI.MpoolPushMessage(ctx, &types.Message{
|
||||
msg := &types.Message{
|
||||
From: mi.Owner,
|
||||
To: maddr,
|
||||
Method: builtin.MethodsMiner.ChangeWorkerAddress,
|
||||
|
||||
Value: big.Zero(),
|
||||
Params: sp,
|
||||
}, nil)
|
||||
}
|
||||
|
||||
if cctx.Bool("dump-bytes") {
|
||||
|
||||
msg, err = nodeAPI.GasEstimateMessageGas(ctx, msg, nil, types.EmptyTSK)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgBytes, err := msg.Serialize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Fprintln(cctx.App.Writer, hex.EncodeToString(msgBytes))
|
||||
return nil
|
||||
}
|
||||
|
||||
if !cctx.Bool("really-do-it") {
|
||||
fmt.Fprintln(cctx.App.Writer, "Pass --really-do-it to actually execute this action")
|
||||
return nil
|
||||
}
|
||||
|
||||
smsg, err := nodeAPI.MpoolPushMessage(ctx, msg, nil)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("mpool push: %w", err)
|
||||
}
|
||||
|
@ -14,6 +14,8 @@ import (
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
@ -30,18 +32,18 @@ func carWalkFunc(nd format.Node) (out []*format.Link, err error) {
|
||||
|
||||
var exportCarCmd = &cli.Command{
|
||||
Name: "export-car",
|
||||
Description: "Export a car from repo (requires node to be offline)",
|
||||
Description: "Export a car from repo",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "repo",
|
||||
Value: "~/.lotus",
|
||||
},
|
||||
},
|
||||
ArgsUsage: "[outfile] [root cid]",
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 2 {
|
||||
return lcli.IncorrectNumArgs(cctx)
|
||||
}
|
||||
|
||||
outfile := cctx.Args().First()
|
||||
var roots []cid.Cid
|
||||
for _, arg := range cctx.Args().Tail() {
|
||||
@ -51,14 +53,11 @@ var exportCarCmd = &cli.Command{
|
||||
}
|
||||
roots = append(roots, c)
|
||||
}
|
||||
|
||||
ctx := lcli.ReqContext(cctx)
|
||||
|
||||
r, err := repo.NewFS(cctx.String("repo"))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("opening fs repo: %w", err)
|
||||
}
|
||||
|
||||
exists, err := r.Exists()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -67,11 +66,25 @@ var exportCarCmd = &cli.Command{
|
||||
return xerrors.Errorf("lotus repo doesn't exist")
|
||||
}
|
||||
|
||||
var bs blockstore.Blockstore
|
||||
|
||||
lr, err := r.Lock(repo.FullNode)
|
||||
if err != nil {
|
||||
return err
|
||||
if err == nil {
|
||||
bs, err = lr.Blockstore(ctx, repo.UniversalBlockstore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
defer lr.Close() //nolint:errcheck
|
||||
} else {
|
||||
api, closer, err := lcli.GetFullNodeAPI(cctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer closer()
|
||||
|
||||
bs = blockstore.NewAPIBlockstore(api)
|
||||
}
|
||||
defer lr.Close() //nolint:errcheck
|
||||
|
||||
fi, err := os.Create(outfile)
|
||||
if err != nil {
|
||||
@ -80,11 +93,6 @@ var exportCarCmd = &cli.Command{
|
||||
|
||||
defer fi.Close() //nolint:errcheck
|
||||
|
||||
bs, err := lr.Blockstore(ctx, repo.UniversalBlockstore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
if err := c.Close(); err != nil {
|
||||
@ -98,6 +106,14 @@ var exportCarCmd = &cli.Command{
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sz, err := fi.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("done %s\n", types.SizeStr(types.NewInt(uint64(sz))))
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
@ -32,7 +32,6 @@ import (
|
||||
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/cmd/lotus-shed/shedgen"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
@ -125,22 +124,9 @@ var exportChainCmd = &cli.Command{
|
||||
fullstate := cctx.Bool("full-state")
|
||||
skipoldmsgs := cctx.Bool("skip-old-msgs")
|
||||
|
||||
var ts *types.TipSet
|
||||
if tss := cctx.String("tipset"); tss != "" {
|
||||
cids, err := lcli.ParseTipSetString(tss)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to parse tipset (%q): %w", tss, err)
|
||||
}
|
||||
|
||||
tsk := types.NewTipSetKey(cids...)
|
||||
|
||||
selts, err := cs.LoadTipSet(context.Background(), tsk)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("loading tipset: %w", err)
|
||||
}
|
||||
ts = selts
|
||||
} else {
|
||||
ts = cs.GetHeaviestTipSet()
|
||||
ts, err := lcli.ParseTipSetRefOffline(ctx, cs, cctx.String("tipset"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if fullstate {
|
||||
|
cmd/lotus-shed/fip-0036.go (new file, 554 lines)
@ -0,0 +1,554 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/urfave/cli/v2"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
||||
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
type Option uint64
|
||||
|
||||
const (
|
||||
Approve Option = 49
|
||||
Reject = 50
|
||||
)
|
||||
|
||||
type Vote struct {
|
||||
ID uint64
|
||||
OptionID Option
|
||||
SignerAddress address.Address
|
||||
}
|
||||
|
||||
type msigVote struct {
|
||||
Multisig msigBriefInfo
|
||||
ApproveCount uint64
|
||||
RejectCount uint64
|
||||
}
|
||||
|
||||
// https://filpoll.io/poll/16
|
||||
// snapshot height: 2162760
|
||||
// state root: bafy2bzacebdnzh43hw66bmvguk65wiwr5ssaejlq44fpdei2ysfh3eefpdlqs
|
||||
var fip36PollCmd = &cli.Command{
|
||||
Name: "fip36poll",
|
||||
Usage: "Process the FIP0036 FilPoll result",
|
||||
ArgsUsage: "[state root, votes]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "repo",
|
||||
Value: "~/.lotus",
|
||||
},
|
||||
},
|
||||
Subcommands: []*cli.Command{
|
||||
finalResultCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var finalResultCmd = &cli.Command{
|
||||
Name: "results",
|
||||
Usage: "get poll results",
|
||||
ArgsUsage: "[state root] [height] [votes json]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "repo",
|
||||
Value: "~/.lotus",
|
||||
},
|
||||
},
|
||||
|
||||
Action: func(cctx *cli.Context) error {
|
||||
if cctx.NArg() != 3 {
|
||||
return xerrors.New("filpoll0036 results [state root] [height] [votes.json]")
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
if !cctx.Args().Present() {
|
||||
return fmt.Errorf("must pass state root")
|
||||
}
|
||||
|
||||
sroot, err := cid.Decode(cctx.Args().First())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse input: %w", err)
|
||||
}
|
||||
|
||||
fsrepo, err := repo.NewFS(cctx.String("repo"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lkrepo, err := fsrepo.Lock(repo.FullNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer lkrepo.Close() //nolint:errcheck
|
||||
|
||||
bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
if err := c.Close(); err != nil {
|
||||
log.Warnf("failed to close blockstore: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
cst := cbor.NewCborStore(bs)
|
||||
store := adt.WrapStore(ctx, cst)
|
||||
|
||||
st, err := state.LoadStateTree(cst, sroot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
height, err := strconv.Atoi(cctx.Args().Get(1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//get all the votes' signer ID address && their vote
|
||||
vj, err := homedir.Expand(cctx.Args().Get(2))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fail to get votes json")
|
||||
}
|
||||
votes, err := getVotesMap(vj)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get voters: ", err)
|
||||
}
|
||||
|
||||
type minerBriefInfo struct {
|
||||
rawBytePower abi.StoragePower
|
||||
dealPower abi.StoragePower
|
||||
balance abi.TokenAmount
|
||||
}
|
||||
|
||||
// power actor
|
||||
pa, err := st.GetActor(power.Address)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get power actor: \n", err)
|
||||
}
|
||||
|
||||
powerState, err := power.Load(store, pa)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to get power state: \n", err)
|
||||
}
|
||||
|
||||
//market actor
|
||||
ma, err := st.GetActor(market.Address)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fail to get market actor: ", err)
|
||||
}
|
||||
|
||||
marketState, err := market.Load(store, ma)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fail to load market state: ", err)
|
||||
}
|
||||
|
||||
lookupId := func(addr address.Address) address.Address {
|
||||
ret, err := st.LookupID(addr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// we need to build several pieces of information, as we traverse the state tree:
|
||||
// a map of accounts to every msig that they are a signer of
|
||||
accountsToMultisigs := make(map[address.Address][]address.Address)
|
||||
// a map of multisigs to some info about them for quick lookup
|
||||
msigActorsInfo := make(map[address.Address]msigBriefInfo)
|
||||
// a map of actors (accounts+multisigs) to every miner that they are an owner of
|
||||
ownerMap := make(map[address.Address][]address.Address)
|
||||
// a map of accounts to every miner that they are a worker of
|
||||
workerMap := make(map[address.Address][]address.Address)
|
||||
// a map of miners to some info about them for quick lookup
|
||||
minerActorsInfo := make(map[address.Address]minerBriefInfo)
|
||||
// a map of client addresses to deal data stored in proposals
|
||||
clientToDealStorage := make(map[address.Address]abi.StoragePower)
|
||||
|
||||
fmt.Println("iterating over all actors")
|
||||
count := 0
|
||||
err = st.ForEach(func(addr address.Address, act *types.Actor) error {
|
||||
if count%200000 == 0 {
|
||||
fmt.Println("processed ", count, " actors building maps")
|
||||
}
|
||||
count++
|
||||
if builtin.IsMultisigActor(act.Code) {
|
||||
ms, err := multisig.Load(store, act)
|
||||
if err != nil {
|
||||
return fmt.Errorf("load msig failed %v", err)
|
||||
|
||||
}
|
||||
|
||||
// TODO: Confirm that these are always ID addresses
|
||||
signers, err := ms.Signers()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fail to get msig signers", err)
|
||||
}
|
||||
for _, s := range signers {
|
||||
signerId := lookupId(s)
|
||||
accountsToMultisigs[signerId] = append(accountsToMultisigs[signerId], addr)
|
||||
}
|
||||
|
||||
locked, err := ms.LockedBalance(abi.ChainEpoch(height))
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to compute locked multisig balance: %w", err)
|
||||
}
|
||||
|
||||
threshold, _ := ms.Threshold()
|
||||
info := msigBriefInfo{
|
||||
ID: addr,
|
||||
Signer: signers,
|
||||
Balance: big.Max(big.Zero(), types.BigSub(act.Balance, locked)),
|
||||
Threshold: threshold,
|
||||
}
|
||||
msigActorsInfo[addr] = info
|
||||
}
|
||||
|
||||
if builtin.IsStorageMinerActor(act.Code) {
|
||||
m, err := miner.Load(store, act)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fail to load miner actor: \n", err)
|
||||
}
|
||||
|
||||
info, err := m.Info()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fail to get miner info: \n", err)
|
||||
}
|
||||
|
||||
ownerId := lookupId(info.Owner)
|
||||
ownerMap[ownerId] = append(ownerMap[ownerId], addr)
|
||||
|
||||
workerId := lookupId(info.Worker)
|
||||
workerMap[workerId] = append(workerMap[workerId], addr)
|
||||
|
||||
lockedFunds, err := m.LockedFunds()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bal := big.Sub(act.Balance, lockedFunds.TotalLockedFunds())
|
||||
bal = big.Max(big.Zero(), bal)
|
||||
|
||||
pow, ok, err := powerState.MinerPower(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !ok {
|
||||
pow.RawBytePower = big.Zero()
|
||||
}
|
||||
|
||||
minerActorsInfo[addr] = minerBriefInfo{
|
||||
rawBytePower: pow.RawBytePower,
|
||||
// gets added up outside this loop
|
||||
dealPower: big.Zero(),
|
||||
balance: bal,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("iterating over proposals")
|
||||
dealProposals, err := marketState.Proposals()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dealStates, err := marketState.States()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := dealProposals.ForEach(func(dealID abi.DealID, d market.DealProposal) error {
|
||||
|
||||
dealState, ok, err := dealStates.Get(dealID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok || dealState.SectorStartEpoch == -1 {
|
||||
// effectively a continue
|
||||
return nil
|
||||
}
|
||||
|
||||
clientId := lookupId(d.Client)
|
||||
if cd, found := clientToDealStorage[clientId]; found {
|
||||
clientToDealStorage[clientId] = big.Add(cd, big.NewInt(int64(d.PieceSize)))
|
||||
} else {
|
||||
clientToDealStorage[clientId] = big.NewInt(int64(d.PieceSize))
|
||||
}
|
||||
|
||||
providerId := lookupId(d.Provider)
|
||||
mai, found := minerActorsInfo[providerId]
|
||||
|
||||
if !found {
|
||||
return xerrors.Errorf("didn't find miner %s", providerId)
|
||||
}
|
||||
|
||||
mai.dealPower = big.Add(mai.dealPower, big.NewInt(int64(d.PieceSize)))
|
||||
minerActorsInfo[providerId] = mai
|
||||
return nil
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("fail to get deals")
|
||||
}
|
||||
|
||||
// now tabulate votes
|
||||
|
||||
approveBalance := abi.NewTokenAmount(0)
|
||||
rejectionBalance := abi.NewTokenAmount(0)
|
||||
clientApproveBytes := big.Zero()
|
||||
clientRejectBytes := big.Zero()
|
||||
msigPendingVotes := make(map[address.Address]msigVote) //map[msig ID]msigVote
|
||||
msigVotes := make(map[address.Address]Option)
|
||||
minerVotes := make(map[address.Address]Option)
|
||||
fmt.Println("counting account and multisig votes")
|
||||
for _, vote := range votes {
|
||||
signerId, err := st.LookupID(vote.SignerAddress)
|
||||
if err != nil {
|
||||
fmt.Println("voter ", vote.SignerAddress, " not found in state tree, skipping")
|
||||
continue
|
||||
}
|
||||
|
||||
//process votes for regular accounts
|
||||
accountActor, err := st.GetActor(signerId)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("fail to get account account for signer: ", err)
|
||||
}
|
||||
|
||||
clientBytes, ok := clientToDealStorage[signerId]
|
||||
if !ok {
|
||||
clientBytes = big.Zero()
|
||||
}
|
||||
|
||||
if vote.OptionID == Approve {
|
||||
approveBalance = types.BigAdd(approveBalance, accountActor.Balance)
|
||||
clientApproveBytes = big.Add(clientApproveBytes, clientBytes)
|
||||
} else {
|
||||
rejectionBalance = types.BigAdd(rejectionBalance, accountActor.Balance)
|
||||
clientRejectBytes = big.Add(clientRejectBytes, clientBytes)
|
||||
}
|
||||
|
||||
if minerInfos, found := ownerMap[signerId]; found {
|
||||
for _, minerInfo := range minerInfos {
|
||||
minerVotes[minerInfo] = vote.OptionID
|
||||
}
|
||||
}
|
||||
if minerInfos, found := workerMap[signerId]; found {
|
||||
for _, minerInfo := range minerInfos {
|
||||
if _, ok := minerVotes[minerInfo]; !ok {
|
||||
minerVotes[minerInfo] = vote.OptionID
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//process msigs
|
||||
// There is a possibility that enough signers have voted for BOTH options in the poll to be above the threshold
|
||||
// Because we are iterating over votes in order they arrived, the first option to go over the threshold will win
|
||||
// This is in line with onchain behaviour (consider a case where signers are competing to withdraw all the funds
|
||||
// in an msig into 2 different accounts)
|
||||
if mss, found := accountsToMultisigs[signerId]; found {
|
||||
for _, ms := range mss { //get all the msig signer has
|
||||
if _, ok := msigVotes[ms]; ok {
|
||||
// msig has already voted, skip
|
||||
continue
|
||||
}
|
||||
if mpv, found := msigPendingVotes[ms]; found { //other signers of the multisig have voted, yet the threshold has not met
|
||||
if vote.OptionID == Approve {
|
||||
if mpv.ApproveCount+1 == mpv.Multisig.Threshold { //met threshold
|
||||
approveBalance = types.BigAdd(approveBalance, mpv.Multisig.Balance)
|
||||
delete(msigPendingVotes, ms) //threshold, can skip later signer votes
|
||||
msigVotes[ms] = vote.OptionID
|
||||
|
||||
} else {
|
||||
mpv.ApproveCount++
|
||||
msigPendingVotes[ms] = mpv
|
||||
}
|
||||
} else {
|
||||
if mpv.RejectCount+1 == mpv.Multisig.Threshold { //met threshold
|
||||
rejectionBalance = types.BigAdd(rejectionBalance, mpv.Multisig.Balance)
|
||||
delete(msigPendingVotes, ms) //threshold, can skip later signer votes
|
||||
msigVotes[ms] = vote.OptionID
|
||||
|
||||
} else {
|
||||
mpv.RejectCount++
|
||||
msigPendingVotes[ms] = mpv
|
||||
}
|
||||
}
|
||||
} else { //first vote received from one of the signers of the msig
|
||||
msi, ok := msigActorsInfo[ms]
|
||||
if !ok {
|
||||
return xerrors.Errorf("didn't find msig %s in msig map", ms)
|
||||
}
|
||||
|
||||
if msi.Threshold == 1 { //met threshold with this signer's single vote
|
||||
if vote.OptionID == Approve {
|
||||
approveBalance = types.BigAdd(approveBalance, msi.Balance)
|
||||
msigVotes[ms] = Approve
|
||||
|
||||
} else {
|
||||
rejectionBalance = types.BigAdd(rejectionBalance, msi.Balance)
|
||||
msigVotes[ms] = Reject
|
||||
}
|
||||
} else { //threshold not met, add to pending vote
|
||||
if vote.OptionID == Approve {
|
||||
msigPendingVotes[ms] = msigVote{
|
||||
Multisig: msi,
|
||||
ApproveCount: 1,
|
||||
}
|
||||
} else {
|
||||
msigPendingVotes[ms] = msigVote{
|
||||
Multisig: msi,
|
||||
RejectCount: 1,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for s, v := range msigVotes {
|
||||
if minerInfos, found := ownerMap[s]; found {
|
||||
for _, minerInfo := range minerInfos {
|
||||
minerVotes[minerInfo] = v
|
||||
}
|
||||
}
|
||||
if minerInfos, found := workerMap[s]; found {
|
||||
for _, minerInfo := range minerInfos {
|
||||
if _, ok := minerVotes[minerInfo]; !ok {
|
||||
minerVotes[minerInfo] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
approveRBP := big.Zero()
|
||||
approveDealPower := big.Zero()
|
||||
rejectionRBP := big.Zero()
|
||||
rejectionDealPower := big.Zero()
|
||||
fmt.Println("adding up miner votes")
|
||||
for minerAddr, vote := range minerVotes {
|
||||
mbi, ok := minerActorsInfo[minerAddr]
|
||||
if !ok {
|
||||
return xerrors.Errorf("failed to find miner info for %s", minerAddr)
|
||||
}
|
||||
|
||||
if vote == Approve {
|
||||
approveBalance = big.Add(approveBalance, mbi.balance)
|
||||
approveRBP = big.Add(approveRBP, mbi.rawBytePower)
|
||||
approveDealPower = big.Add(approveDealPower, mbi.dealPower)
|
||||
} else {
|
||||
rejectionBalance = big.Add(rejectionBalance, mbi.balance)
|
||||
rejectionRBP = big.Add(rejectionRBP, mbi.rawBytePower)
|
||||
rejectionDealPower = big.Add(rejectionDealPower, mbi.dealPower)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("Total acceptance token: ", approveBalance)
|
||||
fmt.Println("Total rejection token: ", rejectionBalance)
|
||||
|
||||
fmt.Println("Total acceptance SP deal power: ", approveDealPower)
|
||||
fmt.Println("Total rejection SP deal power: ", rejectionDealPower)
|
||||
|
||||
fmt.Println("Total acceptance SP rb power: ", approveRBP)
|
||||
fmt.Println("Total rejection SP rb power: ", rejectionRBP)
|
||||
|
||||
fmt.Println("Total acceptance Client rb power: ", clientApproveBytes)
|
||||
fmt.Println("Total rejection Client rb power: ", clientRejectBytes)
|
||||
|
||||
fmt.Println("\n\nFinal results **drumroll**")
|
||||
if rejectionBalance.GreaterThanEqual(big.Mul(approveBalance, big.NewInt(3))) {
|
||||
fmt.Println("token holders VETO FIP-0036!")
|
||||
} else if approveBalance.LessThanEqual(rejectionBalance) {
|
||||
fmt.Println("token holders REJECT FIP-0036")
|
||||
} else {
|
||||
fmt.Println("token holders ACCEPT FIP-0036")
|
||||
}
|
||||
|
||||
if rejectionDealPower.GreaterThanEqual(big.Mul(approveDealPower, big.NewInt(3))) {
|
||||
fmt.Println("SPs by deal data stored VETO FIP-0036!")
|
||||
} else if approveDealPower.LessThanEqual(rejectionDealPower) {
|
||||
fmt.Println("SPs by deal data stored REJECT FIP-0036")
|
||||
} else {
|
||||
fmt.Println("SPs by deal data stored ACCEPT FIP-0036")
|
||||
}
|
||||
|
||||
if rejectionRBP.GreaterThanEqual(big.Mul(approveRBP, big.NewInt(3))) {
|
||||
fmt.Println("SPs by total raw byte power VETO FIP-0036!")
|
||||
} else if approveRBP.LessThanEqual(rejectionRBP) {
|
||||
fmt.Println("SPs by total raw byte power REJECT FIP-0036")
|
||||
} else {
|
||||
fmt.Println("SPs by total raw byte power ACCEPT FIP-0036")
|
||||
}
|
||||
|
||||
if clientRejectBytes.GreaterThanEqual(big.Mul(clientApproveBytes, big.NewInt(3))) {
|
||||
fmt.Println("Storage Clients VETO FIP-0036!")
|
||||
} else if clientApproveBytes.LessThanEqual(clientRejectBytes) {
|
||||
fmt.Println("Storage Clients REJECT FIP-0036")
|
||||
} else {
|
||||
fmt.Println("Storage Clients ACCEPT FIP-0036")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// Returns votes sorted by ID, from earliest to latest
|
||||
func getVotesMap(file string) ([]Vote, error) {
|
||||
var votes []Vote
|
||||
vb, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("read vote: %w", err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(vb, &votes); err != nil {
|
||||
return nil, xerrors.Errorf("unmarshal vote: %w", err)
|
||||
}
|
||||
|
||||
sort.SliceStable(votes, func(i, j int) bool {
|
||||
return votes[i].ID < votes[j].ID
|
||||
})
|
||||
|
||||
return votes, nil
|
||||
}
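// Hedged sketch, not part of the diff: the multisig tallying rule described in
// the comments above, in miniature. Votes are processed in arrival order and
// the first option whose per-msig count reaches the threshold claims that
// msig's balance; later signer votes for the same msig are ignored. An
// illustrative invocation of the command (names taken from the flags above,
// CID and height from the snapshot comment) would be:
//
//	lotus-shed fip36poll results <stateroot> 2162760 votes.json
//
//	func tallyMsig(threshold uint64, signerVotes []Option) (Option, bool) {
//		var approves, rejects uint64
//		for _, v := range signerVotes { // arrival order
//			if v == Approve {
//				approves++
//				if approves == threshold {
//					return Approve, true
//				}
//			} else {
//				rejects++
//				if rejects == threshold {
//					return Reject, true
//				}
//			}
//		}
//		return 0, false // threshold never met, balance not counted
//	}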
|
@ -73,6 +73,8 @@ func main() {
|
||||
migrationsCmd,
|
||||
diffCmd,
|
||||
itestdCmd,
|
||||
msigCmd,
|
||||
fip36PollCmd,
|
||||
invariantsCmd,
|
||||
}
|
||||
|
||||
|
cmd/lotus-shed/msig.go (new file, 131 lines)
@ -0,0 +1,131 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
|
||||
"github.com/filecoin-project/lotus/chain/consensus/filcns"
|
||||
"github.com/filecoin-project/lotus/chain/state"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/repo"
|
||||
)
|
||||
|
||||
type msigBriefInfo struct {
|
||||
ID address.Address
|
||||
Signer []address.Address
|
||||
Balance abi.TokenAmount
|
||||
Threshold uint64
|
||||
}
|
||||
|
||||
var msigCmd = &cli.Command{
|
||||
Name: "msig",
|
||||
Subcommands: []*cli.Command{
|
||||
multisigGetAllCmd,
|
||||
},
|
||||
}
|
||||
|
||||
var multisigGetAllCmd = &cli.Command{
|
||||
Name: "all",
|
||||
Usage: "get all multisig actor on chain with id, signers, threshold and balance at a tipset",
|
||||
ArgsUsage: "[state root]",
|
||||
Flags: []cli.Flag{
|
||||
&cli.StringFlag{
|
||||
Name: "repo",
|
||||
Value: "~/.lotus",
|
||||
},
|
||||
},
|
||||
Action: func(cctx *cli.Context) error {
|
||||
ctx := context.TODO()
|
||||
if !cctx.Args().Present() {
|
||||
return fmt.Errorf("must pass state root")
|
||||
}
|
||||
|
||||
sroot, err := cid.Decode(cctx.Args().First())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse input: %w", err)
|
||||
}
|
||||
|
||||
fsrepo, err := repo.NewFS(cctx.String("repo"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lkrepo, err := fsrepo.Lock(repo.FullNode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer lkrepo.Close() //nolint:errcheck
|
||||
|
||||
bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open blockstore: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if c, ok := bs.(io.Closer); ok {
|
||||
if err := c.Close(); err != nil {
|
||||
log.Warnf("failed to close blockstore: %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
mds, err := lkrepo.Datastore(context.Background(), "/metadata")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil)
|
||||
defer cs.Close() //nolint:errcheck
|
||||
|
||||
cst := cbor.NewCborStore(bs)
|
||||
store := adt.WrapStore(ctx, cst)
|
||||
|
||||
tree, err := state.LoadStateTree(cst, sroot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var msigActorsInfo []msigBriefInfo
|
||||
err = tree.ForEach(func(addr address.Address, act *types.Actor) error {
|
||||
if builtin.IsMultisigActor(act.Code) {
|
||||
ms, err := multisig.Load(store, act)
|
||||
if err != nil {
|
||||
return fmt.Errorf("load msig failed %v", err)
|
||||
|
||||
}
|
||||
|
||||
signers, _ := ms.Signers()
|
||||
threshold, _ := ms.Threshold()
|
||||
info := msigBriefInfo{
|
||||
ID: addr,
|
||||
Signer: signers,
|
||||
Balance: act.Balance,
|
||||
Threshold: threshold,
|
||||
}
|
||||
msigActorsInfo = append(msigActorsInfo, info)
|
||||
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
return err
}
out, err := json.MarshalIndent(msigActorsInfo, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println(string(out))
|
||||
return nil
|
||||
},
|
||||
}
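// Hedged usage example, not part of the diff: running the new shed command
// against a local repo. The state root CID reuses the FIP-0036 snapshot root
// quoted earlier purely as an example; --repo defaults to ~/.lotus as
// declared above.
//
//	lotus-shed msig all --repo ~/.lotus bafy2bzacebdnzh43hw66bmvguk65wiwr5ssaejlq44fpdei2ysfh3eefpdlqs
//
// The output is an indented JSON array of msigBriefInfo entries (ID, signers,
// threshold and raw balance at that state root).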
|
@ -31,12 +31,7 @@ var createSimCommand = &cli.Command{
|
||||
}
|
||||
ts = node.Chainstore.GetHeaviestTipSet()
|
||||
case 1:
|
||||
cids, err := lcli.ParseTipSetString(cctx.Args().Get(1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tsk := types.NewTipSetKey(cids...)
|
||||
ts, err = node.Chainstore.LoadTipSet(cctx.Context, tsk)
|
||||
ts, err = lcli.ParseTipSetRefOffline(cctx.Context, node.Chainstore, cctx.Args().Get(1))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -447,10 +447,10 @@ var runCmd = &cli.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
var localPaths []paths.LocalPath
|
||||
var localPaths []storiface.LocalPath
|
||||
|
||||
if !cctx.Bool("no-local-storage") {
|
||||
b, err := json.MarshalIndent(&paths.LocalStorageMeta{
|
||||
b, err := json.MarshalIndent(&storiface.LocalStorageMeta{
|
||||
ID: storiface.ID(uuid.New().String()),
|
||||
Weight: 10,
|
||||
CanSeal: true,
|
||||
@ -464,12 +464,12 @@ var runCmd = &cli.Command{
|
||||
return xerrors.Errorf("persisting storage metadata (%s): %w", filepath.Join(lr.Path(), "sectorstore.json"), err)
|
||||
}
|
||||
|
||||
localPaths = append(localPaths, paths.LocalPath{
|
||||
localPaths = append(localPaths, storiface.LocalPath{
|
||||
Path: lr.Path(),
|
||||
})
|
||||
}
|
||||
|
||||
if err := lr.SetStorage(func(sc *paths.StorageConfig) {
|
||||
if err := lr.SetStorage(func(sc *storiface.StorageConfig) {
|
||||
sc.StoragePaths = append(sc.StoragePaths, localPaths...)
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("set storage config: %w", err)
|
||||
|
@ -92,8 +92,8 @@ func (w *Worker) StorageAddLocal(ctx context.Context, path string) error {
|
||||
return xerrors.Errorf("opening local path: %w", err)
|
||||
}
|
||||
|
||||
if err := w.Storage.SetStorage(func(sc *paths.StorageConfig) {
|
||||
sc.StoragePaths = append(sc.StoragePaths, paths.LocalPath{Path: path})
|
||||
if err := w.Storage.SetStorage(func(sc *storiface.StorageConfig) {
|
||||
sc.StoragePaths = append(sc.StoragePaths, storiface.LocalPath{Path: path})
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("get storage config: %w", err)
|
||||
}
|
||||
@ -127,8 +127,8 @@ func (w *Worker) StorageDetachLocal(ctx context.Context, path string) error {
|
||||
|
||||
// drop from the persisted storage.json
|
||||
var found bool
|
||||
if err := w.Storage.SetStorage(func(sc *paths.StorageConfig) {
|
||||
out := make([]paths.LocalPath, 0, len(sc.StoragePaths))
|
||||
if err := w.Storage.SetStorage(func(sc *storiface.StorageConfig) {
|
||||
out := make([]storiface.LocalPath, 0, len(sc.StoragePaths))
|
||||
for _, storagePath := range sc.StoragePaths {
|
||||
if storagePath.Path != path {
|
||||
out = append(out, storagePath)
|
||||
|
@ -13,7 +13,6 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
lcli "github.com/filecoin-project/lotus/cli"
|
||||
"github.com/filecoin-project/lotus/storage/paths"
|
||||
"github.com/filecoin-project/lotus/storage/sealer/storiface"
|
||||
)
|
||||
|
||||
@ -103,7 +102,7 @@ var storageAttachCmd = &cli.Command{
|
||||
}
|
||||
}
|
||||
|
||||
cfg := &paths.LocalStorageMeta{
|
||||
cfg := &storiface.LocalStorageMeta{
|
||||
ID: storiface.ID(uuid.New().String()),
|
||||
Weight: cctx.Uint64("weight"),
|
||||
CanSeal: cctx.Bool("seal"),
|
||||
|
@ -16,6 +16,7 @@ import (
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
format "github.com/ipfs/go-ipld-format"
|
||||
"github.com/ipfs/go-merkledag"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/api/v0api"
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
@ -158,3 +159,12 @@ func (pb *proxyingBlockstore) PutMany(ctx context.Context, blocks []blocks.Block
|
||||
pb.lk.Unlock()
|
||||
return pb.Blockstore.PutMany(ctx, blocks)
|
||||
}
|
||||
|
||||
func (pb *proxyingBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error {
|
||||
blk, err := pb.Get(ctx, c)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to Get cid %s: %w", c, err)
|
||||
}
|
||||
|
||||
return callback(blk.RawData())
|
||||
}
|
||||
|
@ -160,7 +160,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params
|
||||
return big.Zero(), nil
|
||||
}
|
||||
|
||||
return vm.NewLegacyVM(ctx, vmopt)
|
||||
return vm.NewVM(ctx, vmopt)
|
||||
})
|
||||
|
||||
postcid, receiptsroot, err := tse.ApplyBlocks(context.Background(),
|
||||
@ -242,23 +242,38 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP
|
||||
LookbackState: lookback,
|
||||
}
|
||||
|
||||
lvm, err := vm.NewLegacyVM(context.TODO(), vmOpts)
|
||||
if err != nil {
|
||||
return nil, cid.Undef, err
|
||||
}
|
||||
|
||||
invoker := filcns.NewActorRegistry()
|
||||
|
||||
// register the chaos actor if required by the vector.
|
||||
var vmi vm.Interface
|
||||
if chaosOn, ok := d.selector["chaos_actor"]; ok && chaosOn == "true" {
|
||||
lvm, err := vm.NewLegacyVM(context.TODO(), vmOpts)
|
||||
if err != nil {
|
||||
return nil, cid.Undef, err
|
||||
}
|
||||
|
||||
invoker := filcns.NewActorRegistry()
|
||||
av, _ := actorstypes.VersionForNetwork(params.NetworkVersion)
|
||||
registry := builtin.MakeRegistryLegacy([]rtt.VMActor{chaos.Actor{}})
|
||||
invoker.Register(av, nil, registry)
|
||||
lvm.SetInvoker(invoker)
|
||||
vmi = lvm
|
||||
} else {
|
||||
if vmOpts.NetworkVersion >= network.Version16 {
|
||||
fvm, err := vm.NewFVM(context.TODO(), vmOpts)
|
||||
if err != nil {
|
||||
return nil, cid.Undef, err
|
||||
}
|
||||
vmi = fvm
|
||||
} else {
|
||||
lvm, err := vm.NewLegacyVM(context.TODO(), vmOpts)
|
||||
if err != nil {
|
||||
return nil, cid.Undef, err
|
||||
}
|
||||
invoker := filcns.NewActorRegistry()
|
||||
lvm.SetInvoker(invoker)
|
||||
vmi = lvm
|
||||
}
|
||||
}
|
||||
|
||||
lvm.SetInvoker(invoker)
|
||||
|
||||
ret, err := lvm.ApplyMessage(d.ctx, toChainMsg(params.Message))
|
||||
ret, err := vmi.ApplyMessage(d.ctx, toChainMsg(params.Message))
|
||||
if err != nil {
|
||||
return nil, cid.Undef, err
|
||||
}
|
||||
@ -266,10 +281,10 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP
|
||||
var root cid.Cid
|
||||
if d.vmFlush {
|
||||
// flush the VM, committing the state tree changes and forcing a
|
||||
// recursive copoy from the temporary blcokstore to the real blockstore.
|
||||
root, err = lvm.Flush(d.ctx)
|
||||
// recursive copy from the temporary blockstore to the real blockstore.
|
||||
root, err = vmi.Flush(d.ctx)
|
||||
} else {
|
||||
root, err = lvm.StateTree().(*state.StateTree).Flush(d.ctx)
|
||||
root, err = vmi.(*vm.LegacyVM).StateTree().(*state.StateTree).Flush(d.ctx)
|
||||
}
|
||||
|
||||
return ret, root, err
|
||||
|
@ -167,6 +167,8 @@
|
||||
* [SectorsSummary](#SectorsSummary)
|
||||
* [SectorsUnsealPiece](#SectorsUnsealPiece)
|
||||
* [SectorsUpdate](#SectorsUpdate)
|
||||
* [Start](#Start)
|
||||
* [StartTime](#StartTime)
|
||||
* [Storage](#Storage)
|
||||
* [StorageAddLocal](#StorageAddLocal)
|
||||
* [StorageAttach](#StorageAttach)
|
||||
@ -413,8 +415,7 @@ Inputs:
|
||||
},
|
||||
"ProofType": 8
|
||||
}
|
||||
],
|
||||
true
|
||||
]
|
||||
]
|
||||
```
|
||||
|
||||
@ -3623,6 +3624,18 @@ Inputs:
|
||||
|
||||
Response: `{}`
|
||||
|
||||
## Start
|
||||
|
||||
|
||||
### StartTime
|
||||
|
||||
|
||||
Perms: read
|
||||
|
||||
Inputs: `null`
|
||||
|
||||
Response: `"0001-01-01T00:00:00Z"`
|
||||
|
||||
## Storage
|
||||
|
||||
|
||||
|
@ -156,6 +156,8 @@
|
||||
* [PaychVoucherCreate](#PaychVoucherCreate)
|
||||
* [PaychVoucherList](#PaychVoucherList)
|
||||
* [PaychVoucherSubmit](#PaychVoucherSubmit)
|
||||
* [Start](#Start)
|
||||
* [StartTime](#StartTime)
|
||||
* [State](#State)
|
||||
* [StateAccountKey](#StateAccountKey)
|
||||
* [StateActorCodeCIDs](#StateActorCodeCIDs)
|
||||
@ -4620,6 +4622,18 @@ Response:
|
||||
}
|
||||
```
|
||||
|
||||
## Start
|
||||
|
||||
|
||||
### StartTime
|
||||
|
||||
|
||||
Perms: read
|
||||
|
||||
Inputs: `null`
|
||||
|
||||
Response: `"0001-01-01T00:00:00Z"`
|
||||
|
||||
## State
|
||||
The State methods are used to query, inspect, and interact with chain state.
|
||||
Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
|
||||
|
@ -164,6 +164,11 @@
|
||||
* [PaychVoucherCreate](#PaychVoucherCreate)
|
||||
* [PaychVoucherList](#PaychVoucherList)
|
||||
* [PaychVoucherSubmit](#PaychVoucherSubmit)
|
||||
* [Raft](#Raft)
|
||||
* [RaftLeader](#RaftLeader)
|
||||
* [RaftState](#RaftState)
|
||||
* [Start](#Start)
|
||||
* [StartTime](#StartTime)
|
||||
* [State](#State)
|
||||
* [StateAccountKey](#StateAccountKey)
|
||||
* [StateActorCodeCIDs](#StateActorCodeCIDs)
|
||||
@ -1990,7 +1995,8 @@ Inputs:
|
||||
"Address": "f01234",
|
||||
"ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf",
|
||||
"PieceCID": null
|
||||
}
|
||||
},
|
||||
"RemoteStore": "00000000-0000-0000-0000-000000000000"
|
||||
}
|
||||
]
|
||||
```
|
||||
@ -5052,6 +5058,45 @@ Response:
|
||||
}
|
||||
```
|
||||
|
||||
## Raft
|
||||
|
||||
|
||||
### RaftLeader
|
||||
|
||||
|
||||
Perms: read
|
||||
|
||||
Inputs: `null`
|
||||
|
||||
Response: `"12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf"`
|
||||
|
||||
### RaftState
|
||||
|
||||
|
||||
Perms: read
|
||||
|
||||
Inputs: `null`
|
||||
|
||||
Response:
|
||||
```json
|
||||
{
|
||||
"NonceMap": {},
|
||||
"MsgUuids": {}
|
||||
}
|
||||
```
|
||||
|
||||
## Start
|
||||
|
||||
|
||||
### StartTime
|
||||
|
||||
|
||||
Perms: read
|
||||
|
||||
Inputs: `null`
|
||||
|
||||
Response: `"0001-01-01T00:00:00Z"`
|
||||
|
||||
## State
|
||||
The State methods are used to query, inspect, and interact with chain state.
|
||||
Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
|
||||
|
@ -7,7 +7,7 @@ USAGE:
|
||||
lotus-miner [global options] command [command options] [arguments...]
|
||||
|
||||
VERSION:
|
||||
1.18.2
|
||||
1.19.0
|
||||
|
||||
COMMANDS:
|
||||
init Initialize a lotus miner repo
|
||||
@ -1241,7 +1241,7 @@ NAME:
|
||||
lotus-miner net ping - Ping peers
|
||||
|
||||
USAGE:
|
||||
lotus-miner net ping [command options] [arguments...]
|
||||
lotus-miner net ping [command options] [peerMultiaddr]
|
||||
|
||||
OPTIONS:
|
||||
--count value, -c value specify the number of times it should ping (default: 10)
|
||||
|
@ -7,7 +7,7 @@ USAGE:
|
||||
lotus-worker [global options] command [command options] [arguments...]
|
||||
|
||||
VERSION:
|
||||
1.18.2
|
||||
1.19.0
|
||||
|
||||
COMMANDS:
|
||||
run Start lotus worker
|
||||
|
@ -7,7 +7,7 @@ USAGE:
|
||||
lotus [global options] command [command options] [arguments...]
|
||||
|
||||
VERSION:
|
||||
1.18.2
|
||||
1.19.0
|
||||
|
||||
COMMANDS:
|
||||
daemon Start a lotus daemon process
|
||||
@ -2087,6 +2087,7 @@ COMMANDS:
|
||||
decode decode various types
|
||||
encode encode various types
|
||||
disputer interact with the window post disputer
|
||||
prune prune the stored chain state and perform garbage collection
|
||||
help, h Shows a list of commands or help for one command
|
||||
|
||||
OPTIONS:
|
||||
@ -2409,6 +2410,22 @@ OPTIONS:
|
||||
|
||||
```
|
||||
|
||||
### lotus chain prune
|
||||
```
|
||||
NAME:
|
||||
lotus chain prune - prune the stored chain state and perform garbage collection
|
||||
|
||||
USAGE:
|
||||
lotus chain prune [command options] [arguments...]
|
||||
|
||||
OPTIONS:
|
||||
--move-to value specify new path for coldstore during moving gc
|
||||
--moving-gc use moving gc for garbage collecting the coldstore (default: false)
|
||||
--online-gc use online gc for garbage collecting the coldstore (default: false)
|
||||
--retention value specify state retention policy (default: -1)
|
||||
|
||||
```
|
||||
|
||||
## lotus log
|
||||
```
|
||||
NAME:
|
||||
@ -2570,7 +2587,7 @@ NAME:
|
||||
lotus net ping - Ping peers
|
||||
|
||||
USAGE:
|
||||
lotus net ping [command options] [arguments...]
|
||||
lotus net ping [command options] [peerMultiaddr]
|
||||
|
||||
OPTIONS:
|
||||
--count value, -c value specify the number of times it should ping (default: 10)
|
||||
|
@ -166,11 +166,11 @@
|
||||
|
||||
[Chainstore.Splitstore]
|
||||
# ColdStoreType specifies the type of the coldstore.
|
||||
# It can be "universal" (default) or "discard" for discarding cold blocks.
|
||||
# It can be "messages" (default) to store only messages, "universal" to store all chain state or "discard" for discarding cold blocks.
|
||||
#
|
||||
# type: string
|
||||
# env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORETYPE
|
||||
#ColdStoreType = "universal"
|
||||
#ColdStoreType = "messages"
|
||||
|
||||
# HotStoreType specifies the type of the hotstore.
|
||||
# Only currently supported value is "badger".
|
||||
@ -201,28 +201,66 @@
|
||||
# env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY
|
||||
#HotStoreFullGCFrequency = 20
|
||||
|
||||
# EnableColdStoreAutoPrune turns on compaction of the cold store i.e. pruning
|
||||
# where hotstore compaction occurs every finality epochs pruning happens every 3 finalities
|
||||
# Default is false
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_CHAINSTORE_SPLITSTORE_ENABLECOLDSTOREAUTOPRUNE
|
||||
#EnableColdStoreAutoPrune = false
|
||||
|
||||
# ColdStoreFullGCFrequency specifies how often to performa a full (moving) GC on the coldstore.
|
||||
# Only applies if auto prune is enabled. A value of 0 disables while a value of 1 will do
|
||||
# full GC in every prune.
|
||||
# Default is 7 (about once every a week)
|
||||
#
|
||||
# type: uint64
|
||||
# env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTOREFULLGCFREQUENCY
|
||||
#ColdStoreFullGCFrequency = 7
|
||||
[Cluster]
|
||||
# EXPERIMENTAL. config to enabled node cluster with raft consensus
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_CLUSTER_CLUSTERMODEENABLED
|
||||
#ClusterModeEnabled = false
|
||||
|
||||
# ColdStoreRetention specifies the retention policy for data reachable from the chain, in
|
||||
# finalities beyond the compaction boundary, default is 0, -1 retains everything
|
||||
#
|
||||
# type: int64
|
||||
# env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORERETENTION
|
||||
#ColdStoreRetention = 0
|
||||
# A folder to store Raft's data.
|
||||
#
|
||||
# type: string
|
||||
# env var: LOTUS_CLUSTER_DATAFOLDER
|
||||
#DataFolder = ""
|
||||
|
||||
# InitPeersetMultiAddr provides the list of initial cluster peers for new Raft
|
||||
# peers (with no prior state). It is ignored when Raft was already
|
||||
# initialized or when starting in staging mode.
|
||||
#
|
||||
# type: []string
|
||||
# env var: LOTUS_CLUSTER_INITPEERSETMULTIADDR
|
||||
#InitPeersetMultiAddr = []
|
||||
|
||||
# LeaderTimeout specifies how long to wait for a leader before
|
||||
# failing an operation.
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_CLUSTER_WAITFORLEADERTIMEOUT
|
||||
#WaitForLeaderTimeout = "15s"
|
||||
|
||||
# NetworkTimeout specifies how long before a Raft network
|
||||
# operation is timed out
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_CLUSTER_NETWORKTIMEOUT
|
||||
#NetworkTimeout = "1m40s"
|
||||
|
||||
# CommitRetries specifies how many times we retry a failed commit until
|
||||
# we give up.
|
||||
#
|
||||
# type: int
|
||||
# env var: LOTUS_CLUSTER_COMMITRETRIES
|
||||
#CommitRetries = 1
|
||||
|
||||
# How long to wait between retries
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_CLUSTER_COMMITRETRYDELAY
|
||||
#CommitRetryDelay = "200ms"
|
||||
|
||||
# BackupsRotate specifies the maximum number of Raft's DataFolder
|
||||
# copies that we keep as backups (renaming) after cleanup.
|
||||
#
|
||||
# type: int
|
||||
# env var: LOTUS_CLUSTER_BACKUPSROTATE
|
||||
#BackupsRotate = 6
|
||||
|
||||
# Tracing enables propagation of contexts across binary boundaries.
|
||||
#
|
||||
# type: bool
|
||||
# env var: LOTUS_CLUSTER_TRACING
|
||||
#Tracing = false
|
||||
|
||||
|
||||
|
@ -325,6 +325,29 @@
|
||||
# env var: LOTUS_PROVING_PARALLELCHECKLIMIT
|
||||
#ParallelCheckLimit = 128
|
||||
|
||||
# Maximum amount of time a proving pre-check can take for a sector. If the check times out the sector will be skipped
|
||||
#
|
||||
# WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the
|
||||
# test challenge took longer than this timeout
|
||||
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this sector are
|
||||
# blocked (e.g. in case of disconnected NFS mount)
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_PROVING_SINGLECHECKTIMEOUT
|
||||
#SingleCheckTimeout = "10m0s"
|
||||
|
||||
# Maximum amount of time a proving pre-check can take for an entire partition. If the check times out, sectors in
|
||||
# the partition which didn't get checked on time will be skipped
|
||||
#
|
||||
# WARNING: Setting this value too low risks in sectors being skipped even though they are accessible, just reading the
|
||||
# test challenge took longer than this timeout
|
||||
# WARNING: Setting this value too high risks missing PoSt deadline in case IO operations related to this partition are
|
||||
# blocked or slow
|
||||
#
|
||||
# type: Duration
|
||||
# env var: LOTUS_PROVING_PARTITIONCHECKTIMEOUT
|
||||
#PartitionCheckTimeout = "20m0s"
|
||||
|
||||
# Disable Window PoSt computation on the lotus-miner process even if no window PoSt workers are present.
|
||||
#
|
||||
# WARNING: If no windowPoSt workers are connected, window PoSt WILL FAIL resulting in faulty sectors which will need
|
||||
@ -694,7 +717,7 @@
|
||||
# to use when evaluating tasks against this worker. An empty value defaults
|
||||
# to "hardware".
|
||||
#
|
||||
# type: sealer.ResourceFilteringStrategy
|
||||
# type: ResourceFilteringStrategy
|
||||
# env var: LOTUS_STORAGE_RESOURCEFILTERING
|
||||
#ResourceFiltering = "hardware"
|
||||
|
||||
|
@ -21,11 +21,12 @@
|
||||
First steps:
|
||||
|
||||
- [ ] Fork a new branch (`release/vX.Y.Z`) from `master` and make any further release related changes to this branch. If any "non-trivial" changes get added to the release, uncheck all the checkboxes and return to this stage.
|
||||
- [ ] Bump the version in `version.go` in the `master` branch to `vX.Y.(Z+1)-dev` (bump from feature release) or `vX.(Y+1).0-dev` (bump from mandatory release)
|
||||
- [ ] Bump the version in `build/version.go` in the `master` branch to `vX.Y.(Z+1)-dev` (bump from feature release) or `vX.(Y+1).0-dev` (bump from mandatory release)
|
||||
|
||||
Prepping an RC:
|
||||
|
||||
- [ ] version string in `build/version.go` has been updated (in the `release/vX.Y.Z` branch).
|
||||
- [ ] run `make gen && make docsgen-cli`
|
||||
- [ ] tag commit with `vX.Y.Z-rcN`
|
||||
- [ ] cut a pre-release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true)
|
||||
|
||||
@ -66,14 +67,14 @@ Testing an RC:
|
||||
- [ ] Update the [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) to the state that can be used as release note.
|
||||
- [ ] Invite the wider community through (link to the release issue)
|
||||
|
||||
- [ ] **Stage 4 - Release**
|
||||
- [ ] **Stage 4 - Stable Release**
|
||||
- [ ] Final preparation
|
||||
- [ ] Verify that version string in [`version.go`](https://github.com/ipfs/go-ipfs/tree/master/version.go) has been updated.
|
||||
- [ ] Prep the changelog using `scripts/mkreleaselog`, and add it to `CHANGELOG.md`. Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date
|
||||
- [ ] Verify that version string in [`version.go`](https://github.com/filecoin-project/lotus/blob/master/build/version.go) has been updated.
|
||||
- [ ] Verify that codegen is up to date (`make gen && make docsgen-cli`)
|
||||
- [ ] Ensure that [CHANGELOG.md](https://github.com/filecoin-project/lotus/blob/master/CHANGELOG.md) is up to date
|
||||
- [ ] Merge `release-vX.Y.Z` into the `releases` branch.
|
||||
- [ ] Tag this merge commit (on the `releases` branch) with `vX.Y.Z`
|
||||
- [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=true&target=releases).
|
||||
- [ ] Check `Create a discussion for this release`
|
||||
- [ ] Cut the release [here](https://github.com/filecoin-project/lotus/releases/new?prerelease=false&target=releases).
|
||||
|
||||
|
||||
- [ ] **Post-Release**
|
||||
|
2
extern/filecoin-ffi
vendored
2
extern/filecoin-ffi
vendored
@ -1 +1 @@
|
||||
Subproject commit 20f104e88065aae59fd212d64be5bed108604e78
|
||||
Subproject commit 280c4f8b94fd46dc824a5c827dece73ec7fe3efd
|
10
gen/main.go
10
gen/main.go
@ -7,6 +7,7 @@ import (
|
||||
gen "github.com/whyrusleeping/cbor-gen"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/chain/exchange"
|
||||
"github.com/filecoin-project/lotus/chain/market"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
@ -127,4 +128,13 @@ func main() {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
err = gen.WriteTupleEncodersToFile("./blockstore/cbor_gen.go", "blockstore",
|
||||
blockstore.NetRpcReq{},
|
||||
blockstore.NetRpcResp{},
|
||||
blockstore.NetRpcErr{},
|
||||
)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
29
go.mod
29
go.mod
@ -29,7 +29,7 @@ require (
|
||||
github.com/fatih/color v1.13.0
|
||||
github.com/filecoin-project/dagstore v0.5.2
|
||||
github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f
|
||||
github.com/filecoin-project/go-address v1.0.0
|
||||
github.com/filecoin-project/go-address v1.1.0
|
||||
github.com/filecoin-project/go-bitfield v0.2.4
|
||||
github.com/filecoin-project/go-cbor-util v0.0.1
|
||||
github.com/filecoin-project/go-commp-utils v0.1.3
|
||||
@ -37,12 +37,12 @@ require (
|
||||
github.com/filecoin-project/go-data-transfer v1.15.2
|
||||
github.com/filecoin-project/go-fil-commcid v0.1.0
|
||||
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0
|
||||
github.com/filecoin-project/go-fil-markets v1.24.0-v17
|
||||
github.com/filecoin-project/go-fil-markets v1.25.0
|
||||
github.com/filecoin-project/go-jsonrpc v0.1.8
|
||||
github.com/filecoin-project/go-legs v0.4.4
|
||||
github.com/filecoin-project/go-padreader v0.0.1
|
||||
github.com/filecoin-project/go-paramfetch v0.0.4
|
||||
github.com/filecoin-project/go-state-types v0.9.8
|
||||
github.com/filecoin-project/go-state-types v0.10.0-alpha-2
|
||||
github.com/filecoin-project/go-statemachine v1.0.2
|
||||
github.com/filecoin-project/go-statestore v0.2.0
|
||||
github.com/filecoin-project/go-storedcounter v0.1.0
|
||||
@ -63,10 +63,13 @@ require (
|
||||
github.com/golang/mock v1.6.0
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/mux v1.7.4
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
|
||||
github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
github.com/hashicorp/golang-lru v0.5.4
|
||||
github.com/hashicorp/raft v1.1.1
|
||||
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea
|
||||
github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94
|
||||
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab
|
||||
github.com/ipfs/bbloom v0.0.4
|
||||
@ -94,14 +97,14 @@ require (
|
||||
github.com/ipfs/go-ipld-cbor v0.0.6
|
||||
github.com/ipfs/go-ipld-format v0.4.0
|
||||
github.com/ipfs/go-log/v2 v2.5.1
|
||||
github.com/ipfs/go-merkledag v0.8.0
|
||||
github.com/ipfs/go-merkledag v0.8.1
|
||||
github.com/ipfs/go-metrics-interface v0.0.1
|
||||
github.com/ipfs/go-metrics-prometheus v0.0.2
|
||||
github.com/ipfs/go-unixfs v0.3.1
|
||||
github.com/ipfs/go-unixfsnode v1.4.0
|
||||
github.com/ipfs/interface-go-ipfs-core v0.7.0
|
||||
github.com/ipld/go-car v0.4.0
|
||||
github.com/ipld/go-car/v2 v2.4.1
|
||||
github.com/ipld/go-car/v2 v2.5.0
|
||||
github.com/ipld/go-codec-dagpb v1.3.2
|
||||
github.com/ipld/go-ipld-prime v0.17.0
|
||||
github.com/ipld/go-ipld-selector-text-lite v0.0.1
|
||||
@ -109,12 +112,16 @@ require (
|
||||
github.com/koalacxr/quantile v0.0.1
|
||||
github.com/libp2p/go-buffer-pool v0.1.0
|
||||
github.com/libp2p/go-libp2p v0.22.0
|
||||
github.com/libp2p/go-libp2p-consensus v0.0.1
|
||||
github.com/libp2p/go-libp2p-gorpc v0.4.0
|
||||
github.com/libp2p/go-libp2p-kad-dht v0.18.0
|
||||
github.com/libp2p/go-libp2p-peerstore v0.8.0
|
||||
github.com/libp2p/go-libp2p-pubsub v0.8.0
|
||||
github.com/libp2p/go-libp2p-raft v0.1.8
|
||||
github.com/libp2p/go-libp2p-record v0.2.0
|
||||
github.com/libp2p/go-libp2p-routing-helpers v0.2.3
|
||||
github.com/libp2p/go-maddr-filter v0.1.0
|
||||
github.com/libp2p/go-msgio v0.2.0
|
||||
github.com/mattn/go-isatty v0.0.16
|
||||
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
@ -145,6 +152,7 @@ require (
|
||||
go.uber.org/fx v1.15.0
|
||||
go.uber.org/multierr v1.8.0
|
||||
go.uber.org/zap v1.22.0
|
||||
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5
|
||||
golang.org/x/net v0.0.0-20220812174116-3211cb980234
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab
|
||||
@ -163,9 +171,11 @@ require (
|
||||
github.com/Stebalien/go-bitfield v0.0.1 // indirect
|
||||
github.com/akavel/rsrc v0.8.0 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a // indirect
|
||||
github.com/armon/go-metrics v0.3.9 // indirect
|
||||
github.com/benbjohnson/clock v1.3.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bep/debounce v1.2.0 // indirect
|
||||
github.com/boltdb/bolt v1.3.1 // indirect
|
||||
github.com/cespare/xxhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cheekybits/genny v1.0.0 // indirect
|
||||
@ -213,9 +223,11 @@ require (
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/go-cmp v0.5.8 // indirect
|
||||
github.com/google/gopacket v1.1.19 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-hclog v0.16.2 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-msgpack v0.5.5 // indirect
|
||||
github.com/huin/goupnp v1.0.3 // indirect
|
||||
github.com/iancoleman/orderedmap v0.1.0 // indirect
|
||||
github.com/ipfs/go-bitfield v1.0.0 // indirect
|
||||
@ -254,7 +266,6 @@ require (
|
||||
github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect
|
||||
github.com/libp2p/go-libp2p-noise v0.5.0 // indirect
|
||||
github.com/libp2p/go-libp2p-tls v0.5.0 // indirect
|
||||
github.com/libp2p/go-msgio v0.2.0 // indirect
|
||||
github.com/libp2p/go-nat v0.1.0 // indirect
|
||||
github.com/libp2p/go-netroute v0.2.0 // indirect
|
||||
github.com/libp2p/go-openssl v0.1.0 // indirect
|
||||
@ -304,6 +315,7 @@ require (
|
||||
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
|
||||
github.com/ugorji/go/codec v1.2.6 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/fasttemplate v1.0.1 // indirect
|
||||
github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect
|
||||
@ -311,7 +323,7 @@ require (
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
|
||||
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/zondax/hid v0.9.0 // indirect
|
||||
github.com/zondax/hid v0.9.1 // indirect
|
||||
github.com/zondax/ledger-go v0.12.1 // indirect
|
||||
go.opentelemetry.io/otel/metric v0.25.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/export/metric v0.25.0 // indirect
|
||||
@ -320,7 +332,6 @@ require (
|
||||
go.uber.org/dig v1.12.0 // indirect
|
||||
go4.org v0.0.0-20200411211856-f5505b9728dd // indirect
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
|
||||
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
|
78
go.sum
78
go.sum
@ -49,6 +49,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||
github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I=
|
||||
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
|
||||
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg=
|
||||
@ -99,6 +101,9 @@ github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
|
||||
github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18=
|
||||
github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
@ -120,6 +125,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
|
||||
github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo=
|
||||
github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
|
||||
github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8=
|
||||
@ -164,6 +171,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/cilium/ebpf v0.4.0 h1:QlHdikaxALkqWasW8hAC1mfR0jdmvbfaBdBPFmRSglA=
|
||||
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
@ -286,8 +295,8 @@ github.com/filecoin-project/dagstore v0.5.2/go.mod h1:mdqKzYrRBHf1pRMthYfMv3n37o
|
||||
github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
|
||||
github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8=
|
||||
github.com/filecoin-project/go-address v0.0.6/go.mod h1:7B0/5DA13n6nHkB8bbGx1gWzG/dbTsZ0fgOJVGsM3TE=
|
||||
github.com/filecoin-project/go-address v1.0.0 h1:IrexI0kpADLaPP+CdmU3CVAUqnW/FQC0KTmz4lVKiFU=
|
||||
github.com/filecoin-project/go-address v1.0.0/go.mod h1:5t3z6qPmIADZBtuE9EIzi0EwzcRy2nVhpo0I/c1r0OA=
|
||||
github.com/filecoin-project/go-address v1.1.0 h1:ofdtUtEsNxkIxkDw67ecSmvtzaVSdcea4boAmLbnHfE=
|
||||
github.com/filecoin-project/go-address v1.1.0/go.mod h1:5t3z6qPmIADZBtuE9EIzi0EwzcRy2nVhpo0I/c1r0OA=
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM=
|
||||
github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs=
|
||||
github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o=
|
||||
@ -318,8 +327,8 @@ github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88Oq
|
||||
github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ=
|
||||
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo=
|
||||
github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8=
|
||||
github.com/filecoin-project/go-fil-markets v1.24.0-v17 h1:YjT0usMeR6kdAo3RBfftTPe5bNIgNmBbo5YzJHF1iLk=
|
||||
github.com/filecoin-project/go-fil-markets v1.24.0-v17/go.mod h1:JW/UHkHDqP4MikCIIWNY5IHvTTsdv/zNMk9jJXKzhIU=
|
||||
github.com/filecoin-project/go-fil-markets v1.25.0 h1:zWkc1v84JL9KttiqOy2IIZB0jksIdAt1WLCdOP/KvAg=
|
||||
github.com/filecoin-project/go-fil-markets v1.25.0/go.mod h1:3lzXZt5mRHTHAmZ10sUviiutaLVL57B99FgBU1MYqWY=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM=
|
||||
github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24=
|
||||
github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM=
|
||||
@ -343,8 +352,8 @@ github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psS
|
||||
github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q=
|
||||
github.com/filecoin-project/go-state-types v0.1.8/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q=
|
||||
github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q=
|
||||
github.com/filecoin-project/go-state-types v0.9.8 h1:xkdITiR7h691z1tWOhNCJxHI+cq+Mq7ATkpHQ7f1gu8=
|
||||
github.com/filecoin-project/go-state-types v0.9.8/go.mod h1:+HCZifUV+e8TlQkgll22Ucuiq8OrVJkK+4Kh4u75iiw=
|
||||
github.com/filecoin-project/go-state-types v0.10.0-alpha-2 h1:xz8+sXAuCMane7SkEYCtQjD/zYJ4n1d5bxwYNL8Thf0=
|
||||
github.com/filecoin-project/go-state-types v0.10.0-alpha-2/go.mod h1:7ty480tvttEAqWKywhAaDCElk7ksTqEXtXWAzTSdEKo=
|
||||
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=
|
||||
github.com/filecoin-project/go-statemachine v1.0.2 h1:421SSWBk8GIoCoWYYTE/d+qCWccgmRH0uXotXRDjUbc=
|
||||
github.com/filecoin-project/go-statemachine v1.0.2/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54=
|
||||
@ -380,6 +389,7 @@ github.com/filecoin-project/storetheindex v0.4.17/go.mod h1:y2dL8C5D3PXi183hdxgG
|
||||
github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg=
|
||||
github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ=
|
||||
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
|
||||
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
|
||||
@ -593,17 +603,27 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs=
|
||||
github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
|
||||
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
|
||||
@ -615,6 +635,10 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs=
|
||||
github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
|
||||
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea h1:xykPFhrBAS2J0VBzVa5e80b5ZtYuNQtgXjN40qBZlD4=
|
||||
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
@ -805,8 +829,8 @@ github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB
|
||||
github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M=
|
||||
github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4=
|
||||
github.com/ipfs/go-merkledag v0.6.0/go.mod h1:9HSEwRd5sV+lbykiYP+2NC/3o6MZbKNaa4hfNcH5iH0=
|
||||
github.com/ipfs/go-merkledag v0.8.0 h1:ZUda+sh/MGZX4Z13DE/VQT4GmKWm4H95Nje4qcL/yPE=
|
||||
github.com/ipfs/go-merkledag v0.8.0/go.mod h1:/RmH1kOs7qDMNtGKPh4d/UErNMVuAMpPS/tP57a3aoY=
|
||||
github.com/ipfs/go-merkledag v0.8.1 h1:N3yrqSre/ffvdwtHL4MXy0n7XH+VzN8DlzDrJySPa94=
|
||||
github.com/ipfs/go-merkledag v0.8.1/go.mod h1:uYUlWE34GhbcTjGuUDEcdPzsEtOdnOupL64NgSRjmWI=
|
||||
github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=
|
||||
github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY=
|
||||
github.com/ipfs/go-metrics-prometheus v0.0.2 h1:9i2iljLg12S78OhC6UAiXi176xvQGiZaGVF1CUVdE+s=
|
||||
@ -837,8 +861,8 @@ github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBH
|
||||
github.com/ipld/go-car v0.4.0 h1:U6W7F1aKF/OJMHovnOVdst2cpQE5GhmHibQkAixgNcQ=
|
||||
github.com/ipld/go-car v0.4.0/go.mod h1:Uslcn4O9cBKK9wqHm/cLTFacg6RAPv6LZx2mxd2Ypl4=
|
||||
github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI=
|
||||
github.com/ipld/go-car/v2 v2.4.1 h1:9S+FYbQzQJ/XzsdiOV13W5Iu/i+gUnr6csbSD9laFEg=
|
||||
github.com/ipld/go-car/v2 v2.4.1/go.mod h1:zjpRf0Jew9gHqSvjsKVyoq9OY9SWoEKdYCQUKVaaPT0=
|
||||
github.com/ipld/go-car/v2 v2.5.0 h1:S9h7A6qBAJ+B1M1jIKtau+HPDe30UbM71vsyBzwvRIE=
|
||||
github.com/ipld/go-car/v2 v2.5.0/go.mod h1:jKjGOqoCj5zn6KjnabD6JbnCsMntqU2hLiU6baZVO3E=
|
||||
github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s=
|
||||
github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA=
|
||||
github.com/ipld/go-codec-dagpb v1.3.1/go.mod h1:ErNNglIi5KMur/MfFE/svtgQthzVvf+43MrzLbpcIZY=
|
||||
@ -979,7 +1003,9 @@ github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZk
|
||||
github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k=
|
||||
github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw=
|
||||
github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o=
|
||||
github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo=
|
||||
github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0=
|
||||
github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM=
|
||||
github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4=
|
||||
github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw=
|
||||
github.com/libp2p/go-libp2p v0.22.0 h1:2Tce0kHOp5zASFKJbNzRElvh0iZwdtG5uZheNW8chIw=
|
||||
@ -993,6 +1019,7 @@ github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/
|
||||
github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI=
|
||||
github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI=
|
||||
github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A=
|
||||
github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
|
||||
github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk=
|
||||
github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o=
|
||||
github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg=
|
||||
@ -1010,6 +1037,8 @@ github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQ
|
||||
github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik=
|
||||
github.com/libp2p/go-libp2p-connmgr v0.4.0 h1:q/KZUS1iMDIQckMZarMYwhQisJqiFPHAVC1c4DR3hDE=
|
||||
github.com/libp2p/go-libp2p-connmgr v0.4.0/go.mod h1:exFQQm19PFAx+QuJmBPw4MM58QejzPJRFFFYnNmgi2w=
|
||||
github.com/libp2p/go-libp2p-consensus v0.0.1 h1:jcVbHRZLwTXU9iT/mPi+Lx4/OrIzq3bU1TbZNhYFCV8=
|
||||
github.com/libp2p/go-libp2p-consensus v0.0.1/go.mod h1:+9Wrfhc5QOqWB0gXI0m6ARlkHfdJpcFXmRU0WoHz4Mo=
|
||||
github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco=
|
||||
github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco=
|
||||
github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE=
|
||||
@ -1052,6 +1081,9 @@ github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfx
|
||||
github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw=
|
||||
github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug=
|
||||
github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8=
|
||||
github.com/libp2p/go-libp2p-gorpc v0.4.0 h1:kxHg5C3IuXeOq5FHPGbMHwQzKDlTVeB/NDr0ndc8J/g=
|
||||
github.com/libp2p/go-libp2p-gorpc v0.4.0/go.mod h1:jux2Mb6BfUE1n58KbVCmWtqvpiZo0DDaKobKInf4s5o=
|
||||
github.com/libp2p/go-libp2p-gostream v0.3.1/go.mod h1:1V3b+u4Zhaq407UUY9JLCpboaeufAeVQbnvAt12LRsI=
|
||||
github.com/libp2p/go-libp2p-gostream v0.4.0 h1:heduMMEB78yBqeEQv+P7Fn5X926MHC2jDIC7/7yLpYA=
|
||||
github.com/libp2p/go-libp2p-gostream v0.4.0/go.mod h1:21DVGBcCQwRfEXZpCnZ2kG24QiEkBpEQvG53gYXE4u0=
|
||||
github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go=
|
||||
@ -1082,6 +1114,7 @@ github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8
|
||||
github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c=
|
||||
github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q=
|
||||
github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU=
|
||||
github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM=
|
||||
github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q=
|
||||
github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ=
|
||||
github.com/libp2p/go-libp2p-noise v0.5.0 h1:gwJZ/3iH3MRnBrLIyr/YLdCOnmqfJMptlsFFUIc3j0Y=
|
||||
@ -1112,6 +1145,8 @@ github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqU
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc=
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ=
|
||||
github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ=
|
||||
github.com/libp2p/go-libp2p-raft v0.1.8 h1:Fq0aWHbbhi6WJXf+yaOQeMzV+9UgkbHIIGyaJbH3vpo=
|
||||
github.com/libp2p/go-libp2p-raft v0.1.8/go.mod h1:+YDisn3uszb7vxshLgKoDdRGs79WSbHRgrOdrYqDPk4=
|
||||
github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q=
|
||||
github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q=
|
||||
github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk=
|
||||
@ -1131,6 +1166,7 @@ github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaT
|
||||
github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM=
|
||||
github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM=
|
||||
github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk=
|
||||
github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw=
|
||||
github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4=
|
||||
github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc=
|
||||
github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8=
|
||||
@ -1158,6 +1194,7 @@ github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw=
|
||||
github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo=
|
||||
@ -1172,6 +1209,7 @@ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhL
|
||||
github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4=
|
||||
github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4=
|
||||
github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE=
|
||||
github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k=
|
||||
github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08=
|
||||
@ -1233,7 +1271,9 @@ github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19
|
||||
github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc=
|
||||
github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY=
|
||||
github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0=
|
||||
github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M=
|
||||
github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
|
||||
github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU=
|
||||
github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI=
|
||||
github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I=
|
||||
github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc=
|
||||
@ -1252,6 +1292,7 @@ github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ
|
||||
github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
|
||||
github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
|
||||
github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE=
|
||||
github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U=
|
||||
github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ=
|
||||
github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs=
|
||||
github.com/libp2p/go-yamux/v3 v3.1.2 h1:lNEy28MBk1HavUAlzKgShp+F6mn/ea1nDYWftZhFW9Q=
|
||||
@ -1306,6 +1347,7 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
@ -1331,6 +1373,7 @@ github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00v
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
|
||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||
@ -1429,6 +1472,7 @@ github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wS
|
||||
github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg=
|
||||
github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38=
|
||||
github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
|
||||
github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k=
|
||||
github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs=
|
||||
github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o=
|
||||
@ -1502,6 +1546,8 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh
|
||||
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
|
||||
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
@ -1531,6 +1577,7 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
@ -1697,6 +1744,7 @@ github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
@ -1704,7 +1752,12 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc=
github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU=
github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=
github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@ -1777,8 +1830,9 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8=
github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo=
github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM=
github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU=
github.com/zondax/ledger-go v0.12.1/go.mod h1:KatxXrVDzgWwbssUWsF5+cOJHXPvzQ09YSlzGNuhOEo=
go.dedis.ch/fixbuf v1.0.3 h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs=
@ -1896,6 +1950,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -2070,6 +2125,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@ -64,7 +64,7 @@ func TestDeadlineToggling(t *testing.T) {

//stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001
//stm: @MINER_SECTOR_LIST_001
//kit.Expensive(t)
kit.Expensive(t)

kit.QuietMiningLogs()

@ -160,7 +160,7 @@ func TestDeadlineToggling(t *testing.T) {
build.Clock.Sleep(blocktime)
}

checkMiner := func(ma address.Address, power abi.StoragePower, active, activeIfCron bool, tsk types.TipSetKey) {
checkMiner := func(ma address.Address, power abi.StoragePower, active bool, tsk types.TipSetKey) {
//stm: @CHAIN_STATE_MINER_POWER_001
p, err := client.StateMinerPower(ctx, ma, tsk)
require.NoError(t, err)
@ -178,21 +178,6 @@ func TestDeadlineToggling(t *testing.T) {
act, err := mst.DeadlineCronActive()
require.NoError(t, err)

if tsk != types.EmptyTSK {
ts, err := client.ChainGetTipSet(ctx, tsk)
require.NoError(t, err)
di, err := mst.DeadlineInfo(ts.Height())
require.NoError(t, err)

// cron happened on the same epoch some other condition would have happened
if di.Open == ts.Height() {
act, err := mst.DeadlineCronActive()
require.NoError(t, err)
require.Equal(t, activeIfCron, act)
return
}
}

require.Equal(t, active, act)
}

@ -200,7 +185,7 @@ func TestDeadlineToggling(t *testing.T) {
{
uts, err := client.ChainGetTipSetByHeight(ctx, upgradeH+2, types.EmptyTSK)
require.NoError(t, err)
checkMiner(maddrB, types.NewInt(0), true, true, uts.Key())
checkMiner(maddrB, types.NewInt(0), true, uts.Key())
}

//stm: @CHAIN_STATE_NETWORK_VERSION_001
@ -218,19 +203,19 @@ func TestDeadlineToggling(t *testing.T) {
require.NoError(t, err)

// first round of miner checks
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, true, types.EmptyTSK)
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(uint64(ssz)*sectorsC), true, types.EmptyTSK)

checkMiner(maddrB, types.NewInt(0), false, false, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(0), false, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(0), false, types.EmptyTSK)
checkMiner(maddrE, types.NewInt(0), false, types.EmptyTSK)

// pledge sectors on minerB/minerD, stop post on minerC
minerB.PledgeSectors(ctx, sectorsB, 0, nil)
checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(0), true, types.EmptyTSK)

minerD.PledgeSectors(ctx, sectorsD, 0, nil)
checkMiner(maddrD, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(0), true, types.EmptyTSK)

minerC.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()

@ -281,7 +266,7 @@ func TestDeadlineToggling(t *testing.T) {
build.Clock.Sleep(blocktime)
}

checkMiner(maddrE, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrE, types.NewInt(0), true, types.EmptyTSK)

// go through rest of the PP
for {
@ -296,11 +281,11 @@ func TestDeadlineToggling(t *testing.T) {
}

// second round of miner checks
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, true, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, true, types.EmptyTSK)
checkMiner(maddrE, types.NewInt(0), false, false, types.EmptyTSK)
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(0), true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(uint64(ssz)*sectorsB), true, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(uint64(ssz)*sectorsD), true, types.EmptyTSK)
checkMiner(maddrE, types.NewInt(0), false, types.EmptyTSK)

// disable post on minerB
minerB.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).Fail()
@ -353,8 +338,10 @@ func TestDeadlineToggling(t *testing.T) {
require.NoError(t, err)
require.Equal(t, exitcode.Ok, r.Receipt.ExitCode)

// assert inactive if the message landed in the tipset we run cron in
checkMiner(maddrD, types.NewInt(0), true, false, r.TipSet)
// assert miner has no power
p, err := client.StateMinerPower(ctx, maddrD, r.TipSet)
require.NoError(t, err)
require.True(t, p.MinerPower.RawBytePower.IsZero())
}

// go through another PP
@ -369,8 +356,8 @@ func TestDeadlineToggling(t *testing.T) {
build.Clock.Sleep(blocktime)
}

checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(0), true, true, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(0), false, false, types.EmptyTSK)
checkMiner(maddrA, types.NewInt(uint64(ssz)*kit.DefaultPresealsPerBootstrapMiner), true, types.EmptyTSK)
checkMiner(maddrC, types.NewInt(0), true, types.EmptyTSK)
checkMiner(maddrB, types.NewInt(0), true, types.EmptyTSK)
checkMiner(maddrD, types.NewInt(0), false, types.EmptyTSK)
}

104
itests/deals_remote_retrieval_test.go
Normal file
@ -0,0 +1,104 @@
package itests

import (
"bytes"
"context"
"fmt"
"io"
"net/url"
"os"
"path"
"testing"
"time"

"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/ipld/go-car"
"github.com/stretchr/testify/require"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"

"github.com/filecoin-project/lotus/api"
bstore "github.com/filecoin-project/lotus/blockstore"
"github.com/filecoin-project/lotus/itests/kit"
)

func TestNetStoreRetrieval(t *testing.T) {
kit.QuietMiningLogs()

blocktime := 5 * time.Millisecond
ctx := context.Background()

full, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC())
ens.InterconnectAll().BeginMining(blocktime)

time.Sleep(5 * time.Second)

// For these tests where the block time is artificially short, just use
// a deal start epoch that is guaranteed to be far enough in the future
// so that the deal starts sealing in time
dealStartEpoch := abi.ChainEpoch(2 << 12)

rseed := 7

dh := kit.NewDealHarness(t, full, miner, miner)
dealCid, res, _ := dh.MakeOnlineDeal(context.Background(), kit.MakeFullDealParams{
Rseed: rseed,
StartEpoch: dealStartEpoch,
UseCARFileForStorageDeal: true,
})

// create deal store
id := uuid.New()
rstore := bstore.NewMemorySync()

au, err := url.Parse(full.ListenURL)
require.NoError(t, err)

switch au.Scheme {
case "http":
au.Scheme = "ws"
case "https":
au.Scheme = "wss"
}

au.Path = path.Join(au.Path, "/rest/v0/store/"+id.String())

conn, _, err := websocket.DefaultDialer.Dial(au.String(), nil)
require.NoError(t, err)

_ = bstore.HandleNetBstoreWS(ctx, rstore, conn)

dh.PerformRetrievalWithOrder(ctx, dealCid, res.Root, false, func(offer api.QueryOffer, address address.Address) api.RetrievalOrder {
order := offer.Order(address)

order.RemoteStore = &id

return order
})

// check blockstore blocks
carv1FilePath, _ := kit.CreateRandomCARv1(t, rseed, 200)
cb, err := os.ReadFile(carv1FilePath)
require.NoError(t, err)

cr, err := car.NewCarReader(bytes.NewReader(cb))
require.NoError(t, err)

var blocks int
for {
cb, err := cr.Next()
if err == io.EOF {
fmt.Println("blocks: ", blocks)
return
}
require.NoError(t, err)

sb, err := rstore.Get(ctx, cb.Cid())
require.NoError(t, err)
require.EqualValues(t, cb.RawData(), sb.RawData())

blocks++
}
}
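
The new test above boils down to one mechanism: serve a client-side blockstore to the node over the /rest/v0/store/<uuid> websocket endpoint, then point the retrieval order at that store ID. Below is a minimal sketch of just that wiring, using only helpers that appear in the test; the function name attachRemoteStore and its variable names are illustrative, not part of the diff.

package itests

import (
	"context"
	"net/url"
	"path"
	"testing"

	"github.com/google/uuid"
	"github.com/gorilla/websocket"
	"github.com/stretchr/testify/require"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/itests/kit"
)

// attachRemoteStore dials the node's /rest/v0/store/<uuid> websocket endpoint and
// serves an in-memory blockstore over it; a retrieval order can then reference the
// returned ID via order.RemoteStore so retrieved blocks land in rstore.
func attachRemoteStore(ctx context.Context, t *testing.T, full *kit.TestFullNode) (uuid.UUID, bstore.Blockstore) {
	id := uuid.New()
	rstore := bstore.NewMemorySync()

	// The RPC listen URL is http(s); the store endpoint speaks websockets.
	au, err := url.Parse(full.ListenURL)
	require.NoError(t, err)
	switch au.Scheme {
	case "http":
		au.Scheme = "ws"
	case "https":
		au.Scheme = "wss"
	}
	au.Path = path.Join(au.Path, "/rest/v0/store/"+id.String())

	conn, _, err := websocket.DefaultDialer.Dial(au.String(), nil)
	require.NoError(t, err)

	// Bridge the websocket connection to the local store for the lifetime of ctx.
	_ = bstore.HandleNetBstoreWS(ctx, rstore, conn)

	return id, rstore
}
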
@ -101,7 +101,7 @@ func testDealsRetryLackOfFunds(t *testing.T, publishStorageAccountFunds abi.Toke
propcid := *deal

go func() {
time.Sleep(3 * time.Second)
time.Sleep(30 * time.Second)

kit.SendFunds(ctx, t, minerFullNode, publishStorageDealKey.Address, types.FromFil(1))

@ -297,7 +297,7 @@ func startNodes(
l, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)

srv, _ := kit.CreateRPCServer(t, handler, l)
srv, _, _ := kit.CreateRPCServer(t, handler, l)

// Create a gateway client API that connects to the gateway server
var gapi api.Gateway

@ -19,6 +19,7 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/shared_testutil"
"github.com/filecoin-project/go-fil-markets/storagemarket"
@ -308,6 +309,12 @@ func (dh *DealHarness) StartSealingWaiting(ctx context.Context) {
}

func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool, offers ...api.QueryOffer) (path string) {
return dh.PerformRetrievalWithOrder(ctx, deal, root, carExport, func(offer api.QueryOffer, a address.Address) api.RetrievalOrder {
return offer.Order(a)
}, offers...)
}

func (dh *DealHarness) PerformRetrievalWithOrder(ctx context.Context, deal *cid.Cid, root cid.Cid, carExport bool, makeOrder func(api.QueryOffer, address.Address) api.RetrievalOrder, offers ...api.QueryOffer) (path string) {
var offer api.QueryOffer
if len(offers) == 0 {
// perform retrieval.
@ -331,7 +338,9 @@ func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root
updates, err := dh.client.ClientGetRetrievalUpdates(updatesCtx)
require.NoError(dh.t, err)

retrievalRes, err := dh.client.ClientRetrieve(ctx, offer.Order(caddr))
order := makeOrder(offer, caddr)

retrievalRes, err := dh.client.ClientRetrieve(ctx, order)
require.NoError(dh.t, err)
consumeEvents:
for {
@ -357,6 +366,11 @@ consumeEvents:
}
cancel()

if order.RemoteStore != nil {
// if we're retrieving into a remote store, skip export
return ""
}

require.NoError(dh.t, dh.client.ClientExport(ctx,
api.ExportRef{
Root: root,
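
For callers, the practical difference is that PerformRetrieval keeps its old behaviour while PerformRetrievalWithOrder lets a test rewrite the order before it is submitted. A short sketch of such a caller follows; retrieveIntoRemoteStore and storeID are illustrative names, assuming the types shown in this diff.

package itests

import (
	"context"

	"github.com/google/uuid"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/itests/kit"
)

// retrieveIntoRemoteStore rewrites the default order so the retrieval is written
// into the remote store identified by storeID; with RemoteStore set, the harness
// skips ClientExport and returns an empty path.
func retrieveIntoRemoteStore(ctx context.Context, dh *kit.DealHarness, deal *cid.Cid, root cid.Cid, storeID uuid.UUID) {
	dh.PerformRetrievalWithOrder(ctx, deal, root, false,
		func(offer api.QueryOffer, a address.Address) api.RetrievalOrder {
			order := offer.Order(a)
			order.RemoteStore = &storeID
			return order
		})
}
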
@ -13,6 +13,7 @@ import (
"testing"
"time"

"github.com/google/uuid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
@ -175,6 +176,16 @@ func (n *Ensemble) Mocknet() mocknet.Mocknet {
return n.mn
}

func (n *Ensemble) NewPrivKey() (libp2pcrypto.PrivKey, peer.ID) {
privkey, _, err := libp2pcrypto.GenerateEd25519Key(rand.Reader)
require.NoError(n.t, err)

peerId, err := peer.IDFromPrivateKey(privkey)
require.NoError(n.t, err)

return privkey, peerId
}

// FullNode enrolls a new full node.
func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
options := DefaultNodeOpts
@ -200,13 +211,14 @@ func (n *Ensemble) FullNode(full *TestFullNode, opts ...NodeOpt) *Ensemble {
}

*full = TestFullNode{t: n.t, options: options, DefaultKey: key}

n.inactive.fullnodes = append(n.inactive.fullnodes, full)
return n
}

// Miner enrolls a new miner, using the provided full node for chain
// interactions.
func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
func (n *Ensemble) MinerEnroll(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
require.NotNil(n.t, full, "full node required when instantiating miner")

options := DefaultNodeOpts
@ -291,8 +303,16 @@ func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeO
minerNode.Libp2p.PeerID = peerId
minerNode.Libp2p.PrivKey = privkey

n.inactive.miners = append(n.inactive.miners, minerNode)
return n
}

func (n *Ensemble) AddInactiveMiner(m *TestMiner) {
n.inactive.miners = append(n.inactive.miners, m)
}

func (n *Ensemble) Miner(minerNode *TestMiner, full *TestFullNode, opts ...NodeOpt) *Ensemble {
n.MinerEnroll(minerNode, full, opts...)
n.AddInactiveMiner(minerNode)
return n
}

@ -358,6 +378,21 @@ func (n *Ensemble) Start() *Ensemble {
lr, err := r.Lock(repo.FullNode)
require.NoError(n.t, err)

ks, err := lr.KeyStore()
require.NoError(n.t, err)

if full.Pkey != nil {
pk, err := libp2pcrypto.MarshalPrivateKey(full.Pkey.PrivKey)
require.NoError(n.t, err)

err = ks.Put("libp2p-host", types.KeyInfo{
Type: "libp2p-host",
PrivateKey: pk,
})
require.NoError(n.t, err)

}

c, err := lr.Config()
require.NoError(n.t, err)

@ -416,6 +451,7 @@ func (n *Ensemble) Start() *Ensemble {

// Construct the full node.
stop, err := node.New(ctx, opts...)
full.Stop = stop

require.NoError(n.t, err)

@ -425,15 +461,31 @@ func (n *Ensemble) Start() *Ensemble {
err = full.WalletSetDefault(context.Background(), addr)
require.NoError(n.t, err)

var rpcShutdownOnce sync.Once
var stopOnce sync.Once
var stopErr error

stopFunc := stop
stop = func(ctx context.Context) error {
stopOnce.Do(func() {
stopErr = stopFunc(ctx)
})
return stopErr
}

// Are we hitting this node through its RPC?
if full.options.rpc {
withRPC := fullRpc(n.t, full)
withRPC, rpcCloser := fullRpc(n.t, full)
n.inactive.fullnodes[i] = withRPC
full.Stop = func(ctx2 context.Context) error {
rpcShutdownOnce.Do(rpcCloser)
return stop(ctx)
}
n.t.Cleanup(func() { rpcShutdownOnce.Do(rpcCloser) })
}

n.t.Cleanup(func() {
_ = stop(context.Background())

})

n.active.fullnodes = append(n.active.fullnodes, full)
@ -477,7 +529,9 @@ func (n *Ensemble) Start() *Ensemble {
Method: power.Methods.CreateMiner,
Params: params,
}
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, nil)
signed, err := m.FullNode.FullNode.MpoolPushMessage(ctx, createStorageMinerMsg, &api.MessageSendSpec{
MsgUuid: uuid.New(),
})
require.NoError(n.t, err)

mw, err := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
@ -501,7 +555,9 @@ func (n *Ensemble) Start() *Ensemble {
Value: types.NewInt(0),
}

signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, nil)
signed, err2 := m.FullNode.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
MsgUuid: uuid.New(),
})
require.NoError(n.t, err2)

mw, err2 := m.FullNode.FullNode.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, api.LookbackNoLimit, true)
@ -586,11 +642,11 @@ func (n *Ensemble) Start() *Ensemble {
psd := m.PresealDir
noPaths := m.options.noStorage

err := lr.SetStorage(func(sc *paths.StorageConfig) {
err := lr.SetStorage(func(sc *storiface.StorageConfig) {
if noPaths {
sc.StoragePaths = []paths.LocalPath{}
sc.StoragePaths = []storiface.LocalPath{}
}
sc.StoragePaths = append(sc.StoragePaths, paths.LocalPath{Path: psd})
sc.StoragePaths = append(sc.StoragePaths, storiface.LocalPath{Path: psd})
})

require.NoError(n.t, err)
@ -611,7 +667,9 @@ func (n *Ensemble) Start() *Ensemble {
Value: types.NewInt(0),
}

_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, nil)
_, err2 := m.FullNode.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
MsgUuid: uuid.New(),
})
require.NoError(n.t, err2)
}

@ -620,6 +678,13 @@ func (n *Ensemble) Start() *Ensemble {
disallowRemoteFinalize := m.options.disallowRemoteFinalize

var mineBlock = make(chan lotusminer.MineReq)

copy := *m.FullNode
copy.FullNode = modules.MakeUuidWrapper(copy.FullNode)
m.FullNode = &copy

//m.FullNode.FullNode = modules.MakeUuidWrapper(fn.FullNode)

opts := []node.Option{
node.StorageMiner(&m.StorageMiner, cfg.Subsystems),
node.Base(),
@ -627,12 +692,14 @@ func (n *Ensemble) Start() *Ensemble {
node.Test(),

node.If(m.options.disableLibp2p, node.MockHost(n.mn)),
node.Override(new(v1api.RawFullNodeAPI), m.FullNode.FullNode),
//node.Override(new(v1api.RawFullNodeAPI), func() api.FullNode { return modules.MakeUuidWrapper(m.FullNode) }),
//node.Override(new(v1api.RawFullNodeAPI), modules.MakeUuidWrapper),
node.Override(new(v1api.RawFullNodeAPI), m.FullNode),
node.Override(new(*lotusminer.Miner), lotusminer.NewTestMiner(mineBlock, m.ActorAddr)),

// disable resource filtering so that local worker gets assigned tasks
// regardless of system pressure.
node.Override(new(sectorstorage.Config), func() sectorstorage.Config {
node.Override(new(config.SealerConfig), func() config.SealerConfig {
scfg := config.DefaultStorageMiner()

if noLocal {
@ -645,8 +712,8 @@ func (n *Ensemble) Start() *Ensemble {

scfg.Storage.Assigner = assigner
scfg.Storage.DisallowRemoteFinalize = disallowRemoteFinalize
scfg.Storage.ResourceFiltering = sectorstorage.ResourceFilteringDisabled
return scfg.StorageManager()
scfg.Storage.ResourceFiltering = config.ResourceFilteringDisabled
return scfg.Storage
}),

// upgrades
@ -737,8 +804,8 @@ func (n *Ensemble) Start() *Ensemble {
require.NoError(n.t, err)

if m.options.noStorage {
err := lr.SetStorage(func(sc *paths.StorageConfig) {
sc.StoragePaths = []paths.LocalPath{}
err := lr.SetStorage(func(sc *storiface.StorageConfig) {
sc.StoragePaths = []storiface.LocalPath{}
})
require.NoError(n.t, err)
}
@ -814,9 +881,9 @@ func (n *Ensemble) Start() *Ensemble {
wait.Unlock()
})
wait.Lock()
n.bootstrapped = true
}

n.bootstrapped = true
return n
}
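
The recurring change in this file is that every MpoolPushMessage call now passes an api.MessageSendSpec carrying a fresh MsgUuid, matching the MakeUuidWrapper wrapping of the miner's full-node API. A condensed sketch of that pattern is below; pushWithUuid is an illustrative helper, not part of the diff.

package kit

import (
	"context"

	"github.com/google/uuid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/chain/types"
)

// pushWithUuid mirrors the pattern above: tag each pushed message with a fresh
// MsgUuid so the UUID-wrapped full-node API can recognise resubmissions of the
// same logical send.
func pushWithUuid(ctx context.Context, fn v1api.FullNode, msg *types.Message) (*types.SignedMessage, error) {
	return fn.MpoolPushMessage(ctx, msg, &api.MessageSendSpec{
		MsgUuid: uuid.New(),
	})
}
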
@ -6,6 +6,8 @@ import (
"testing"
"time"

libp2pcrypto "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"

@ -16,8 +18,15 @@ import (
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/wallet/key"
cliutil "github.com/filecoin-project/lotus/cli/util"
"github.com/filecoin-project/lotus/node"
)

type Libp2p struct {
PeerID peer.ID
PrivKey libp2pcrypto.PrivKey
}

// TestFullNode represents a full node enrolled in an Ensemble.
type TestFullNode struct {
v1api.FullNode
@ -27,11 +36,36 @@ type TestFullNode struct {
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node.
ListenAddr multiaddr.Multiaddr
ListenURL string
DefaultKey *key.Key

Pkey *Libp2p

Stop node.StopFunc

options nodeOpts
}

func MergeFullNodes(fullNodes []*TestFullNode) *TestFullNode {
var wrappedFullNode TestFullNode
var fns api.FullNodeStruct
wrappedFullNode.FullNode = &fns

cliutil.FullNodeProxy(fullNodes, &fns)

wrappedFullNode.t = fullNodes[0].t
wrappedFullNode.ListenAddr = fullNodes[0].ListenAddr
wrappedFullNode.DefaultKey = fullNodes[0].DefaultKey
wrappedFullNode.Stop = fullNodes[0].Stop
wrappedFullNode.options = fullNodes[0].options

return &wrappedFullNode
}

func (f TestFullNode) Shutdown(ctx context.Context) error {
return f.Stop(ctx)
}

func (f *TestFullNode) ClientImportCARFile(ctx context.Context, rseed int, size int) (res *api.ImportRes, carv1FilePath string, origFilePath string) {
carv1FilePath, origFilePath = CreateRandomCARv1(f.t, rseed, size)
res, err := f.ClientImport(ctx, api.FileRef{Path: carv1FilePath, IsCAR: true})
@ -86,6 +120,10 @@ func (f *TestFullNode) WaitForSectorActive(ctx context.Context, t *testing.T, sn
}
}

func (f *TestFullNode) AssignPrivKey(pkey *Libp2p) {
f.Pkey = pkey
}

// ChainPredicate encapsulates a chain condition.
type ChainPredicate func(set *types.TipSet) bool

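MergeFullNodes is the piece that lets a test treat several enrolled nodes as one API endpoint: it builds a FullNodeStruct and fills it via cliutil.FullNodeProxy. A minimal sketch of how a test might use the merged node follows; mergedHead is an illustrative helper and node construction is elided.

package itests

import (
	"context"
	"testing"

	"github.com/filecoin-project/lotus/itests/kit"
)

// mergedHead queries the chain head through a single proxy that fans requests
// out over all the given full nodes.
func mergedHead(ctx context.Context, t *testing.T, nodes []*kit.TestFullNode) {
	merged := kit.MergeFullNodes(nodes)

	head, err := merged.ChainHead(ctx)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("merged node reports head at height %d", head.Height())
}
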
@ -26,7 +26,6 @@ import (
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/wallet/key"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/storage/paths"
sealing "github.com/filecoin-project/lotus/storage/pipeline"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
@ -175,7 +174,7 @@ func (tm *TestMiner) FlushSealingBatches(ctx context.Context) {

const metaFile = "sectorstore.json"

func (tm *TestMiner) AddStorage(ctx context.Context, t *testing.T, conf func(*paths.LocalStorageMeta)) storiface.ID {
func (tm *TestMiner) AddStorage(ctx context.Context, t *testing.T, conf func(*storiface.LocalStorageMeta)) storiface.ID {
p := t.TempDir()

if err := os.MkdirAll(p, 0755); err != nil {
@ -189,7 +188,7 @@ func (tm *TestMiner) AddStorage(ctx context.Context, t *testing.T, conf func(*pa
require.NoError(t, err)
}

cfg := &paths.LocalStorageMeta{
cfg := &storiface.LocalStorageMeta{
ID: storiface.ID(uuid.New().String()),
Weight: 10,
CanSeal: false,

@ -256,9 +256,6 @@ type CfgOption func(cfg *config.FullNode) error

func SplitstoreDiscard() NodeOpt {
return WithCfgOpt(func(cfg *config.FullNode) error {
//cfg.Chainstore.Splitstore.HotStoreType = "badger" // default
//cfg.Chainstore.Splitstore.MarkSetType = "badger" // default
//cfg.Chainstore.Splitstore.HotStoreMessageRetention = 0 // default
cfg.Chainstore.EnableSplitstore = true
cfg.Chainstore.Splitstore.HotStoreFullGCFrequency = 0 // turn off full gc
cfg.Chainstore.Splitstore.ColdStoreType = "discard" // no cold store
@ -268,9 +265,6 @@ func SplitstoreDiscard() NodeOpt {

func SplitstoreUniversal() NodeOpt {
return WithCfgOpt(func(cfg *config.FullNode) error {
//cfg.Chainstore.Splitstore.HotStoreType = "badger" // default
//cfg.Chainstore.Splitstore.MarkSetType = "badger" // default
//cfg.Chainstore.Splitstore.HotStoreMessageRetention = 0 // default
cfg.Chainstore.EnableSplitstore = true
cfg.Chainstore.Splitstore.HotStoreFullGCFrequency = 0 // turn off full gc
cfg.Chainstore.Splitstore.ColdStoreType = "universal" // universal bs is coldstore
@ -278,10 +272,11 @@ func SplitstoreUniversal() NodeOpt {
})
}

func SplitstoreAutoPrune() NodeOpt {
func SplitstoreMessges() NodeOpt {
return WithCfgOpt(func(cfg *config.FullNode) error {
cfg.Chainstore.Splitstore.EnableColdStoreAutoPrune = true // turn on
cfg.Chainstore.Splitstore.ColdStoreFullGCFrequency = 0 // turn off full gc
cfg.Chainstore.EnableSplitstore = true
cfg.Chainstore.Splitstore.HotStoreFullGCFrequency = 0 // turn off full gc
cfg.Chainstore.Splitstore.ColdStoreType = "messages" // universal bs is coldstore, and it accepts messages
return nil
})
}
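
The renamed option keeps the splitstore enabled with a universal cold store that also retains messages. A minimal sketch of a test using it, assuming kit.EnsembleMinimal forwards node options the way the other splitstore itests on this branch do:

package itests

import (
	"testing"
	"time"

	"github.com/filecoin-project/lotus/itests/kit"
)

// runWithMessageSplitstore starts a minimal ensemble whose full node uses the
// "messages" cold store type configured by SplitstoreMessges above.
func runWithMessageSplitstore(t *testing.T) {
	// Option forwarding is assumed to behave as in the other splitstore itests.
	_, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.SplitstoreMessges())
	ens.InterconnectAll().BeginMining(5 * time.Millisecond)
}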