Merge branch 'master' into adlrocha/consistent-bcast
Commit: 894da941af
@ -106,10 +106,14 @@ func main() {
	// form the input data.
	type data struct {
		Networks   []string
		SnapNames  []string
		ItestFiles []string
		UnitSuites map[string]string
	}
	in := data{
		Networks:   []string{"mainnet", "butterflynet", "calibnet", "debug"},
		SnapNames:  []string{"lotus", "lotus-filecoin"},
		ItestFiles: itests,
		UnitSuites: func() map[string]string {
			ret := make(map[string]string)
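The hunk above is from the Go generator that renders the CircleCI config from a template. As a rough, hedged sketch only (not the actual gen tool; the file names "template.yml" and "config.yml" are placeholders), the data struct can be fed to Go's text/template using "[[" / "]]" delimiters, which is what the `[[- range .Networks]]` and `[[- range .SnapNames]]` actions later in this diff suggest. The same delimiter choice also explains the odd-looking `[["[[ ! -z $CIRCLE_TAG ]]"]]` lines: with these delimiters, `[["..."]]` is a template action that simply prints the quoted string, so bash `[[ ... ]]` conditionals survive templating.

```go
// Minimal sketch: render a CircleCI template with the data struct shown above.
// "template.yml" and "config.yml" are placeholder file names, not confirmed paths.
package main

import (
	"os"
	"text/template"
)

type data struct {
	Networks   []string
	SnapNames  []string
	ItestFiles []string
	UnitSuites map[string]string
}

func main() {
	in := data{
		Networks:  []string{"mainnet", "butterflynet", "calibnet", "debug"},
		SnapNames: []string{"lotus", "lotus-filecoin"},
	}
	// Use [[ ]] as delimiters so YAML and bash in the template keep their normal syntax;
	// a literal shell conditional can then be emitted as [["[[ ! -z $CIRCLE_TAG ]]"]].
	tmpl := template.Must(template.New("template.yml").
		Delims("[[", "]]").
		ParseFiles("template.yml"))

	out, err := os.Create("config.yml")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	if err := tmpl.Execute(out, in); err != nil {
		panic(err)
	}
}
```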
@ -1,11 +1,13 @@
version: 2.1
orbs:
  aws-cli: circleci/aws-cli@1.3.2
  docker: circleci/docker@2.1.4

executors:
  golang:
    docker:
      - image: cimg/go:1.18.1
      # Must match GO_VERSION_MIN in project root
      - image: cimg/go:1.18.8
    resource_class: 2xlarge
  ubuntu:
    docker:
@ -22,11 +24,6 @@ executors:
|
||||
|
||||
|
||||
commands:
|
||||
install-deps:
|
||||
steps:
|
||||
- run: |
|
||||
sudo apt update
|
||||
sudo apt install python-is-python3
|
||||
prepare:
|
||||
parameters:
|
||||
linux:
|
||||
@ -40,19 +37,29 @@ commands:
|
||||
steps:
|
||||
- checkout
|
||||
- git_fetch_all_tags
|
||||
- checkout
|
||||
- when:
|
||||
condition: <<parameters.linux>>
|
||||
steps:
|
||||
- run:
|
||||
name: Check Go Version
|
||||
command: |
|
||||
v=`go version | { read _ _ v _; echo ${v#go}; }`
|
||||
if [["[[ $v != `cat GO_VERSION_MIN` ]]"]]; then
|
||||
echo "GO_VERSION_MIN file does not match the go version being used."
|
||||
echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v."
|
||||
exit 1
|
||||
fi
|
||||
- run: sudo apt-get update
|
||||
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
|
||||
- run: sudo apt-get install python-is-python3
|
||||
|
||||
- when:
|
||||
condition: <<parameters.darwin>>
|
||||
steps:
|
||||
- run:
|
||||
name: Install Go
|
||||
command: |
|
||||
curl https://dl.google.com/go/go1.18.1.darwin-amd64.pkg -o /tmp/go.pkg && \
|
||||
curl https://dl.google.com/go/go`cat GO_VERSION_MIN`.darwin-amd64.pkg -o /tmp/go.pkg && \
|
||||
sudo installer -pkg /tmp/go.pkg -target /
|
||||
- run:
|
||||
name: Export Go
|
||||
@ -60,21 +67,12 @@ commands:
|
||||
echo 'export GOPATH="${HOME}/go"' >> $BASH_ENV
|
||||
- run: go version
|
||||
- run:
|
||||
name: Install pkg-config, goreleaser, and sha512sum
|
||||
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config goreleaser/tap/goreleaser coreutils
|
||||
name: Install dependencies with Homebrew
|
||||
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config coreutils jq hwloc
|
||||
- run:
|
||||
name: Install Rust
|
||||
command: |
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y
|
||||
- run:
|
||||
name: Install hwloc
|
||||
command: |
|
||||
mkdir ~/hwloc
|
||||
curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
|
||||
cd ~/hwloc
|
||||
tar -xvzpf hwloc-2.4.1.tar.gz
|
||||
cd hwloc-2.4.1
|
||||
./configure && make && sudo make install
|
||||
- run: git submodule sync
|
||||
- run: git submodule update --init
|
||||
download-params:
|
||||
@ -94,28 +92,13 @@ commands:
|
||||
install_ipfs:
|
||||
steps:
|
||||
- run: |
|
||||
apt update
|
||||
apt install -y wget
|
||||
wget https://github.com/ipfs/go-ipfs/releases/download/v0.12.2/go-ipfs_v0.12.2_linux-amd64.tar.gz
|
||||
wget https://github.com/ipfs/go-ipfs/releases/download/v0.12.2/go-ipfs_v0.12.2_linux-amd64.tar.gz.sha512
|
||||
if [ "$(sha512sum go-ipfs_v0.12.2_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.12.2_linux-amd64.tar.gz.sha512)" ]
|
||||
then
|
||||
echo "ipfs failed checksum check"
|
||||
exit 1
|
||||
fi
|
||||
tar -xf go-ipfs_v0.12.2_linux-amd64.tar.gz
|
||||
mv go-ipfs/ipfs /usr/local/bin/ipfs
|
||||
chmod +x /usr/local/bin/ipfs
|
||||
install_ipfs_macos:
|
||||
steps:
|
||||
- run: |
|
||||
curl -O https://dist.ipfs.io/kubo/v0.14.0/kubo_v0.14.0_darwin-amd64.tar.gz
|
||||
tar -xvzf kubo_v0.14.0_darwin-amd64.tar.gz
|
||||
curl -O https://dist.ipfs.tech/kubo/v0.16.0/kubo_v0.16.0_linux-amd64.tar.gz
|
||||
tar -xvzf kubo_v0.16.0_linux-amd64.tar.gz
|
||||
pushd kubo
|
||||
sudo bash install.sh
|
||||
popd
|
||||
rm -rf kubo/
|
||||
rm kubo_v0.14.0_darwin-amd64.tar.gz
|
||||
rm -rf kubo
|
||||
rm kubo_v0.16.0_linux-amd64.tar.gz
|
||||
git_fetch_all_tags:
|
||||
steps:
|
||||
- run:
|
||||
@ -140,13 +123,12 @@ commands:
|
||||
- run:
|
||||
name: "Run a packer build"
|
||||
command: packer build << parameters.args >> << parameters.template >>
|
||||
no_output_timeout: 30m
|
||||
no_output_timeout: 1h
|
||||
|
||||
jobs:
|
||||
mod-tidy-check:
|
||||
executor: golang
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: go mod tidy -v
|
||||
- run:
|
||||
@ -154,37 +136,6 @@ jobs:
|
||||
command: |
|
||||
git --no-pager diff go.mod go.sum
|
||||
git --no-pager diff --quiet go.mod go.sum
|
||||
build-linux:
|
||||
executor: golang
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: sudo apt-get update
|
||||
- run: sudo apt-get install npm
|
||||
- run:
|
||||
command: make buildall
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
command: ./scripts/version-check.sh ./lotus
|
||||
- store_artifacts:
|
||||
path: lotus
|
||||
- store_artifacts:
|
||||
path: lotus-miner
|
||||
- store_artifacts:
|
||||
path: lotus-worker
|
||||
- run: mkdir linux && mv lotus lotus-miner lotus-worker linux/
|
||||
- persist_to_workspace:
|
||||
root: "."
|
||||
paths:
|
||||
- linux
|
||||
|
||||
build-debug:
|
||||
executor: golang
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run:
|
||||
command: make debug
|
||||
|
||||
test:
|
||||
description: |
|
||||
@ -214,7 +165,6 @@ jobs:
|
||||
description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run:
|
||||
command: make deps lotus
|
||||
@ -258,7 +208,6 @@ jobs:
|
||||
submodule is used.
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run:
|
||||
command: make deps lotus
|
||||
@ -301,121 +250,104 @@ jobs:
|
||||
path: /tmp/test-reports
|
||||
- store_artifacts:
|
||||
path: /tmp/test-artifacts/conformance-coverage.html
|
||||
build-ntwk-calibration:
|
||||
description: |
|
||||
Compile lotus binaries for the calibration network
|
||||
parameters:
|
||||
<<: *test-params
|
||||
executor: << parameters.executor >>
|
||||
|
||||
build-linux-amd64:
|
||||
executor: golang
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: make calibnet
|
||||
- run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet
|
||||
- run: make lotus lotus-miner lotus-worker
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
command: ./scripts/version-check.sh ./lotus
|
||||
- run: |
|
||||
mkdir -p /tmp/workspace/linux_amd64_v1 && \
|
||||
mv lotus lotus-miner lotus-worker /tmp/workspace/linux_amd64_v1/
|
||||
- persist_to_workspace:
|
||||
root: "."
|
||||
root: /tmp/workspace
|
||||
paths:
|
||||
- linux-calibrationnet
|
||||
build-ntwk-butterfly:
|
||||
description: |
|
||||
Compile lotus binaries for the butterfly network
|
||||
parameters:
|
||||
<<: *test-params
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: make butterflynet
|
||||
- run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet
|
||||
- persist_to_workspace:
|
||||
root: "."
|
||||
paths:
|
||||
- linux-butterflynet
|
||||
build-lotus-soup:
|
||||
description: |
|
||||
Compile `lotus-soup` Testground test plan
|
||||
parameters:
|
||||
<<: *test-params
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: cd extern/filecoin-ffi && make
|
||||
- run:
|
||||
name: "go get lotus@master"
|
||||
command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy
|
||||
- run:
|
||||
name: "build lotus-soup testplan"
|
||||
command: pushd testplans/lotus-soup && go build -tags=testground .
|
||||
trigger-testplans:
|
||||
description: |
|
||||
Trigger `lotus-soup` test cases on TaaS
|
||||
parameters:
|
||||
<<: *test-params
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run:
|
||||
name: "download testground"
|
||||
command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli
|
||||
- run:
|
||||
name: "prepare .env.toml"
|
||||
command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
|
||||
- run:
|
||||
name: "prepare testground home dir and link test plans"
|
||||
command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup
|
||||
- run:
|
||||
name: "go get lotus@master"
|
||||
command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
|
||||
- run:
|
||||
name: "trigger deals baseline testplan on taas"
|
||||
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
|
||||
- run:
|
||||
name: "trigger payment channel stress testplan on taas"
|
||||
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
|
||||
build-macos:
|
||||
- linux_amd64_v1
|
||||
|
||||
build-darwin-amd64:
|
||||
description: build darwin lotus binary
|
||||
parameters:
|
||||
publish:
|
||||
default: false
|
||||
description: publish github release and homebrew?
|
||||
type: boolean
|
||||
working_directory: ~/go/src/github.com/filecoin-project/lotus
|
||||
macos:
|
||||
xcode: "13.4.1"
|
||||
working_directory: ~/go/src/github.com/filecoin-project/lotus
|
||||
steps:
|
||||
- prepare:
|
||||
linux: false
|
||||
darwin: true
|
||||
- install_ipfs_macos
|
||||
- restore_cache:
|
||||
name: restore cargo cache
|
||||
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
|
||||
- when:
|
||||
condition: << parameters.publish >>
|
||||
- run: make lotus lotus-miner lotus-worker
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
command: ./scripts/version-check.sh ./lotus
|
||||
- run: |
|
||||
mkdir -p /tmp/workspace/darwin_amd64_v1 && \
|
||||
mv lotus lotus-miner lotus-worker /tmp/workspace/darwin_amd64_v1/
|
||||
- persist_to_workspace:
|
||||
root: /tmp/workspace
|
||||
paths:
|
||||
- darwin_amd64_v1
|
||||
|
||||
build-darwin-arm64:
|
||||
description: self-hosted m1 runner
|
||||
working_directory: ~/go/src/github.com/filecoin-project/lotus
|
||||
machine: true
|
||||
resource_class: filecoin-project/self-hosted-m1
|
||||
steps:
|
||||
- run: goreleaser release --rm-dist
|
||||
- run: echo 'export PATH=/opt/homebrew/bin:"$PATH"' >> "$BASH_ENV"
|
||||
- prepare:
|
||||
linux: false
|
||||
darwin: true
|
||||
- run: |
|
||||
export CPATH=$(brew --prefix)/include
|
||||
export LIBRARY_PATH=$(brew --prefix)/lib
|
||||
make lotus lotus-miner lotus-worker
|
||||
- run:
|
||||
name: check tag and version output match
|
||||
command: ./scripts/version-check.sh ./lotus
|
||||
- run: |
|
||||
mkdir -p /tmp/workspace/darwin_arm64 && \
|
||||
mv lotus lotus-miner lotus-worker /tmp/workspace/darwin_arm64/
|
||||
- persist_to_workspace:
|
||||
root: /tmp/workspace
|
||||
paths:
|
||||
- darwin_arm64
|
||||
- run:
|
||||
command: make clean
|
||||
when: always
|
||||
- run:
|
||||
name: cleanup homebrew
|
||||
command: HOMEBREW_NO_AUTO_UPDATE=1 brew uninstall pkg-config coreutils jq hwloc
|
||||
when: always
|
||||
|
||||
release:
|
||||
executor: golang
|
||||
parameters:
|
||||
dry-run:
|
||||
default: false
|
||||
description: should this release actually publish its artifacts?
|
||||
type: boolean
|
||||
steps:
|
||||
- checkout
|
||||
- run: |
|
||||
echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list
|
||||
sudo apt update
|
||||
sudo apt install goreleaser-pro
|
||||
- install_ipfs
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- when:
|
||||
condition: << parameters.dry-run >>
|
||||
steps:
|
||||
- run: goreleaser release --rm-dist --snapshot --debug
|
||||
- run: ./scripts/generate-checksums.sh
|
||||
- run: ./scripts/publish-checksums.sh
|
||||
- when:
|
||||
condition:
|
||||
not: << parameters.publish >>
|
||||
not: << parameters.dry-run >>
|
||||
steps:
|
||||
- run: goreleaser release --rm-dist --snapshot
|
||||
- run: goreleaser release --rm-dist --debug
|
||||
- run: ./scripts/generate-checksums.sh
|
||||
- store_artifacts:
|
||||
path: dist
|
||||
- persist_to_workspace:
|
||||
root: "."
|
||||
paths:
|
||||
- dist
|
||||
- save_cache:
|
||||
name: save cargo cache
|
||||
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
|
||||
paths:
|
||||
- "~/.rustup"
|
||||
- "~/.cargo"
|
||||
- run: ./scripts/publish-checksums.sh
|
||||
|
||||
build-appimage:
|
||||
machine:
|
||||
@ -423,7 +355,14 @@ jobs:
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: "."
|
||||
at: /tmp/workspace
|
||||
- run:
|
||||
name: Update Go
|
||||
command: |
|
||||
sudo rm -rf /usr/local/go && \
|
||||
curl -L https://golang.org/dl/go`cat GO_VERSION_MIN`.linux-amd64.tar.gz -o /tmp/go.tar.gz && \
|
||||
sudo tar -C /usr/local -xvf /tmp/go.tar.gz
|
||||
- run: go version
|
||||
- run:
|
||||
name: install appimage-builder
|
||||
command: |
|
||||
@ -451,13 +390,11 @@ jobs:
|
||||
command: |
|
||||
sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml
|
||||
make appimage
|
||||
- run:
|
||||
name: prepare workspace
|
||||
command: |
|
||||
mkdir appimage
|
||||
mv Lotus-*.AppImage appimage
|
||||
- run: |
|
||||
mkdir -p /tmp/workspace/appimage && \
|
||||
mv Lotus-*.AppImage /tmp/workspace/appimage/
|
||||
- persist_to_workspace:
|
||||
root: "."
|
||||
root: /tmp/workspace
|
||||
paths:
|
||||
- appimage
|
||||
|
||||
@ -465,7 +402,6 @@ jobs:
|
||||
gofmt:
|
||||
executor: golang
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run:
|
||||
command: "! go fmt ./... 2>&1 | read"
|
||||
@ -473,7 +409,6 @@ jobs:
|
||||
gen-check:
|
||||
executor: golang
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: make deps
|
||||
- run: go install golang.org/x/tools/cmd/goimports
|
||||
@ -488,7 +423,6 @@ jobs:
|
||||
docs-check:
|
||||
executor: golang
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run: go install golang.org/x/tools/cmd/goimports
|
||||
- run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
|
||||
@ -525,7 +459,6 @@ jobs:
|
||||
Arguments to pass to golangci-lint
|
||||
executor: << parameters.executor >>
|
||||
steps:
|
||||
- install-deps
|
||||
- prepare
|
||||
- run:
|
||||
command: make deps
|
||||
@ -553,13 +486,13 @@ jobs:
|
||||
steps:
|
||||
- run:
|
||||
name: Install git jq curl
|
||||
command: apt update && apt install -y git jq curl
|
||||
command: apt update && apt install -y git jq curl sudo
|
||||
- checkout
|
||||
- git_fetch_all_tags
|
||||
- checkout
|
||||
- install_ipfs
|
||||
- attach_workspace:
|
||||
at: "."
|
||||
at: /tmp/workspace
|
||||
- when:
|
||||
condition: << parameters.linux >>
|
||||
steps:
|
||||
@ -604,129 +537,100 @@ jobs:
|
||||
shell: /bin/bash -o pipefail
|
||||
command: |
|
||||
snapcraft upload *.snap --release << parameters.channel >>
|
||||
|
||||
build-and-push-image:
|
||||
description: build and push docker images to public AWS ECR registry
|
||||
executor: aws-cli/default
|
||||
build-docker:
|
||||
description: >
|
||||
Publish to Dockerhub
|
||||
executor: docker/docker
|
||||
parameters:
|
||||
profile-name:
|
||||
image:
|
||||
type: string
|
||||
default: "default"
|
||||
description: AWS profile name to be configured.
|
||||
|
||||
aws-access-key-id:
|
||||
type: env_var_name
|
||||
default: AWS_ACCESS_KEY_ID
|
||||
default: lotus
|
||||
description: >
|
||||
AWS access key id for IAM role. Set this to the name of
|
||||
the environment variable you will set to hold this
|
||||
value, i.e. AWS_ACCESS_KEY.
|
||||
|
||||
aws-secret-access-key:
|
||||
type: env_var_name
|
||||
default: AWS_SECRET_ACCESS_KEY
|
||||
description: >
|
||||
AWS secret key for IAM role. Set this to the name of
|
||||
the environment variable you will set to hold this
|
||||
value, i.e. AWS_SECRET_ACCESS_KEY.
|
||||
|
||||
region:
|
||||
type: env_var_name
|
||||
default: AWS_REGION
|
||||
description: >
|
||||
Name of env var storing your AWS region information,
|
||||
defaults to AWS_REGION
|
||||
|
||||
account-url:
|
||||
type: env_var_name
|
||||
default: AWS_ECR_ACCOUNT_URL
|
||||
description: >
|
||||
Env var storing Amazon ECR account URL that maps to an AWS account,
|
||||
e.g. {awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com
|
||||
defaults to AWS_ECR_ACCOUNT_URL
|
||||
|
||||
dockerfile:
|
||||
Passed to the docker build process to determine which image in the
|
||||
Dockerfile should be built. Expected values are `lotus`,
|
||||
`lotus-all-in-one`
|
||||
network:
|
||||
type: string
|
||||
default: Dockerfile
|
||||
description: Name of dockerfile to use. Defaults to Dockerfile.
|
||||
|
||||
path:
|
||||
type: string
|
||||
default: .
|
||||
description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory).
|
||||
|
||||
extra-build-args:
|
||||
default: "mainnet"
|
||||
description: >
|
||||
Passed to the docker build process using GOFLAGS+=-tags=<<network>>.
|
||||
Expected values are `debug`, `2k`, `calibnet`, `butterflynet`,
|
||||
`interopnet`.
|
||||
channel:
|
||||
type: string
|
||||
default: ""
|
||||
description: >
|
||||
Extra flags to pass to docker build. For examples, see
|
||||
https://docs.docker.com/engine/reference/commandline/build
|
||||
|
||||
repo:
|
||||
type: string
|
||||
description: Name of an Amazon ECR repository
|
||||
|
||||
tag:
|
||||
type: string
|
||||
default: "latest"
|
||||
description: A comma-separated string containing docker image tags to build and push (default = latest)
|
||||
|
||||
target:
|
||||
type: string
|
||||
default: "lotus-all-in-one"
|
||||
description: Docker target to build
|
||||
|
||||
The release channel to use for this image.
|
||||
push:
|
||||
type: boolean
|
||||
default: false
|
||||
description: >
|
||||
When true, pushes the image to Dockerhub
|
||||
steps:
|
||||
- run:
|
||||
name: Confirm that environment variables are set
|
||||
command: |
|
||||
if [ -z "$AWS_ACCESS_KEY_ID" ]; then
|
||||
echo "No AWS_ACCESS_KEY_ID is set. Skipping build-and-push job ..."
|
||||
circleci-agent step halt
|
||||
fi
|
||||
|
||||
- aws-cli/setup:
|
||||
profile-name: <<parameters.profile-name>>
|
||||
aws-access-key-id: <<parameters.aws-access-key-id>>
|
||||
aws-secret-access-key: <<parameters.aws-secret-access-key>>
|
||||
aws-region: <<parameters.region>>
|
||||
|
||||
- run:
|
||||
name: Log into Amazon ECR
|
||||
command: |
|
||||
aws ecr-public get-login-password --region $<<parameters.region>> --profile <<parameters.profile-name>> | docker login --username AWS --password-stdin $<<parameters.account-url>>
|
||||
|
||||
- setup_remote_docker
|
||||
- checkout
|
||||
|
||||
- setup_remote_docker:
|
||||
version: 19.03.13
|
||||
docker_layer_caching: false
|
||||
|
||||
- docker/check:
|
||||
docker-username: DOCKERHUB_USERNAME
|
||||
docker-password: DOCKERHUB_PASSWORD
|
||||
- when:
|
||||
condition:
|
||||
equal: [ mainnet, <<parameters.network>> ]
|
||||
steps:
|
||||
- when:
|
||||
condition: <parameters.push>>
|
||||
steps:
|
||||
- docker/build:
|
||||
image: filecoin/<<parameters.image>>
|
||||
extra_build_args: --target <<parameters.image>>
|
||||
tag: <<parameters.channel>>
|
||||
- run:
|
||||
name: Build docker image
|
||||
name: Docker push
|
||||
command: |
|
||||
registry_id=$(echo $<<parameters.account-url>> | sed "s;\..*;;g")
|
||||
|
||||
docker_tag_args=""
|
||||
IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
|
||||
for tag in "${DOCKER_TAGS[@]}"; do
|
||||
docker_tag_args="$docker_tag_args -t $<<parameters.account-url>>/<<parameters.repo>>:$tag"
|
||||
done
|
||||
|
||||
docker build \
|
||||
<<#parameters.extra-build-args>><<parameters.extra-build-args>><</parameters.extra-build-args>> \
|
||||
--target <<parameters.target>> \
|
||||
-f <<parameters.path>>/<<parameters.dockerfile>> \
|
||||
$docker_tag_args \
|
||||
<<parameters.path>>
|
||||
|
||||
docker push filecoin/<<parameters.image>>:<<parameters.channel>>
|
||||
if [["[[ ! -z $CIRCLE_SHA ]]"]]; then
|
||||
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_SHA}"
|
||||
docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA}"
|
||||
fi
|
||||
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
|
||||
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>> filecoin/<<parameters.image>>:"${CIRCLE_TAG}"
|
||||
docker push filecoin/<<parameters.image>>:"${CIRCLE_TAG}"
|
||||
fi
|
||||
- unless:
|
||||
condition: <<parameters.push>>
|
||||
steps:
|
||||
- docker/build:
|
||||
image: filecoin/<<parameters.image>>
|
||||
extra_build_args: --target <<parameters.image>>
|
||||
- when:
|
||||
condition:
|
||||
not:
|
||||
equal: [ mainnet, <<parameters.network>> ]
|
||||
steps:
|
||||
- when:
|
||||
condition: <<parameters.push>>
|
||||
steps:
|
||||
- docker/build:
|
||||
image: filecoin/<<parameters.image>>
|
||||
extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>
|
||||
tag: <<parameters.channel>>-<<parameters.network>>
|
||||
- run:
|
||||
name: Push image to Amazon ECR
|
||||
name: Docker push
|
||||
command: |
|
||||
IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
|
||||
for tag in "${DOCKER_TAGS[@]}"; do
|
||||
docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag}
|
||||
done
|
||||
docker push filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>>
|
||||
if [["[[ ! -z $CIRCLE_SHA ]]"]]; then
|
||||
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_SHA}"-<<parameters.network>>
|
||||
docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA}"-<<parameters.network>>
|
||||
fi
|
||||
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
|
||||
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_TAG}"-<<parameters.network>>
|
||||
docker push filecoin/<<parameters.image>>:"${CIRCLE_TAG}"-<<parameters.network>>
|
||||
fi
|
||||
- unless:
|
||||
condition: <<parameters.push>>
|
||||
steps:
|
||||
- docker/build:
|
||||
image: filecoin/<<parameters.image>>
|
||||
extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>
|
||||
|
||||
publish-packer-snap:
|
||||
description: build packer image with snap. mainnet only.
|
||||
@ -734,55 +638,8 @@ jobs:
|
||||
name: packer
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: "."
|
||||
- packer_build:
|
||||
template: tools/packer/lotus-snap.pkr.hcl
|
||||
publish-dockerhub:
|
||||
description: publish to dockerhub
|
||||
machine:
|
||||
image: ubuntu-2004:202010-01
|
||||
parameters:
|
||||
tag:
|
||||
type: string
|
||||
default: latest
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: dockerhub login
|
||||
command: echo $DOCKERHUB_PASSWORD | docker login --username $DOCKERHUB_USERNAME --password-stdin
|
||||
- run:
|
||||
name: docker build
|
||||
command: |
|
||||
docker build --target lotus -t filecoin/lotus:<< parameters.tag >> -f Dockerfile.lotus .
|
||||
docker build --target lotus-gateway -t filecoin/lotus-gateway:<< parameters.tag >> -f Dockerfile.lotus .
|
||||
docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:<< parameters.tag >> -f Dockerfile.lotus .
|
||||
if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then
|
||||
docker build --target lotus -t filecoin/lotus:$CIRCLE_SHA1 -f Dockerfile.lotus .
|
||||
docker build --target lotus-gateway -t filecoin/lotus-gateway:$CIRCLE_SHA1 -f Dockerfile.lotus .
|
||||
docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_SHA1 -f Dockerfile.lotus .
|
||||
fi
|
||||
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
|
||||
docker build --target lotus -t filecoin/lotus:$CIRCLE_TAG -f Dockerfile.lotus .
|
||||
docker build --target lotus-gateway -t filecoin/lotus-gateway:$CIRCLE_TAG -f Dockerfile.lotus .
|
||||
docker build --target lotus-all-in-one -t filecoin/lotus-all-in-one:$CIRCLE_TAG -f Dockerfile.lotus .
|
||||
fi
|
||||
- run:
|
||||
name: docker push
|
||||
command: |
|
||||
docker push filecoin/lotus:<< parameters.tag >>
|
||||
docker push filecoin/lotus-gateway:<< parameters.tag >>
|
||||
docker push filecoin/lotus-all-in-one:<< parameters.tag >>
|
||||
if [["[[ ! -z $CIRCLE_SHA1 ]]"]]; then
|
||||
docker push filecoin/lotus:$CIRCLE_SHA1
|
||||
docker push filecoin/lotus-gateway:$CIRCLE_SHA1
|
||||
docker push filecoin/lotus-all-in-one:$CIRCLE_SHA1
|
||||
fi
|
||||
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
|
||||
docker push filecoin/lotus:$CIRCLE_TAG
|
||||
docker push filecoin/lotus-gateway:$CIRCLE_TAG
|
||||
docker push filecoin/lotus-all-in-one:$CIRCLE_TAG
|
||||
fi
|
||||
|
||||
workflows:
|
||||
version: 2.1
|
||||
@ -823,31 +680,42 @@ workflows:
|
||||
suite: conformance-bleeding-edge
|
||||
target: "./conformance"
|
||||
vectors-branch: specs-actors-v7
|
||||
- trigger-testplans:
|
||||
|
||||
release:
|
||||
jobs:
|
||||
- build-linux-amd64:
|
||||
name: "Build ( linux / amd64 )"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- build-debug
|
||||
- build-linux:
|
||||
filters:
|
||||
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-ntwk-calibration:
|
||||
- build-darwin-amd64:
|
||||
name: "Build ( darwin / amd64 )"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-ntwk-butterfly:
|
||||
- build-darwin-arm64:
|
||||
name: "Build ( darwin / arm64 )"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-lotus-soup
|
||||
- build-macos:
|
||||
name: publish-macos
|
||||
publish: true
|
||||
- release:
|
||||
name: "Release"
|
||||
requires:
|
||||
- "Build ( darwin / amd64 )"
|
||||
- "Build ( linux / amd64 )"
|
||||
- "Build ( darwin / arm64 )"
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
@ -855,12 +723,19 @@ workflows:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-macos:
|
||||
- release:
|
||||
name: "Release (dry-run)"
|
||||
dry-run: true
|
||||
requires:
|
||||
- "Build ( darwin / amd64 )"
|
||||
- "Build ( linux / amd64 )"
|
||||
- "Build ( darwin / arm64 )"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-appimage:
|
||||
name: "Build AppImage"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
@ -869,22 +744,10 @@ workflows:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- publish:
|
||||
name: publish-linux
|
||||
linux: true
|
||||
requires:
|
||||
- build-linux
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
- /.*/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- publish:
|
||||
name: publish-appimage
|
||||
name: "Publish AppImage"
|
||||
appimage: true
|
||||
requires:
|
||||
- build-appimage
|
||||
- "Build AppImage"
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
@ -892,24 +755,11 @@ workflows:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- build-and-push-image:
|
||||
name: build-and-push/lotus-all-in-one
|
||||
dockerfile: Dockerfile.lotus
|
||||
path: .
|
||||
repo: lotus-dev
|
||||
tag: '${CIRCLE_SHA1:0:8}'
|
||||
target: lotus-all-in-one
|
||||
- build-and-push-image:
|
||||
name: build-and-push/lotus-test
|
||||
dockerfile: Dockerfile.lotus
|
||||
path: .
|
||||
repo: lotus-test
|
||||
tag: '${CIRCLE_SHA1:0:8}'
|
||||
target: lotus-test
|
||||
[[- range .SnapNames]]
|
||||
- publish-snapcraft:
|
||||
name: "Publish Snapcraft (lotus-filecoin / candidate)"
|
||||
name: "Publish Snapcraft ([[.]] / stable)"
|
||||
channel: stable
|
||||
snap-name: lotus-filecoin
|
||||
snap-name: [[.]]
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
@ -918,9 +768,9 @@ workflows:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+$/
|
||||
- publish-snapcraft:
|
||||
name: "Publish Snapcraft (lotus-filecoin / candidate)"
|
||||
name: "Publish Snapcraft ([[.]] / candidate)"
|
||||
channel: candidate
|
||||
snap-name: lotus-filecoin
|
||||
snap-name: [[.]]
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
@ -928,10 +778,14 @@ workflows:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+-rc\d+$/
|
||||
- publish-snapcraft:
|
||||
name: "Publish Snapcraft (lotus / stable)"
|
||||
[[- end]]
|
||||
[[- range .Networks]]
|
||||
- build-docker:
|
||||
name: "Docker push (lotus-all-in-one / stable / [[.]])"
|
||||
image: lotus-all-in-one
|
||||
channel: stable
|
||||
snap-name: lotus
|
||||
network: [[.]]
|
||||
push: true
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
@ -939,10 +793,12 @@ workflows:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+$/
|
||||
- publish-snapcraft:
|
||||
name: "Publish Snapcraft (lotus / candidate)"
|
||||
- build-docker:
|
||||
name: "Docker push (lotus-all-in-one / candidate / [[.]])"
|
||||
image: lotus-all-in-one
|
||||
channel: candidate
|
||||
snap-name: lotus
|
||||
network: [[.]]
|
||||
push: true
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
@ -950,16 +806,71 @@ workflows:
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+-rc\d+$/
|
||||
- publish-dockerhub:
|
||||
name: publish-dockerhub
|
||||
tag: stable
|
||||
- build-docker:
|
||||
name: "Docker push (lotus-all-in-one / edge / [[.]])"
|
||||
image: lotus-all-in-one
|
||||
channel: master
|
||||
network: [[.]]
|
||||
push: true
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- build-docker:
|
||||
name: "Docker build (lotus-all-in-one / [[.]])"
|
||||
image: lotus-all-in-one
|
||||
network: [[.]]
|
||||
push: false
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
[[- end]]
|
||||
- build-docker:
|
||||
name: "Docker push (lotus / stable / mainnet)"
|
||||
image: lotus
|
||||
channel: stable
|
||||
network: mainnet
|
||||
push: true
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
- /.*/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
- /^v\d+\.\d+\.\d+$/
|
||||
- build-docker:
|
||||
name: "Docker push (lotus / candidate / mainnet)"
|
||||
image: lotus
|
||||
channel: candidate
|
||||
network: mainnet
|
||||
push: true
|
||||
filters:
|
||||
branches:
|
||||
ignore:
|
||||
- /.*/
|
||||
tags:
|
||||
only:
|
||||
- /^v\d+\.\d+\.\d+-rc\d+$/
|
||||
- build-docker:
|
||||
name: "Docker push (lotus / master / mainnet)"
|
||||
image: lotus
|
||||
channel: master
|
||||
network: mainnet
|
||||
push: true
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- build-docker:
|
||||
name: "Docker build (lotus / mainnet)"
|
||||
image: lotus
|
||||
network: mainnet
|
||||
push: false
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
|
||||
|
||||
nightly:
|
||||
triggers:
|
||||
@ -970,21 +881,24 @@ workflows:
|
||||
only:
|
||||
- master
|
||||
jobs:
|
||||
[[- range .SnapNames]]
|
||||
- publish-snapcraft:
|
||||
name: "Publish Snapcraft Nightly (lotus-filecoin / edge)"
|
||||
name: "Publish Snapcraft ([[.]] / edge)"
|
||||
channel: edge
|
||||
snap-name: lotus-filecoin
|
||||
- publish-snapcraft:
|
||||
name: "Publish Snapcraft Nightly (lotus / edge)"
|
||||
channel: edge
|
||||
snap-name: lotus
|
||||
- publish-dockerhub:
|
||||
name: publish-dockerhub-nightly
|
||||
tag: nightly
|
||||
monthly:
|
||||
snap-name: [[.]]
|
||||
[[- end]]
|
||||
[[- range .Networks]]
|
||||
- build-docker:
|
||||
name: "Docker (lotus-all-in-one / nightly / [[.]])"
|
||||
image: lotus-all-in-one
|
||||
channel: nightly
|
||||
network: [[.]]
|
||||
push: true
|
||||
[[- end]]
|
||||
biweekly:
|
||||
triggers:
|
||||
- schedule:
|
||||
cron: "0 0 1 * *"
|
||||
cron: "0 0 1,15 * *"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
|
.gitattributes (new file, 2 lines, vendored)
@ -0,0 +1,2 @@
chain/actors/builtin/*/v* linguist-generated=true
chain/actors/builtin/*/message* linguist-generated=true
.github/ISSUE_TEMPLATE/bug_report.yml (2 lines, vendored)
@ -81,7 +81,7 @@ body:
    render: text
    description: |
      Please provide debug logs of the problem; remember you can set log level control for:
      * lotus: use `lotus log list` to get all log systems available and set level by `lotus log set-level`. An example can be found [here](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/#log-level-control).
      * lotus: use `lotus log list` to get all log systems available and set level by `lotus log set-level`. An example can be found [here](https://lotus.filecoin.io/lotus/configure/defaults/#log-level-control).
      * lotus-miner: `lotus-miner log list` to get all log systems available and set level by `lotus-miner log set-level`
      If you don't provide detailed logs when you raise the issue it will almost certainly be the first request I make before further diagnosing the problem.
    validations:
.github/pull_request_template.md (22 lines, vendored)
@ -1,21 +1,23 @@
## Related Issues
<!-- link all issues that this PR might resolve/fix. If an issue doesn't exist, include a brief motivation for the change being made.-->
<!-- Link issues that this PR might resolve/fix. If an issue doesn't exist, include a brief motivation for the change being made -->

## Proposed Changes
<!-- provide a clear list of the changes being made-->

<!-- A clear list of the changes being made -->

## Additional Info
<!-- callouts, links to documentation, etc. -->
<!-- Callouts, links to documentation, etc. -->

## Checklist

Before you mark the PR ready for review, please make sure that:
- [ ] All commits have a clear commit message.
- [ ] The PR title is in the form of `<PR type>: <area>: <change being made>`

- [ ] Commits have a clear commit message.
- [ ] PR title is in the form of `<PR type>: <area>: <change being made>`
  - example: `fix: mempool: Introduce a cache for valid signatures`
  - `PR type`: _fix_, _feat_, _INTERFACE BREAKING CHANGE_, _CONSENSUS BREAKING_, _build_, _chore_, _ci_, _docs_, _perf_, _refactor_, _revert_, _style_, _test_
  - `area`: _api_, _chain_, _state_, _vm_, _data transfer_, _market_, _mempool_, _message_, _block production_, _multisig_, _networking_, _paychan_, _proving_, _sealing_, _wallet_, _deps_
- [ ] This PR has tests for new functionality or change in behaviour
- [ ] If new user-facing features are introduced, clear usage guidelines and / or documentation updates should be included in https://lotus.filecoin.io or [Discussion Tutorials.](https://github.com/filecoin-project/lotus/discussions/categories/tutorials)
  - `PR type`: fix, feat, build, chore, ci, docs, perf, refactor, revert, style, test
  - `area`, e.g. api, chain, state, market, mempool, multisig, networking, paych, proving, sealing, wallet, deps
- [ ] New features have usage guidelines and / or documentation updates in
  - [ ] [Lotus Documentation](https://lotus.filecoin.io)
  - [ ] [Discussion Tutorials](https://github.com/filecoin-project/lotus/discussions/categories/tutorials)
- [ ] Tests exist for new functionality or change in behavior
- [ ] CI is green
.github/workflows/codeql-analysis.yml (2 lines, vendored)
@ -37,7 +37,7 @@ jobs:

    - uses: actions/setup-go@v1
      with:
        go-version: '1.18.1'
        go-version: '1.18.8'

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
|
.gitignore (3 lines, vendored)
@ -9,7 +9,6 @@
/lotus-chainwatch
/lotus-shed
/lotus-sim
/lotus-pond
/lotus-townhall
/lotus-fountain
/lotus-stats
@ -21,8 +20,6 @@
/docgen-md
/docgen-openrpc
/bench.json
/lotuspond/front/node_modules
/lotuspond/front/build
/cmd/lotus-townhall/townhall/node_modules
/cmd/lotus-townhall/townhall/build
/cmd/lotus-townhall/townhall/package-lock.json
@ -43,9 +43,6 @@ issues:

  exclude-use-default: false
  exclude-rules:
    - path: lotuspond
      linters:
        - errcheck

    - path: node/modules/lp2p
      linters:
|
.goreleaser.yaml (125 lines)
@ -1,119 +1,65 @@
|
||||
project_name: lotus
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
- make deps
|
||||
|
||||
universal_binaries:
|
||||
- id: lotus
|
||||
replace: true
|
||||
name_template: lotus
|
||||
ids:
|
||||
- lotus_darwin_amd64
|
||||
- lotus_darwin_arm64
|
||||
- id: lotus-miner
|
||||
replace: true
|
||||
name_template: lotus-miner
|
||||
ids:
|
||||
- lotus-miner_darwin_amd64
|
||||
- lotus-miner_darwin_arm64
|
||||
- id: lotus-worker
|
||||
replace: true
|
||||
name_template: lotus-worker
|
||||
ids:
|
||||
- lotus-worker_darwin_amd64
|
||||
- lotus-worker_darwin_arm64
|
||||
|
||||
builds:
|
||||
- id: lotus_darwin_amd64
|
||||
main: ./cmd/lotus
|
||||
- id: lotus
|
||||
binary: lotus
|
||||
builder: prebuilt
|
||||
goos:
|
||||
- darwin
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- FFI_BUILD_FROM_SOURCE=1
|
||||
ldflags:
|
||||
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
|
||||
- id: lotus-miner_darwin_amd64
|
||||
main: ./cmd/lotus-miner
|
||||
- arm64
|
||||
goamd64:
|
||||
- v1
|
||||
ignore:
|
||||
- goos: linux
|
||||
goarch: arm64
|
||||
prebuilt:
|
||||
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus
|
||||
- id: lotus-miner
|
||||
binary: lotus-miner
|
||||
builder: prebuilt
|
||||
goos:
|
||||
- darwin
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- FFI_BUILD_FROM_SOURCE=1
|
||||
ldflags:
|
||||
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
|
||||
- id: lotus-worker_darwin_amd64
|
||||
main: ./cmd/lotus-worker
|
||||
- arm64
|
||||
goamd64:
|
||||
- v1
|
||||
ignore:
|
||||
- goos: linux
|
||||
goarch: arm64
|
||||
prebuilt:
|
||||
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-miner
|
||||
- id: lotus-worker
|
||||
binary: lotus-worker
|
||||
builder: prebuilt
|
||||
goos:
|
||||
- darwin
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- FFI_BUILD_FROM_SOURCE=1
|
||||
ldflags:
|
||||
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
|
||||
- id: lotus_darwin_arm64
|
||||
main: ./cmd/lotus
|
||||
binary: lotus
|
||||
goos:
|
||||
- darwin
|
||||
goarch:
|
||||
- arm64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- FFI_BUILD_FROM_SOURCE=1
|
||||
- CPATH=/opt/homebrew/include
|
||||
- LIBRARY_PATH=/opt/homebrew/lib
|
||||
ldflags:
|
||||
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
|
||||
- id: lotus-miner_darwin_arm64
|
||||
main: ./cmd/lotus-miner
|
||||
binary: lotus-miner
|
||||
goos:
|
||||
- darwin
|
||||
goarch:
|
||||
- arm64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- FFI_BUILD_FROM_SOURCE=1
|
||||
- CPATH=/opt/homebrew/include
|
||||
- LIBRARY_PATH=/opt/homebrew/lib
|
||||
ldflags:
|
||||
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
|
||||
- id: lotus-worker_darwin_arm64
|
||||
main: ./cmd/lotus-worker
|
||||
binary: lotus-worker
|
||||
goos:
|
||||
- darwin
|
||||
goarch:
|
||||
- arm64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- FFI_BUILD_FROM_SOURCE=1
|
||||
- CPATH=/opt/homebrew/include
|
||||
- LIBRARY_PATH=/opt/homebrew/lib
|
||||
ldflags:
|
||||
- -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
|
||||
# - id: linux
|
||||
# main: ./cmd/lotus
|
||||
# binary: lotus
|
||||
# goos:
|
||||
# - linux
|
||||
# goarch:
|
||||
# - amd64
|
||||
# env:
|
||||
# - CGO_ENABLED=1
|
||||
# ldflags:
|
||||
# - -X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.{{.ShortCommit}}
|
||||
goamd64:
|
||||
- v1
|
||||
ignore:
|
||||
- goos: linux
|
||||
goarch: arm64
|
||||
prebuilt:
|
||||
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-worker
|
||||
|
||||
archives:
|
||||
- id: primary
|
||||
@ -129,8 +75,7 @@ release:
|
||||
owner: filecoin-project
|
||||
name: lotus
|
||||
prerelease: auto
|
||||
name_template: "Release v{{.Version}}"
|
||||
|
||||
name_template: "v{{.Version}}"
|
||||
|
||||
brews:
|
||||
- tap:
|
||||
@ -151,10 +96,8 @@ brews:
|
||||
homepage: "https://filecoin.io"
|
||||
description: "A homebrew cask for installing filecoin-project/lotus on MacOS"
|
||||
license: MIT
|
||||
skip_upload: auto
|
||||
dependencies:
|
||||
- name: pkg-config
|
||||
- name: jq
|
||||
- name: bzr
|
||||
- name: hwloc
|
||||
|
||||
# produced manually so we can include cid checksums
|
||||
|
CHANGELOG.md (410 lines)
@ -1,5 +1,393 @@
|
||||
# Lotus changelog
|
||||
|
||||
# 1.18.1 / 2022-11-28
|
||||
|
||||
This is a small OPTIONAL patch release for the mandatory v1.18.0 release that supports the Filecoin nv17 Shark Upgrade.
|
||||
We highly recommend you to read the full [v1.18.0 release note](https://github.com/filecoin-project/lotus/releases/tag/v1.18.0) if you haven't already.
|
||||
|
||||
Note to SPs:
|
||||
If you run into issues updating your miner node from an earlier release and it fails to restart, check your `journalctl` and see if you notice the following error:
|
||||
```
|
||||
New sector storage: <%d>
|
||||
Nov 19 15:03:43 g0lotus01 lotus-miner[<id>]: ERROR: creating node: starting node: could not build arguments for function "reflect".makeFuncStub (/usr/local/go/src/reflect/asm_amd64.s:28): failed to build *paths.Local: received non-nil error from function "reflect".makeFuncStub (/usr/local/go/src/reflect/asm_amd64.s:28): opening path /media/data1/lotusstorage: path with ID <%d> already opened: '<path>'
|
||||
```
|
||||
If so, this check was introduced via [#9032](https://github.com/filecoin-project/lotus/pull/9032), precisely this [line](https://github.com/filecoin-project/lotus/blame/master/storage/paths/local.go#L164-L166). It was added to prevent double-attaching paths, given it's now possible to manipulate paths at runtime. Verify your storage.json configs if you encounter this error, or remove the undesired duplicated path as you see fit.
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
- fix: cli: check found before dereferencing SectorInfo #9703
|
||||
|
||||
# 1.18.0 / 2022-11-15
|
||||
|
||||
> ⚠️ **Please note that Lotus v1.17.2 and up will require a Go version of v1.18.1 and up**
|
||||
|
||||
This is the final release of the upcoming MANDATORY Lotus release that introduces [Filecoin network v17, codenamed the Shark upgrade](https://github.com/filecoin-project/community/discussions/74?sort=top#discussioncomment-3825422). The Shark upgrade delivers a wave of protocol refinements that will allow useful smart contracts to be written using the FVM (e.g. programmable markets, lending contracts, etc.).
|
||||
|
||||
**The Filecoin mainnet is scheduled to upgrade to nv17 at epoch 2383680, on Nov 30th on 2022-11-30T14:00:00Z. All node operators, including storage providers, must upgrade to this release before that time. Storage providers must update their daemons, miners, market and worker(s)/boost.**
|
||||
|
||||
The Shark upgrade introduces the following FIPs, delivered in [actors v9](https://github.com/filecoin-project/builtin-actors/releases/tag/v9.0.3):
|
||||
- [FIP0029 Beneficiary Address for Storage Providers](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0029.md): step towards better lending market for SP
|
||||
- [FIP0034 Fix PreCommit Deposit Independent of Sector Content](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0034.md): resolves a significant weakening of Filecoin PoRep’s security guarantees
|
||||
- ❗Pre-commit deposit will be calculated as the 20-day projection of expected reward earned by a sector with **a sector quality of 10 (i.e. full of verified deals)**, regardless of sector content. The Initial Pledge value, due when the sector is proven, is left **unchanged**. (A rough formula sketch follows at the end of this list.)
|
||||
- [FIP0041 Forward Compatibility for PreCommit](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0041.md): enables a cleaner and easier transition to Programmable Storage Markets
|
||||
- [FIP0044 Standard Message Authentication](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0044.md): enable metadata authentication for user defined actor
|
||||
- [FIP0045 Decoupling Fil+ from Marketplace](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0045.md): DataCap and the 10x QAP is now only associated with how long DATA is wanted to be stored on the network.
|
||||
- This is a transitional state to enabling far more efficient and dynamic storage markets on Filecoin network in the future.
|
||||
- ⭐️ First Fungible Token Contract - Datacap Actor, on Filecoin! ([fungible token standard](https://github.com/filecoin-project/FIPs/blob/master/FRCs/frc-0046.md), [token contract library](https://github.com/helix-onchain/filecoin/tree/5455f4f831e0f3f20005a9a789623d7ad6dada15/frc46_token)).
|
||||
- For storage deal participants (clients and storage providers):
|
||||
- `PublishStorageDeals`/`ProveCommit(Aggregate)`/`ProveReplicaUpdates` message that includes verified deals will see a gas usage increase, more details can be found [here](https://github.com/filecoin-project/FIPs/blob/385f069b3b146c5fef4fdc1109a0e2f35f399e48/FIPS/fip-0045.md?plain=1#L784)
|
||||
- `Term` is introduced for defining how long the DataCap is assigned to a piece of data. Anyone who cares about that piece of data may extend the _term_, which incentivizes SPs to store the data longer on the network without a new deal/resealing.
|
||||
- There is no more diluted verified deal QAP due to deal/sector space time for new sectors that contains verified deals after this upgrade.
|
||||
- SPs may enjoy 90 days of extra QAP than deal duration by default, given `term_max` is always `deal duration + 90 days`.
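To make the FIP-0034 change above concrete, here is a rough sketch of the new pre-commit deposit (a paraphrase of the bullet above, not the exact on-chain formula): the deposit is the 20-day projected reward for the sector evaluated at a fixed quality multiplier of 10, independent of what the sector actually contains, while the initial pledge due at ProveCommit is unchanged.

$$
\mathrm{PreCommitDeposit}(s) \;\approx\; \mathrm{ExpectedReward}_{20\,\text{days}}\big(\mathrm{QAP}_{10}(s)\big),
\qquad \mathrm{QAP}_{10}(s) \;=\; 10 \times \mathrm{RawBytePower}(s)
$$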
|
||||
❗ We highly recommend all lotus users, especially storage providers, developers and clients to read the FIPs in detail to understand the protocol changes and potential impact to network participants!
|
||||
|
||||
## Snapshots
|
||||
|
||||
The [#fil-infra](https://filecoinproject.slack.com/archives/C039RBG3RPC) team at PL has launched a brand new Lightweight Filecoin Chain Snapshots Service to support chain management needs for the node operators, check [here](https://www.notion.so/pl-strflt/Lightweight-Filecoin-Chain-Snapshots-17e4c386f35c44548f5863afb7b5e024) for the full detail.
|
||||
We are planning to switch [the snapshot service listed in lotus docs](https://lotus.filecoin.io/lotus/manage/chain-management/#lightweight-snapshot) to the new Lightweight Filecoin Chain Snapshots Service by EOY, and deprecate public support of the current snapshots production. We recommend all users to test and switch the new service ASAP, and if you run into any issue, please report them [here](https://github.com/filecoin-project/filecoin-chain-archiver/discussions/new?category=feedback) and the team would be happy to support you! For the main differences between the old & the new service, checkout the FAQ section [here](https://www.notion.so/pl-strflt/Lightweight-Filecoin-Chain-Snapshots-17e4c386f35c44548f5863afb7b5e024)
|
||||
|
||||
## Migration
|
||||
|
||||
We are expecting a heavier than normal state migration for this upgrade due to the amount of the state changes introduced.
|
||||
All node operators, including storage providers, should be aware that two pre-migrations are being scheduled. The first pre-migration will begin at 2022-11-30T12:00:00Z (120 minutes before the real upgrade), the second pre-migration will begin at 2022-11-30T13:45:00Z (7.5 minutes before the real upgrade).
|
||||
The first pre-migration will take up to 1.5hr, depending on the amount of the historical state in the node blockstore and the hardware specs the node is running on. During this time, expect slower block validation times, increased CPU and memory usage, and longer delays for API queries.
|
||||
We recommend node operators who haven't enabled splitstore `universal` mode and who do not care about historical chain states to prune the chain blockstore by syncing from a snapshot 1-2 days before the upgrade.
|
||||
Note to full archival node operators: you may expect a migration that takes up to 20 min upon the upgrade, during this period your node will fall out of sync and your chain service may have some disruption. However, you can expect the node to catch up soon after the migration completes.
|
||||
|
||||
### v9 Built-in actor bundles
|
||||
|
||||
Bundles for all networks (mainnet, calibnet, etc.) are included in the lotus source tree (`build/actors/`) and embedded at build time; the v9 actors bundle can be found [here](https://github.com/filecoin-project/lotus/blob/master/build/actors/v9.tar.zst).
|
||||
Reminder: Lotus verifies that the bundle CIDs are the right ones upon build & upgrade against the values in `build/builtin_actors_gen.go`, according to the network you are building. You may also check the bundle manifest CID matches the bundle gen-ed values by running `lotus state actor-cids --network-version 17`.
|
||||
|
||||
The manifest CID & full list of actor code CIDs for nv17 using [actor v9](https://github.com/filecoin-project/builtin-actors/releases/tag/v9.0.3) is:
|
||||
|
||||
```
|
||||
"_manifest": "bafy2bzaceb6j6666h36xnhksu3ww4kxb6e25niayfgkdnifaqi6m6ooc66i6i"
|
||||
"account": "bafk2bzacect2p7urje3pylrrrjy3tngn6yaih4gtzauuatf2jllk3ksgfiw2y"
|
||||
"cron": "bafk2bzacebcec3lffmos3nawm5cvwehssxeqwxixoyyfvejy7viszzsxzyu26"
|
||||
"datacap": "bafk2bzacebb6uy2ys7tapekmtj7apnjg7oyj4ia5t7tlkvbmwtxwv74lb2pug"
|
||||
"init": "bafk2bzacebtdq4zyuxk2fzbdkva6kc4mx75mkbfmldplfntayhbl5wkqou33i"
|
||||
"multisig": "bafk2bzacec4va3nmugyqjqrs3lqyr2ij67jhjia5frvx7omnh2isha6abxzya"
|
||||
"paymentchannel": "bafk2bzacebhdvjbjcgupklddfavzef4e4gnkt3xk3rbmgfmk7xhecszhfxeds"
|
||||
"reward": "bafk2bzacebezgbbmcm2gbcqwisus5fjvpj7hhmu5ubd37phuku3hmkfulxm2o"
|
||||
"storagemarket": "bafk2bzacec3j7p6gklk64stax5px3xxd7hdtejaepnd4nw7s2adihde6emkcu"
|
||||
"storageminer": "bafk2bzacedyux5hlrildwutvvjdcsvjtwsoc5xnqdjl73ouiukgklekeuyfl4"
|
||||
"storagepower": "bafk2bzacedsetphfajgne4qy3vdrpyd6ekcmtfs2zkjut4r34cvnuoqemdrtw"
|
||||
"system": "bafk2bzaceagvlo2jtahj7dloshrmwfulrd6e2izqev32qm46eumf754weec6c"
|
||||
"verifiedregistry": "bafk2bzacecf3yodlyudzukumehbuabgqljyhjt5ifiv4vetcfohnvsxzynwga"
|
||||
```
|
||||
|
||||
## New Features
|
||||
- Integrate actor v9:
|
||||
- test: Add invariance checks to v17 migration test ([filecoin-project/lotus#9454](https://github.com/filecoin-project/lotus/pull/9454))
|
||||
- Implement and support [FIP0045 Decoupling Fil+ from Marketplace](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0045.md):
|
||||
- fix: state: add datacap actor to actors registry ([filecoin-project/lotus#9476](https://github.com/filecoin-project/lotus/pull/9476))
|
||||
- feat: actors: Integrate builtin-actors changes for FIP-0045 ([filecoin-project/lotus#9355](https://github.com/filecoin-project/lotus/pull/9355))
|
||||
- feat: actors: Integrate datacap actor into lotus (#9348) ([filecoin-project/lotus#9348](https://github.com/filecoin-project/lotus/pull/9348))
|
||||
- feat: cli: Add commands for listing allocations and removing expired allocations ([filecoin-project/lotus#9468](https://github.com/filecoin-project/lotus/pull/9468))
|
||||
- feat: sealing pipeline: Prepare deal assigning logic for FIP-45 ([filecoin-project/lotus#9412](https://github.com/filecoin-project/lotus/pull/9412))
|
||||
- feat: add API methods to get allocations and claims ([filecoin-project/lotus#9437](https://github.com/filecoin-project/lotus/pull/9437))
|
||||
- Implement and support [FIP0029 Beneficiary Address for Storage Providers](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0029.md)
|
||||
- feat: api/cli: beneficiary withdraw api and cli #9296
|
||||
- feat: api/cli: change beneficiary propose and confirm for actors and multisigs. #9307
|
||||
|
||||
## Improvements
|
||||
- feat: wdpost: Add ability to only have single partition per msg for partitions with recovery sectors ([filecoin-project/lotus#9427](https://github.com/filecoin-project/lotus/pull/9427))
|
||||
- feat: API: support typed errors over RPC ([filecoin-project/lotus#9061](https://github.com/filecoin-project/lotus/pull/9061))
|
||||
- feat: refactor: remove NewestNetworkVersion ([filecoin-project/lotus#9351](https://github.com/filecoin-project/lotus/pull/9351))
|
||||
- chore: actors: Allow builtin-actors to return a map of methods (#9342) ([filecoin-project/lotus#9342](https://github.com/filecoin-project/lotus/pull/9342))
|
||||
|
||||
## Dependencies
|
||||
- Update FFI ([filecoin-project/lotus#9484](https://github.com/filecoin-project/lotus/pull/9484))
|
||||
- chore: deps: update go-state-types and builtin-actors for v9 release ([filecoin-project/lotus#9485](https://github.com/filecoin-project/lotus/pull/9485))
|
||||
- deps: backport: #9455 ([filecoin-project/lotus#9463](https://github.com/filecoin-project/lotus/pull/9463))
|
||||
- Deps: Update go-fil-markets to 1.24.0-v17 ([filecoin-project/lotus#9450](https://github.com/filecoin-project/lotus/pull/9450))
|
||||
- github.com/filecoin-project/go-jsonrpc (v0.1.7 -> v0.1.8)
|
||||
- github.com/filecoin-project/go-state-types (v0.1.12-beta -> v0.9.0):
|
||||
|
||||
## Others
|
||||
- fix: upgrade: no splash banner for nv17 :( ([filecoin-project/lotus#9486](https://github.com/filecoin-project/lotus/pull/9486))
|
||||
- chore: build: add calib upgrade param for shark ([filecoin-project/lotus#9483](https://github.com/filecoin-project/lotus/pull/9483))
|
||||
- chore: update butterfly artifacts ([filecoin-project/lotus#9467](https://github.com/filecoin-project/lotus/pull/9467))
|
||||
- chore: butterfly: update assets ([filecoin-project/lotus#9462](https://github.com/filecoin-project/lotus/pull/9462))
|
||||
- Delete lotus-pond (#9352) ([filecoin-project/lotus#9352](https://github.com/filecoin-project/lotus/pull/9352))
|
||||
- build: set version to v1.18.0-dev
|
||||
|
||||
## lotus-market EOL notice
|
||||
|
||||
As mentioned in the [lotus v1.17.0 release notes](https://github.com/filecoin-project/lotus/releases/tag/v1.17.0), markets-related features, enhancements and fixes are now lower priority for Lotus. We recommend our users migrate to other deal-making focused software, like [boost](https://boost.filecoin.io/), as soon as possible. That being said:
|
||||
- Lotus maintainers will stop supporting lotus-market subcomponent/**storage** deal making related issues or enhancements on Jan 31, 2023.
|
||||
- In Q2 2023, we will be deprecating/removing lotus-market related code from this repository.
|
||||
If you have any questions or concerns, please raise them in [Lotus discussion](https://github.com/filecoin-project/lotus/discussions/categories/market)!
|
||||
|
||||
|
||||
## Contributors
|
||||
|
||||
| Contributor | Commits | Lines ± | Files Changed |
|
||||
|-------------|---------|---------|---------------|
|
||||
| @geoff-vball | 73 | +14533/-19712 | 509 |
|
||||
| @arajasek | 16 | +2230/-303 | 49 |
|
||||
| @arajasek | 29 | +701/-297 | 117 |
|
||||
| @magik6k | 5 | +429/-135 | 45 |
|
||||
| @Frrist | 1 | +246/-203 | 25 |
|
||||
| @stebalien | 2 | +323/-2 | 6 |
|
||||
| @shrenujbansal | 3 | +176/-61 | 10 |
|
||||
| @ZenGround0 | 2 | +78/-38 | 5 |
|
||||
| @jennijuju | 8 | +97/-18 | 16 |
|
||||
| @simlecode | 5 | +18/-9 | 11 |
|
||||
| Kevin Li | 1 | +7/-0 | 1 |
|
||||
| @zenground0 | 2 | +3/-3 | 3 |
|
||||
| @jennijuju | 1 | +3/-3 | 2 |
|
||||
| Rod Vagg | 1 | +3/-2 | 2 |
|
||||
| @jennijuju | 1 | +2/-2 | 2 |
|
||||
| Peter Rabbitson | 1 | +3/-0 | 1 |
|
||||
| Jakub Sztandera | 1 | +1/-1 | 1 |
|
||||
|
||||
# v1.17.2 / 2022-10-05
|
||||
|
||||
This is an OPTIONAL release of Lotus. This feature release introduces new sector number management APIs that enable all the Sealing-as-a-Service and Lotus interactions needed for such services to function. The default propagation delay setting for storage providers has also been changed, along with numerous other features and enhancements. Check out the sub-bullet points in the feature and enhancement sections for a short description of each.
|
||||
|
||||
### Highlights
|
||||
|
||||
🦭 **SaaS** 🦭
|
||||
New sector management APIs make it possible to import partially sealed sectors into Lotus. This release implements all the SaaS<->Lotus interactions needed for such services to work. Deep dive into the new APIs here: https://github.com/filecoin-project/lotus/discussions/9079#discussioncomment-3652044
|
||||
|
||||
⏳ **Propagation delay** ⌛️
|
||||
In v1.17.2 the default PropagationDelay has been raised from 6 seconds to 10 seconds, and you can tune it yourself with the `PROPAGATION_DELAY_SECS` environment variable. This means the miner will now wait 10 seconds for other blocks to arrive from the network before computing a winningPoSt (if eligible). In your `lotus-miner` logs you will accordingly see `"baseDeltaSeconds": 10` by default.
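
A minimal example of overriding the delay when starting the miner (10 seconds is already the default, so setting the variable is only needed for a different value):

```bash
# Start the miner with an explicit propagation delay of 8 seconds
PROPAGATION_DELAY_SECS=8 lotus-miner run
```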
|
||||
|
||||
⚠️ **Please note that Lotus v1.17.2 will require a Go-version of v1.18.1 or higher!**
|
||||
|
||||
## New features
|
||||
- feat: sealing: Partially sealed sector import ([filecoin-project/lotus#9210](https://github.com/filecoin-project/lotus/pull/9210))
|
||||
- Implements support for importing (partially) sealed sectors which is needed for enabling external sealing services.
|
||||
- feat: sealing: Use bitfields to manage sector numbers ([filecoin-project/lotus#9183](https://github.com/filecoin-project/lotus/pull/9183))
|
||||
- Needed for Sealing-as-a-Service. Move sector number assigning logic to use stored bitfields instead of stored counters. Makes it possible to reserve ranges of sector numbers, see the sector assigner state and view sector number reservations.
|
||||
- feat: env: propagation delay ([filecoin-project/lotus#9290](https://github.com/filecoin-project/lotus/pull/9290))
|
||||
    - The default propagation delay is raised to 10 seconds from 6 seconds. You can also set it yourself with the `PROPAGATION_DELAY_SECS` environment variable.
|
||||
- feat: cli: lotus info cmd ([filecoin-project/lotus#9233](https://github.com/filecoin-project/lotus/pull/9233))
|
||||
- A new `lotus info` command that prints useful node information in one place.
|
||||
- feat: proving: Introduce manual sector fault recovery (#9144) ([filecoin-project/lotus#9144](https://github.com/filecoin-project/lotus/pull/9144))
|
||||
- Allow users to declare fault recovery messages manually with the `lotus-miner proving recover-faults` command, rather than waiting for it to happen automatically before windowPost.
|
||||
- feat: api: Reintroduce StateActorManifestCID ([filecoin-project/lotus#9201](https://github.com/filecoin-project/lotus/pull/9201))
|
||||
- Adds ability to retrieve the Actor Manifest CID through the api.
|
||||
- feat: message: Add uuid to mpool message sent to chain node from miner ([filecoin-project/lotus#9174](https://github.com/filecoin-project/lotus/pull/9174))
|
||||
- Adds a UUID to each message sent by the `lotus-miner` to the daemon. A requirement needed for https://github.com/filecoin-project/lotus/issues/9130
|
||||
- feat: message: Add retries to mpool push message from lotus-miner ([filecoin-project/lotus#9177](https://github.com/filecoin-project/lotus/pull/9177))
|
||||
    - Adds retries to the mpool push message API in case the lotus chain node is temporarily unavailable.
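
Conceptually this is a bounded retry loop around the push call. The sketch below is illustrative only; the `push` callback, attempt count and wait interval are placeholders rather than the actual lotus-miner implementation:

```go
package example

import (
	"context"
	"time"
)

// pushWithRetry retries a push operation a fixed number of times, waiting
// between attempts, and gives up early if the context is cancelled.
// Illustrative only, not the actual lotus-miner implementation.
func pushWithRetry(ctx context.Context, push func(context.Context) error, attempts int, wait time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = push(ctx); err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(wait):
		}
	}
	return err
}
```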
|
||||
|
||||
**Network 17 related features:**
|
||||
- feat: network: add nv17 and integrate the corresponding go state type ([filecoin-project/lotus#9267](https://github.com/filecoin-project/lotus/pull/9267))
|
||||
- feat: cli: print beneficiary info in state miner-info ([filecoin-project/lotus#9308](https://github.com/filecoin-project/lotus/pull/9308))
|
||||
- feat: api/cli: change beneficiary propose and confirm for actors and multisigs. ([filecoin-project/lotus#9307](https://github.com/filecoin-project/lotus/pull/9307))
|
||||
- feat: api/cli: beneficiary withdraw api and cli ([filecoin-project/lotus#9296](https://github.com/filecoin-project/lotus/pull/9296))
|
||||
|
||||
## Enhancements
|
||||
- feat: sectors renew --only-cc ([filecoin-project/lotus#9184](https://github.com/filecoin-project/lotus/pull/9184))
|
||||
    - Exclude deal-related sectors from extension with the `--only-cc` option when using `lotus-miner sectors renew` (see the example after this list)
|
||||
- feat: miner: display updated & update-cache for storage list ([filecoin-project/lotus#9323](https://github.com/filecoin-project/lotus/pull/9323))
|
||||
- Show amount of `updated` & `update-cache` sectors in each storage path in the `lotus-miner storage list` output
|
||||
- feat: add descriptive errors to markets event handler ([filecoin-project/lotus#9326](https://github.com/filecoin-project/lotus/pull/9326))
|
||||
- More descriptive market error logs
|
||||
- feat: cli: Add option to terminate sectors from worker address ([filecoin-project/lotus#9291](https://github.com/filecoin-project/lotus/pull/9291))
|
||||
    - Adds a flag to allow either the owner address or the worker address to send the terminate sectors message.
|
||||
- fix: cli: actor-cids cli command now defaults to current network ([filecoin-project/lotus#9321](https://github.com/filecoin-project/lotus/pull/9321))
|
||||
    - Makes the command default to the current network.
|
||||
- fix: ux: Output bytes in `lotus client commP` cmd ([filecoin-project/lotus#9189](https://github.com/filecoin-project/lotus/pull/9189))
|
||||
- Adds an additional line that outputs bytes in the `lotus client commP` command.
|
||||
- fix: sealing: Add information on what worker a job was assigned to in logs ([filecoin-project/lotus#9151](https://github.com/filecoin-project/lotus/pull/9151))
|
||||
- Adds the worker hostname into the assignment logs.
|
||||
- refactor: sealing pipeline: Remove useless storage adapter code ([filecoin-project/lotus#9142](https://github.com/filecoin-project/lotus/pull/9142))
|
||||
- Remove proxy code in `storage/miner.go` / `storage/miner_sealing.go`, call the pipeline directly instead.
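
Example of the `--only-cc` renewal option mentioned above (other `lotus-miner sectors renew` flags are omitted; check `lotus-miner sectors renew --help` for the full set):

```bash
# Renew only CC (committed-capacity) sectors, skipping sectors that hold deals
lotus-miner sectors renew --only-cc
```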
|
||||
|
||||
## Bug fixes
|
||||
- fix: ffiwrapper: Close readers in AddPiece ([filecoin-project/lotus#9328](https://github.com/filecoin-project/lotus/pull/9328))
|
||||
- fix: sealing: Drop unused PreCommitInfo from pipeline.SectorInfo ([filecoin-project/lotus#9325](https://github.com/filecoin-project/lotus/pull/9325))
|
||||
- fix: cli: fix panic in `lotus-miner actor control list` ([filecoin-project/lotus#9241](https://github.com/filecoin-project/lotus/pull/9241))
|
||||
- fix: sealing: Abort upgrades in sectors with no deals ([filecoin-project/lotus#9310](https://github.com/filecoin-project/lotus/pull/9310))
|
||||
- fix: sealing: Make DataCid resource env vars make more sense ([filecoin-project/lotus#9231](https://github.com/filecoin-project/lotus/pull/9231))
|
||||
- fix: cli: Option to specify --from msg sender ([filecoin-project/lotus#9237](https://github.com/filecoin-project/lotus/pull/9237))
|
||||
- fix: ux: better ledger rejection error ([filecoin-project/lotus#9242](https://github.com/filecoin-project/lotus/pull/9242))
|
||||
- fix: ux: msg receipt for actor withdrawal ([filecoin-project/lotus#9155](https://github.com/filecoin-project/lotus/pull/9155))
|
||||
- fix: ux: exclude negative available balance from spendable amount ([filecoin-project/lotus#9182](https://github.com/filecoin-project/lotus/pull/9182))
|
||||
- fix: sealing: Avoid panicking in handleUpdateActivating on startup ([filecoin-project/lotus#9331](https://github.com/filecoin-project/lotus/pull/9331))
|
||||
- fix: api: DataCid - ensure reader is closed ([filecoin-project/lotus#9230](https://github.com/filecoin-project/lotus/pull/9230))
|
||||
- fix: verifreg: serialize RmDcProposalID as int, not tuple ([filecoin-project/lotus#9206](https://github.com/filecoin-project/lotus/pull/9206))
|
||||
- fix: api: Ignore uuid check for messages with uuid not set ([filecoin-project/lotus#9303](https://github.com/filecoin-project/lotus/pull/9303))
|
||||
- fix: cgroupV1: memory.memsw.usage_in_bytes: no such file or directory ([filecoin-project/lotus#9202](https://github.com/filecoin-project/lotus/pull/9202))
|
||||
- fix: miner: init miner's with 32GiB sectors by default ([filecoin-project/lotus#9364](https://github.com/filecoin-project/lotus/pull/9364))
|
||||
- fix: worker: Close all storage paths on worker shutdown ([filecoin-project/lotus#9153](https://github.com/filecoin-project/lotus/pull/9153))
|
||||
- fix: build: set PropagationDelaySecs correctly ([filecoin-project/lotus#9358](https://github.com/filecoin-project/lotus/pull/9358))
|
||||
- fix: renew --only-cc with sectorfile ([filecoin-project/lotus#9428](https://github.com/filecoin-project/lotus/pull/9428))
|
||||
|
||||
## Dependency updates
|
||||
- github.com/filecoin-project/go-fil-markets (v1.23.1 -> v1.24.0)
|
||||
- github.com/filecoin-project/go-jsonrpc (v0.1.5 -> v0.1.7)
|
||||
- github.com/filecoin-project/go-state-types (v0.1.10 -> v0.1.12-beta)
|
||||
- github.com/filecoin-project/go-commp-utils/nonffi (null -> v0.0.0-20220905160352-62059082a837)
|
||||
- deps: go-libp2p-pubsub v0.8.0 ([filecoin-project/lotus#9229](https://github.com/filecoin-project/lotus/pull/9229))
|
||||
- deps: libp2p v0.22 ([filecoin-project/lotus#9216](https://github.com/filecoin-project/lotus/pull/9216))
|
||||
- deps: Use latest cbor-gen ([filecoin-project/lotus#9335](https://github.com/filecoin-project/lotus/pull/9335))
|
||||
- chore: update bitswap and some libp2p packages ([filecoin-project/lotus#9279](https://github.com/filecoin-project/lotus/pull/9279))
|
||||
|
||||
## Others
|
||||
- chore: merge releases into master after v1.17.1 release ([filecoin-project/lotus#9283](https://github.com/filecoin-project/lotus/pull/9283))
|
||||
- chore: docs: Fix dead links to docs.filecoin.io ([filecoin-project/lotus#9304](https://github.com/filecoin-project/lotus/pull/9304))
|
||||
- chore: deps: update FFI ([filecoin-project/lotus#9330](https://github.com/filecoin-project/lotus/pull/9330))
|
||||
- chore: seed: add cmd for adding signers to rkh to genesis ([filecoin-project/lotus#9198](https://github.com/filecoin-project/lotus/pull/9198))
|
||||
- chore: fix typo in comment ([filecoin-project/lotus#9161](https://github.com/filecoin-project/lotus/pull/9161))
|
||||
- chore: cli: cleanup and standardize cli ([filecoin-project/lotus#9317](https://github.com/filecoin-project/lotus/pull/9317))
|
||||
- chore: versioning: Bump version to v1.17.2-dev ([filecoin-project/lotus#9147](https://github.com/filecoin-project/lotus/pull/9147))
|
||||
- chore: release: v1.17.2-rc1 ([filecoin-project/lotus#9339](https://github.com/filecoin-project/lotus/pull/9339))
|
||||
- feat: shed: add a --max-size flag to vlog2car ([filecoin-project/lotus#9212](https://github.com/filecoin-project/lotus/pull/9212))
|
||||
- fix: docsgen: revert rename of API Name to Num ([filecoin-project/lotus#9315](https://github.com/filecoin-project/lotus/pull/9315))
|
||||
- fix: ffi: Revert accidental filecoin-ffi downgrade from #9144 ([filecoin-project/lotus#9277](https://github.com/filecoin-project/lotus/pull/9277))
|
||||
- fix: miner: Call SyncBasefeeCheck from `lotus info` ([filecoin-project/lotus#9281](https://github.com/filecoin-project/lotus/pull/9281))
|
||||
- fix: mock sealer: grab lock in ReadPiece ([filecoin-project/lotus#9207](https://github.com/filecoin-project/lotus/pull/9207))
|
||||
- refactor: use `os.ReadDir` for lightweight directory reading ([filecoin-project/lotus#9282](https://github.com/filecoin-project/lotus/pull/9282))
|
||||
- tests: cli: Don't panic with no providers in client retrieve ([filecoin-project/lotus#9232](https://github.com/filecoin-project/lotus/pull/9232))
|
||||
- build: artifacts: butterfly ([filecoin-project/lotus#9027](https://github.com/filecoin-project/lotus/pull/9027))
|
||||
- build: Use lotus snap (and fix typo) for packer builds ([filecoin-project/lotus#9152](https://github.com/filecoin-project/lotus/pull/9152))
|
||||
- build: Update xcode version for macos builds ([filecoin-project/lotus#9170](https://github.com/filecoin-project/lotus/pull/9170))
|
||||
- ci: build: Snap daemon autorun disable ([filecoin-project/lotus#9167](https://github.com/filecoin-project/lotus/pull/9167))
|
||||
- ci: Use golang 1.18.1 to build appimage ([filecoin-project/lotus#9389](https://github.com/filecoin-project/lotus/pull/9389))
|
||||
- ci: Don't publish new homebrew releases for RC builds ([filecoin-project/lotus#9350](https://github.com/filecoin-project/lotus/pull/9350))
|
||||
- Merge branch 'deps/go-libp2p-v0.21'
|
||||
|
||||
Contributors
|
||||
|
||||
| Contributor | Commits | Lines ± | Files Changed |
|
||||
|-------------|---------|---------|---------------|
|
||||
| Aayush Rajasekaran | 8 | +23010/-2122 | 109 |
|
||||
| Aayush | 15 | +6168/-2679 | 360 |
|
||||
| Łukasz Magiera | 69 | +6462/-2137 | 606 |
|
||||
| Geoff Stuart | 19 | +3080/-1177 | 342 |
|
||||
| Marco Munizaga | 16 | +543/-424 | 41 |
|
||||
| Shrenuj Bansal | 30 | +485/-419 | 88 |
|
||||
| LexLuthr | 3 | +498/-12 | 19 |
|
||||
| Phi | 15 | +330/-70 | 17 |
|
||||
| Jennifer Wang | 7 | +132/-12 | 11 |
|
||||
| TippyFlitsUK | 1 | +43/-45 | 12 |
|
||||
| Steven Allen | 1 | +18/-28 | 2 |
|
||||
| Frrist | 1 | +19/-11 | 2 |
|
||||
| Eng Zer Jun | 1 | +14/-11 | 6 |
|
||||
| Dirk McCormick | 2 | +23/-1 | 3 |
|
||||
| Ian Davis | 3 | +7/-9 | 3 |
|
||||
| Masih H. Derkani | 1 | +11/-0 | 1 |
|
||||
| Anton Evangelatov | 1 | +11/-0 | 1 |
|
||||
| Yu | 2 | +4/-4 | 5 |
|
||||
| Hannah Howard | 1 | +4/-4 | 1 |
|
||||
| Phi-rjan | 1 | +1/-2 | 1 |
|
||||
| Jiaying Wang | 1 | +3/-0 | 1 |
|
||||
| nujz | 1 | +1/-1 | 1 |
|
||||
| Rob Quist | 1 | +1/-1 | 1 |
|
||||
|
||||
# v1.17.1 / 2022-09-06
|
||||
|
||||
This is an optional release of Lotus. This release introduces [Splitstore v2 (beta)](https://github.com/filecoin-project/lotus/blob/master/blockstore/splitstore/README.md). Splitstore aims to reduce the node performance impact caused by Filecoin's very large and continuously growing datastore. Splitstore v2 introduces a coldstore auto prune/GC feature and some improvements for the hotstore. We welcome all lotus users to join the early testers and try the new Splitstore out; you can leave feedback or report issues in [this discussion](https://github.com/filecoin-project/lotus/discussions/9179) or create an issue. As always, multiple small bug fixes, new features and improvements are also included in this release.
|
||||
|
||||
|
||||
## New features
|
||||
|
||||
- feat:chain:splitstore auto prune ([filecoin-project/lotus#9123](https://github.com/filecoin-project/lotus/pull/9123))
|
||||
- Trigger SplitStore chain prune on head events. [Link to the documentation](https://lotus.filecoin.io/lotus/manage/chain-management/#cold-store-garbage-collection)
|
||||
- feat:chain:splitstore chain prune ([filecoin-project/lotus#9056](https://github.com/filecoin-project/lotus/pull/9056))
|
||||
  - Adds a `chain prune` command to trigger manual garbage collection (see the example after this list). [Link to the documentation](https://lotus.filecoin.io/lotus/manage/chain-management/#cold-store-garbage-collection)
|
||||
- feat: storage: Path type filters ([filecoin-project/lotus#9013](https://github.com/filecoin-project/lotus/pull/9013))
|
||||
- Adds new fields to `sectorstore.json` to allow file type filtering. [Link to the documentation](https://lotus.filecoin.io/storage-providers/operate/custom-storage-layout/#filter-sector-types-1)
|
||||
- feat: sealing: storage redeclare/detach ([filecoin-project/lotus#9032](https://github.com/filecoin-project/lotus/pull/9032))
|
||||
- Adds new Lotus commands to detach and redeclare storage paths. [Link to the documentation](https://lotus.filecoin.io/storage-providers/operate/custom-storage-layout/#detach-storage-paths)
|
||||
- feat: worker: Add stop cmd for lotus worker ([filecoin-project/lotus#9101](https://github.com/filecoin-project/lotus/pull/9101))
|
||||
- Adds new `lotus-worker stop` command. [Link to the documentation](https://lotus.filecoin.io/storage-providers/seal-workers/seal-workers/#stop-the-worker)
|
||||
- feat: market: Add lotus-shed cmd to get total active deal storage ([filecoin-project/lotus#9113](https://github.com/filecoin-project/lotus/pull/9113))
|
||||
- `get-deals-total-storage` - View the total storage available in all active market deals
|
||||
- feat: wdpost: Envvar for limiting recovering sectors ([filecoin-project/lotus#9106](https://github.com/filecoin-project/lotus/pull/9106))
|
||||
- Adds new envvar to limit the number of sectors declared in the recover message
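
For the manual splitstore garbage collection mentioned above, the minimal invocation looks like this (assumes a node running with splitstore enabled; see the linked chain-management docs for the available options):

```bash
# Manually trigger splitstore cold-store garbage collection on the node
lotus chain prune
```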
|
||||
|
||||
|
||||
## Improvements
|
||||
|
||||
- feat: sealing: Allow overriding worker hostname ([filecoin-project/lotus#9116](https://github.com/filecoin-project/lotus/pull/9116))
|
||||
- feat: build: run fiximports on make actors-gen ([filecoin-project/lotus#9114](https://github.com/filecoin-project/lotus/pull/9114))
|
||||
- feat: FVM: always enable tracing for user-triggered executions ([filecoin-project/lotus#9036](https://github.com/filecoin-project/lotus/pull/9036))
|
||||
- feat: miner cli: proving deadline command enchantments ([filecoin-project/lotus#9109](https://github.com/filecoin-project/lotus/pull/9109))
|
||||
- FVM: Use MaxInt64 for Implicit Message gas limits ([filecoin-project/lotus#9037](https://github.com/filecoin-project/lotus/pull/9037))
|
||||
- lotus shed addr decode
|
||||
- push lotus-gateway to docker hub ([filecoin-project/lotus#8969](https://github.com/filecoin-project/lotus/pull/8969))
|
||||
- Review Response
|
||||
- test: net: net and conngater tests ([filecoin-project/lotus#8084](https://github.com/filecoin-project/lotus/pull/8084))
|
||||
- Update FFI ([filecoin-project/lotus#9139](https://github.com/filecoin-project/lotus/pull/9139))
|
||||
|
||||
## Bug Fixes
|
||||
|
||||
- backport: 9153: detach storage on worker shutdown ([filecoin-project/lotus#9165](https://github.com/filecoin-project/lotus/pull/9165))
|
||||
- fix makegen
|
||||
- fix: build: use GOCC when building lotus-fountain ([filecoin-project/lotus#9127](https://github.com/filecoin-project/lotus/pull/9127))
|
||||
- fix: ci: Forgot a .sh on the end of a the new publish script ([filecoin-project/lotus#9088](https://github.com/filecoin-project/lotus/pull/9088))
|
||||
- fix: cli: ./lotus-miner actor control list, if the owner is not account ([filecoin-project/lotus#9072](https://github.com/filecoin-project/lotus/pull/9072))
|
||||
- fix: deps: update FFI to fix a slow memory leak ([filecoin-project/lotus#9042](https://github.com/filecoin-project/lotus/pull/9042))
|
||||
- fix: FVM: record message applied metrics ([filecoin-project/lotus#9052](https://github.com/filecoin-project/lotus/pull/9052))
|
||||
- fix: gas: estimate gas with a zero base-fee ([filecoin-project/lotus#8991](https://github.com/filecoin-project/lotus/pull/8991))
|
||||
- fix: post: restrict recoveries per deadline ([filecoin-project/lotus#9111](https://github.com/filecoin-project/lotus/pull/9111))
|
||||
- fix: sealing: Workaround for sealing bug ([filecoin-project/lotus#9043](https://github.com/filecoin-project/lotus/pull/9043))
|
||||
- fix: storage: don't panic in getCommitCutoff when precommit is not found ([filecoin-project/lotus#9141](https://github.com/filecoin-project/lotus/pull/9141))
|
||||
- fix: test: deflake TestQuotePriceForUnsealedRetrieval ([filecoin-project/lotus#9084](https://github.com/filecoin-project/lotus/pull/9084))
|
||||
|
||||
## Dependency Updates
|
||||
|
||||
- github.com/multiformats/go-multibase (v0.0.3 -> v0.1.1)
|
||||
|
||||
## Others
|
||||
|
||||
- chore: ci: Update xcode version for macos builds ([filecoin-project/lotus#9164](https://github.com/filecoin-project/lotus/pull/9164))
|
||||
- Merge branch 'docs/ysrotciv-desc'
|
||||
- Merge branch 'feat/f8-worker-env'
|
||||
- Merge branch 'LexLuthr-feat/minerWithdrawBalanceAPI'
|
||||
- Merge branch 'LexLuthr-feat/SchedRemoveRequest'
|
||||
- base256emoji ([filecoin-project/lotus#9038](https://github.com/filecoin-project/lotus/pull/9038))
|
||||
- chore: interop: update interop assets ([filecoin-project/lotus#9093](https://github.com/filecoin-project/lotus/pull/9093))
|
||||
- chore: merge: releases (v1.17.0) to master ([filecoin-project/lotus#9146](https://github.com/filecoin-project/lotus/pull/9146))
|
||||
- chore: sealer: Fixup typos ([filecoin-project/lotus#9040](https://github.com/filecoin-project/lotus/pull/9040))
|
||||
- chore:docs:remove readme reference to deprecated specs-actors ([filecoin-project/lotus#8984](https://github.com/filecoin-project/lotus/pull/8984))
|
||||
- ci: Change default shell options for snapcraft publish ([filecoin-project/lotus#9122](https://github.com/filecoin-project/lotus/pull/9122))
|
||||
- ci: More tweaks to snapcraft release process ([filecoin-project/lotus#9090](https://github.com/filecoin-project/lotus/pull/9090))
|
||||
- ci: Publish to both lotus and lotus-filecoin for snap ([filecoin-project/lotus#9119](https://github.com/filecoin-project/lotus/pull/9119))
|
||||
- ci: Run snap builds for lotus and lotus-filecoin in parallel ([filecoin-project/lotus#9133](https://github.com/filecoin-project/lotus/pull/9133))
|
||||
- ci: Switches goreleaser notes back to default (keep-existing) ([filecoin-project/lotus#9120](https://github.com/filecoin-project/lotus/pull/9120))
|
||||
- ci: update snapcraft and release flow logic ([filecoin-project/lotus#8994](https://github.com/filecoin-project/lotus/pull/8994))
|
||||
- ci: Use goreleaser to build macos universal binaries (including M1 macs) ([filecoin-project/lotus#9096](https://github.com/filecoin-project/lotus/pull/9096))
|
||||
- ci:testing:remove codecov ([filecoin-project/lotus#9062](https://github.com/filecoin-project/lotus/pull/9062))
|
||||
|
||||
|
||||
Contributors
|
||||
|
||||
| Contributor | Commits | Lines ± | Files Changed |
|
||||
|-------------|---------|---------|---------------|
|
||||
| Łukasz Magiera | 34 | +2329/-317 | 163 |
|
||||
| ZenGround0 | 2 | +1527/-89 | 38 |
|
||||
| Ian Davis | 14 | +751/-232 | 30 |
|
||||
| LexLuthr | 17 | +480/-225 | 63 |
|
||||
| TheMenko | 4 | +323/-61 | 5 |
|
||||
| Aayush | 10 | +285/-92 | 30 |
|
||||
| beck | 3 | +143/-93 | 3 |
|
||||
| Steven Allen | 4 | +95/-75 | 9 |
|
||||
| zenground0 | 5 | +44/-116 | 9 |
|
||||
| Shrenuj Bansal | 7 | +136/-7 | 16 |
|
||||
| Patrick Deuse | 3 | +76/-57 | 3 |
|
||||
| Jennifer Wang | 3 | +6/-52 | 11 |
|
||||
| zl | 2 | +20/-16 | 2 |
|
||||
| Aayush Rajasekaran | 2 | +6/-6 | 2 |
|
||||
| Clint Armstrong | 1 | +7/-3 | 1 |
|
||||
| Cory Schwartz | 2 | +9/-0 | 2 |
|
||||
| Jorropo | 1 | +3/-2 | 2 |
|
||||
| Geoff Stuart | 1 | +5/-0 | 1 |
|
||||
| Frank Y | 1 | +2/-2 | 2 |
|
||||
| Aloxaf | 1 | +2/-2 | 1 |
|
||||
|
||||
|
||||
# Lotus changelog
|
||||
|
||||
# v1.17.0 / 2022-08-02
|
||||
|
||||
This is an optional release of Lotus. This feature release introduces a lot of new sealing and scheduler improvements, and many other functionalities and bug fixes.
|
||||
@ -1336,7 +1724,7 @@ storage providers and clients.
|
||||
|
||||
## Highlights
|
||||
- 🌟🌟🌟 Introduce Dagstore and CARv2 for deal-making (#6671) ([filecoin-project/lotus#6671](https://github.com/filecoin-project/lotus/pull/6671))
|
||||
- **[lotus miner markets' Dagstore](https://docs.filecoin.io/mine/lotus/dagstore/#conceptual-overview)** is a
|
||||
- **[lotus miner markets' Dagstore](https://lotus.filecoin.io/storage-providers/operate/dagstore/)** is a
|
||||
component of the `markets` subsystem in lotus-miner. It is a sharded store to hold large IPLD graphs efficiently,
|
||||
packaged as location-transparent attachable CAR files and it replaces the former Badger staging blockstore. It
|
||||
is designed to provide high efficiency and throughput, and minimize resource utilization during deal-making operations.
|
||||
@ -1344,18 +1732,18 @@ storage providers and clients.
|
||||
blockstores, which are served as the direct medium for data exchanges in markets for both storage and retrieval
|
||||
deal making without requiring intermediate buffers.
|
||||
- In the future, lotus will leverage and interact with Dagstore a lot for new features and improvements for deal
|
||||
making, therefore, it's highly recommended to lotus users to go through [Lotus Miner: About the markets dagstore](https://docs.filecoin.io/mine/lotus/dagstore/#conceptual-overview) thoroughly to learn more about Dagstore's
|
||||
making, therefore, it's highly recommended to lotus users to go through [Lotus Miner: About the markets dagstore](https://lotus.filecoin.io/storage-providers/operate/dagstore/) thoroughly to learn more about Dagstore's
|
||||
conceptual overview, terminology, directory structure, configuration and so on.
|
||||
- **Note**:
|
||||
- When you first start your lotus-miner or market subsystem with this release, a one-time/first-time **dagstore migration** will be triggered which replaces the former Badger staging blockstore with dagstore. We highly
|
||||
recommend storage providers to read this [section](https://docs.filecoin.io/mine/lotus/dagstore/#first-time-migration) to learn more about
|
||||
recommend storage providers to read this [section](https://lotus.filecoin.io/storage-providers/operate/dagstore/#first-time-migration) to learn more about
|
||||
what the process does, what to expect and how monitor it.
|
||||
- It is highly recommended to **wait all ongoing data transfer to finish or cancel inbound storage deals that
|
||||
are still transferring**, using the `lotus-miner data-transfers cancel` command before upgrade your market nodes. Reason being that the new dagstore changes attributes in the internal deal state objects, and the paths to the staging CARs where the deal data was being placed will be lost.
|
||||
- ‼️Having your dags initialized will become important in the near feature for you to provide a better storage
|
||||
and retrieval service. We'd suggest you to start [forced bulk initialization] soon if possible as this process
|
||||
places relatively high IP workload on your storage system and is better to be carried out gradually and over a
|
||||
longer timeframe. Read how to do properly perform a force bulk initialization [here](https://docs.filecoin.io/mine/lotus/dagstore/#forcing-bulk-initialization).
|
||||
longer timeframe. Read how to do properly perform a force bulk initialization [here](https://lotus.filecoin.io/storage-providers/operate/dagstore/#forcing-bulk-initialization).
|
||||
- ⏮ Rollback Alert(from v1.11.2-rcX to any version lower): If a storages deal is initiated with M1/v1.11.2(-rcX)
|
||||
release, it needs to get to the `StorageDealAwaitingPrecommit` state before you can do a version rollback or the markets process may panic.
|
||||
- 💙 **Special thanks to [MinerX fellows for testing and providing valuable feedbacks](https://github.com/filecoin-project/lotus/discussions/6852) for Dagstore in the past month!**
|
||||
@ -1485,8 +1873,8 @@ Contributors
|
||||
This is a **highly recommended** but optional Lotus v1.11.1 release that introduces many deal making and datastore improvements and new features along with other bug fixes.
|
||||
|
||||
## Highlights
|
||||
- ⭐️⭐️⭐️[**lotus-miner market subsystem**](https://docs.filecoin.io/mine/lotus/split-markets-miners/#frontmatter-title) is introduced in this release! It is **highly recommended** for storage providers to run markets processes on a separate machine! Doing so, only this machine needs to exposes public ports for deal making. This also means that the other miner operations can now be completely isolated by from the deal making processes and storage providers can stop and restarts the markets process without affecting an ongoing Winning/Window PoSt!
|
||||
- More details on the concepts, architecture and how to split the market process can be found [here](https://docs.filecoin.io/mine/lotus/split-markets-miners/#concepts).
|
||||
- ⭐️⭐️⭐️[**lotus-miner market subsystem**](https://lotus.filecoin.io/storage-providers/advanced-configurations/split-markets-miners/) is introduced in this release! It is **highly recommended** for storage providers to run markets processes on a separate machine! Doing so, only this machine needs to exposes public ports for deal making. This also means that the other miner operations can now be completely isolated by from the deal making processes and storage providers can stop and restarts the markets process without affecting an ongoing Winning/Window PoSt!
|
||||
- More details on the concepts, architecture and how to split the market process can be found [here](https://lotus.filecoin.io/storage-providers/advanced-configurations/split-markets-miners/#concepts).
|
||||
- Base on your system setup(running on separate machines, same machine and so on), please see the suggested practice by community members [here](https://github.com/filecoin-project/lotus/discussions/7047#discussion-3515335).
|
||||
- Note: if you are running lotus-worker on a different machine, you will need to set `MARKETS_API_INFO` for certain CLI to work properly. This will be improved by #7072.
|
||||
- Huge thanks to MinerX fellows for [helping testing the implementation, reporting the issues so they were fixed by now and providing feedbacks](https://github.com/filecoin-project/lotus/discussions/6861) to user docs in the past three weeks!
|
||||
@ -1496,7 +1884,7 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd
|
||||
- `AvailableBalanceBuffer`: minimum available balance to keep in the miner actor before sending it with messages, default is 0FIL.
|
||||
- `DisableCollateralFallback`: whether to send collateral with messages even if there is no available balance in the miner actor, default is `false`.
|
||||
- Config for deal publishing control addresses ([filecoin-project/lotus#6697](https://github.com/filecoin-project/lotus/pull/6697))
|
||||
- Set `DealPublishControl` to set the wallet used for sending `PublishStorageDeals` messages, instructions [here](https://docs.filecoin.io/mine/lotus/miner-addresses/#control-addresses).
|
||||
- Set `DealPublishControl` to set the wallet used for sending `PublishStorageDeals` messages, instructions [here](https://lotus.filecoin.io/storage-providers/operate/addresses/#control-addresses).
|
||||
- Config UX improvements ([filecoin-project/lotus#6848](https://github.com/filecoin-project/lotus/pull/6848))
|
||||
- You can now preview the the default and updated node config by running `lotus/lotus-miner config default/updated`
|
||||
|
||||
@ -1925,7 +2313,7 @@ Note that this release is built on top of Lotus v1.9.0. Enterprising users can u
|
||||
|
||||
FIPs [0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.md) and [0013](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0013.md) combine to allow for a significant increase in the rate of onboarding storage on the Filecoin network. This aims to lead to more useful data being stored on the network, reduced network congestion, and lower network base fee.
|
||||
|
||||
**Check out the documentation [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch) for details on the new Lotus miner sealing config options, [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#fees-section) for fee config options, and explanations of the new features.**
|
||||
**Check out the documentation [here]((https://lotus.filecoin.io/storage-providers/advanced-configurations/sealing/#precommitsectorsbatch) for details on the new Lotus miner sealing config options, [here](https://lotus.filecoin.io/storage-providers/setup/configuration/#fees-section) for fee config options, and explanations of the new features.**
|
||||
|
||||
Note:
|
||||
- We recommend to keep `PreCommitSectorsBatch` as 1.
|
||||
@ -1941,7 +2329,7 @@ Given these assumptions:
|
||||
|
||||
- We'd expect a network storage growth rate of around 530PiB per day. 😳 🎉 🥳 😅
|
||||
- We'd expect network bandwidth dedicated to `SubmitWindowedPoSt` to grow by about 0.02% per day.
|
||||
- We'd expect the [state-tree](https://spec.filecoin.io/#section-systems.filecoin_vm.state_tree) (and therefore [snapshot](https://docs.filecoin.io/get-started/lotus/chain/#lightweight-snapshot)) size to grow by 1.16GiB per day.
|
||||
- We'd expect the [state-tree](https://spec.filecoin.io/#section-systems.filecoin_vm.state_tree) (and therefore [snapshot](https://lotus.filecoin.io/lotus/manage/chain-management/#lightweight-snapshot)) size to grow by 1.16GiB per day.
|
||||
- Nearly all of the state-tree growth is expected to come from new sector metadata.
|
||||
- We'd expect the daily lotus datastore growth rate to increase by about 10-15% (from current ~21GiB/day).
|
||||
- Most "growth" of the lotus datastore is due to "churn", historical data that's no longer referenced by the latest state-tree.
|
||||
@ -1962,7 +2350,7 @@ Included in the HyperDrive upgrade is [FIP-0015](https://github.com/filecoin-pro
|
||||
|
||||
- Implement FIP-0015 ([filecoin-project/lotus#6361](https://github.com/filecoin-project/lotus/pull/6361))
|
||||
- Integrate FIP0013 and FIP0008 ([filecoin-project/lotus#6235](https://github.com/filecoin-project/lotus/pull/6235))
|
||||
- [Configuration docs and cli examples](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch)
|
||||
- [Configuration docs and cli examples](https://lotus.filecoin.io/storage-providers/advanced-configurations/sealing/#precommitsectorsbatch)
|
||||
- [cli docs](https://github.com/filecoin-project/lotus/blob/master/documentation/en/cli-lotus-miner.md#lotus-miner-sectors-batching)
|
||||
- Introduce gas prices for aggregate verifications ([filecoin-project/lotus#6347](https://github.com/filecoin-project/lotus/pull/6347))
|
||||
- Introduce v5 actors ([filecoin-project/lotus#6195](https://github.com/filecoin-project/lotus/pull/6195))
|
||||
@ -2280,7 +2668,7 @@ Note that this release does NOT set an upgrade epoch for v3 actors to take effec
|
||||
- [#5309](https://github.com/filecoin-project/lotus/pull/5309) Batch multiple deals in one `PublishStorageMessages`
|
||||
- [#5411](https://github.com/filecoin-project/lotus/pull/5411) Handle batch `PublishStorageDeals` message in sealing recovery
|
||||
- [#5505](https://github.com/filecoin-project/lotus/pull/5505) Exclude expired deals from batching in `PublishStorageDeals` messages
|
||||
- Added `PublishMsgPeriod` and `MaxDealsPerPublishMsg` to miner `Dealmaking` [configuration](https://docs.filecoin.io/mine/lotus/miner-configuration/#dealmaking-section). See how they work [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#publishing-several-deals-in-one-message).
|
||||
- Added `PublishMsgPeriod` and `MaxDealsPerPublishMsg` to miner `Dealmaking` [configuration](https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#dealmaking-section). See how they work [here](https://lotus.filecoin.io/storage-providers/advanced-configurations/market/#publishing-several-deals-in-one-message).
|
||||
- [#5538](https://github.com/filecoin-project/lotus/pull/5538), [#5549](https://github.com/filecoin-project/lotus/pull/5549) Added a command to list pending deals and force publish messages.
|
||||
- Run `lotus-miner market pending-publish`
|
||||
- [#5428](https://github.com/filecoin-project/lotus/pull/5428) Moved waiting for `PublishStorageDeals` messages' receipt from markets to lotus
|
||||
|
134
Dockerfile
Normal file
@ -0,0 +1,134 @@
|
||||
#####################################
|
||||
FROM golang:1.18.8-buster AS lotus-builder
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
|
||||
|
||||
ENV XDG_CACHE_HOME="/tmp"
|
||||
|
||||
### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile
|
||||
ENV RUSTUP_HOME=/usr/local/rustup \
|
||||
CARGO_HOME=/usr/local/cargo \
|
||||
PATH=/usr/local/cargo/bin:$PATH \
|
||||
RUST_VERSION=1.63.0
|
||||
|
||||
RUN set -eux; \
|
||||
dpkgArch="$(dpkg --print-architecture)"; \
|
||||
case "${dpkgArch##*-}" in \
|
||||
amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \
|
||||
arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \
|
||||
*) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \
|
||||
esac; \
|
||||
url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \
|
||||
wget "$url"; \
|
||||
echo "${rustupSha256} *rustup-init" | sha256sum -c -; \
|
||||
chmod +x rustup-init; \
|
||||
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \
|
||||
rm rustup-init; \
|
||||
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
|
||||
rustup --version; \
|
||||
cargo --version; \
|
||||
rustc --version;
|
||||
|
||||
COPY ./ /opt/filecoin
|
||||
WORKDIR /opt/filecoin
|
||||
|
||||
### make configurable filecoin-ffi build
|
||||
ARG FFI_BUILD_FROM_SOURCE=0
|
||||
ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE}
|
||||
|
||||
RUN make clean deps
|
||||
|
||||
ARG RUSTFLAGS=""
|
||||
ARG GOFLAGS=""
|
||||
|
||||
RUN make buildall
|
||||
|
||||
#####################################
|
||||
FROM ubuntu:20.04 AS lotus-base
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
# Base resources
|
||||
COPY --from=lotus-builder /etc/ssl/certs /etc/ssl/certs
|
||||
COPY --from=lotus-builder /lib/*/libdl.so.2 /lib/
|
||||
COPY --from=lotus-builder /lib/*/librt.so.1 /lib/
|
||||
COPY --from=lotus-builder /lib/*/libgcc_s.so.1 /lib/
|
||||
COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/
|
||||
COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/
|
||||
COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/
|
||||
COPY --from=lotus-builder /usr/lib/*/libhwloc.so.5 /lib/
|
||||
COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/
|
||||
|
||||
RUN useradd -r -u 532 -U fc \
|
||||
&& mkdir -p /etc/OpenCL/vendors \
|
||||
&& echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
|
||||
|
||||
#####################################
|
||||
FROM lotus-base AS lotus
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/
|
||||
COPY scripts/docker-lotus-entrypoint.sh /
|
||||
|
||||
ARG DOCKER_LOTUS_IMPORT_SNAPSHOT https://snapshots.mainnet.filops.net/minimal/latest
|
||||
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT ${DOCKER_LOTUS_IMPORT_SNAPSHOT}
|
||||
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
|
||||
ENV LOTUS_PATH /var/lib/lotus
|
||||
ENV DOCKER_LOTUS_IMPORT_WALLET ""
|
||||
|
||||
RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters
|
||||
RUN chown fc: /var/lib/lotus /var/tmp/filecoin-proof-parameters
|
||||
|
||||
VOLUME /var/lib/lotus
|
||||
VOLUME /var/tmp/filecoin-proof-parameters
|
||||
|
||||
USER fc
|
||||
|
||||
EXPOSE 1234
|
||||
|
||||
ENTRYPOINT ["/docker-lotus-entrypoint.sh"]
|
||||
|
||||
CMD ["-help"]
|
||||
|
||||
#####################################
|
||||
FROM lotus-base AS lotus-all-in-one
|
||||
|
||||
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
|
||||
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
|
||||
ENV LOTUS_PATH /var/lib/lotus
|
||||
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
|
||||
ENV WALLET_PATH /var/lib/lotus-wallet
|
||||
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus-seed /usr/local/bin/
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/
|
||||
COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/
|
||||
|
||||
RUN mkdir /var/tmp/filecoin-proof-parameters
|
||||
RUN mkdir /var/lib/lotus
|
||||
RUN mkdir /var/lib/lotus-miner
|
||||
RUN mkdir /var/lib/lotus-worker
|
||||
RUN mkdir /var/lib/lotus-wallet
|
||||
RUN chown fc: /var/tmp/filecoin-proof-parameters
|
||||
RUN chown fc: /var/lib/lotus
|
||||
RUN chown fc: /var/lib/lotus-miner
|
||||
RUN chown fc: /var/lib/lotus-worker
|
||||
RUN chown fc: /var/lib/lotus-wallet
|
||||
|
||||
|
||||
VOLUME /var/tmp/filecoin-proof-parameters
|
||||
VOLUME /var/lib/lotus
|
||||
VOLUME /var/lib/lotus-miner
|
||||
VOLUME /var/lib/lotus-worker
|
||||
VOLUME /var/lib/lotus-wallet
|
||||
|
||||
EXPOSE 1234
|
||||
EXPOSE 2345
|
||||
EXPOSE 3456
|
||||
EXPOSE 1777
|
@ -1,30 +1,47 @@
|
||||
FROM golang:1.18.1-buster AS builder-deps
|
||||
##### DEPRECATED
|
||||
|
||||
FROM golang:1.18.8-buster AS builder-deps
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
|
||||
|
||||
ARG RUST_VERSION=nightly
|
||||
ENV XDG_CACHE_HOME="/tmp"
|
||||
|
||||
### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile
|
||||
ENV RUSTUP_HOME=/usr/local/rustup \
|
||||
CARGO_HOME=/usr/local/cargo \
|
||||
PATH=/usr/local/cargo/bin:$PATH
|
||||
PATH=/usr/local/cargo/bin:$PATH \
|
||||
RUST_VERSION=1.63.0
|
||||
|
||||
RUN wget "https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init"; \
|
||||
RUN set -eux; \
|
||||
dpkgArch="$(dpkg --print-architecture)"; \
|
||||
case "${dpkgArch##*-}" in \
|
||||
amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \
|
||||
arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \
|
||||
*) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \
|
||||
esac; \
|
||||
url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \
|
||||
wget "$url"; \
|
||||
echo "${rustupSha256} *rustup-init" | sha256sum -c -; \
|
||||
chmod +x rustup-init; \
|
||||
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION; \
|
||||
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \
|
||||
rm rustup-init; \
|
||||
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
|
||||
rustup --version; \
|
||||
cargo --version; \
|
||||
rustc --version;
|
||||
|
||||
### end rust
|
||||
|
||||
FROM builder-deps AS builder-local
|
||||
MAINTAINER Lotus Development Team
|
||||
|
||||
COPY ./ /opt/filecoin
|
||||
WORKDIR /opt/filecoin
|
||||
|
||||
### make configurable filecoin-ffi build
|
||||
ARG FFI_BUILD_FROM_SOURCE=0
|
||||
ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE}
|
||||
|
||||
RUN make clean deps
|
||||
|
||||
|
||||
@ -52,14 +69,14 @@ MAINTAINER Lotus Development Team
|
||||
|
||||
# Base resources
|
||||
COPY --from=builder /etc/ssl/certs /etc/ssl/certs
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/libdl.so.2 /lib/
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/librt.so.1 /lib/
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/
|
||||
COPY --from=builder /lib/x86_64-linux-gnu/libutil.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libltdl.so.7 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/
|
||||
COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/
|
||||
COPY --from=builder /lib/*/libdl.so.2 /lib/
|
||||
COPY --from=builder /lib/*/librt.so.1 /lib/
|
||||
COPY --from=builder /lib/*/libgcc_s.so.1 /lib/
|
||||
COPY --from=builder /lib/*/libutil.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/*/libltdl.so.7 /lib/
|
||||
COPY --from=builder /usr/lib/*/libnuma.so.1 /lib/
|
||||
COPY --from=builder /usr/lib/*/libhwloc.so.5 /lib/
|
||||
COPY --from=builder /usr/lib/*/libOpenCL.so.1 /lib/
|
||||
|
||||
RUN useradd -r -u 532 -U fc \
|
||||
&& mkdir -p /etc/OpenCL/vendors \
|
||||
|
1
GO_VERSION_MIN
Normal file
@ -0,0 +1 @@
|
||||
1.18.8
|
27
Makefile
@ -8,9 +8,11 @@ unexport GOFLAGS
|
||||
GOCC?=go
|
||||
|
||||
GOVERSION:=$(shell $(GOCC) version | tr ' ' '\n' | grep go1 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}')
|
||||
ifeq ($(shell expr $(GOVERSION) \< 1018001), 1)
|
||||
GOVERSIONMIN:=$(shell cat GO_VERSION_MIN | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}')
|
||||
|
||||
ifeq ($(shell expr $(GOVERSION) \< $(GOVERSIONMIN)), 1)
|
||||
$(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000))
|
||||
$(error Update Golang to version to at least 1.18.1)
|
||||
$(error Update Golang to version to at least $(shell cat GO_VERSION_MIN))
|
||||
endif
|
||||
|
||||
# git modules that need to be loaded
|
||||
@ -64,7 +66,7 @@ CLEAN+=build/.update-modules
|
||||
deps: $(BUILD_DEPS)
|
||||
.PHONY: deps
|
||||
|
||||
build-devnets: build lotus-seed lotus-shed lotus-wallet lotus-gateway lotus-fountain lotus-stats
|
||||
build-devnets: build lotus-seed lotus-shed
|
||||
.PHONY: build-devnets
|
||||
|
||||
debug: GOFLAGS+=-tags=debug
|
||||
@ -160,18 +162,6 @@ benchmarks:
|
||||
@curl -X POST 'http://benchmark.kittyhawk.wtf/benchmark' -d '@bench.json' -u "${benchmark_http_cred}"
|
||||
.PHONY: benchmarks
|
||||
|
||||
lotus-pond: 2k
|
||||
$(GOCC) build -o lotus-pond ./lotuspond
|
||||
.PHONY: lotus-pond
|
||||
BINS+=lotus-pond
|
||||
|
||||
lotus-pond-front:
|
||||
(cd lotuspond/front && npm i && CI=false npm run build)
|
||||
.PHONY: lotus-pond-front
|
||||
|
||||
lotus-pond-app: lotus-pond-front lotus-pond
|
||||
.PHONY: lotus-pond-app
|
||||
|
||||
lotus-fountain:
|
||||
rm -f lotus-fountain
|
||||
$(GOCC) build $(GOFLAGS) -o lotus-fountain ./cmd/lotus-fountain
|
||||
@ -299,9 +289,6 @@ type-gen: api-gen
|
||||
$(GOCC) generate -x ./...
|
||||
goimports -w api/
|
||||
|
||||
method-gen: api-gen
|
||||
(cd ./lotuspond/front/src/chain && $(GOCC) run ./methodgen.go)
|
||||
|
||||
actors-code-gen:
|
||||
$(GOCC) run ./gen/inline-gen . gen/inlinegen-data.json
|
||||
$(GOCC) run ./chain/actors/agen
|
||||
@ -367,7 +354,7 @@ docsgen-openrpc-gateway: docsgen-openrpc-bin
|
||||
fiximports:
|
||||
./scripts/fiximports
|
||||
|
||||
gen: actors-code-gen type-gen method-gen cfgdoc-gen docsgen api-gen circleci bundle-gen fiximports
|
||||
gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci bundle-gen fiximports
|
||||
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli"
|
||||
.PHONY: gen
|
||||
|
||||
@ -379,7 +366,7 @@ snap: lotus lotus-miner lotus-worker
|
||||
|
||||
# separate from gen because it needs binaries
|
||||
docsgen-cli: lotus lotus-miner lotus-worker
|
||||
python ./scripts/generate-lotus-cli.py
|
||||
python3 ./scripts/generate-lotus-cli.py
|
||||
./lotus config default > documentation/en/default-lotus-config.toml
|
||||
./lotus-miner config default > documentation/en/default-lotus-miner-config.toml
|
||||
.PHONY: docsgen-cli
|
||||
|
20
README.md
@ -1,5 +1,5 @@
|
||||
<p align="center">
|
||||
<a href="https://docs.filecoin.io/" title="Filecoin Docs">
|
||||
<a href="https://lotus.filecoin.io/" title="Filecoin Docs">
|
||||
<img src="documentation/images/lotus_logo_h.png" alt="Project Lotus Logo" width="244" />
|
||||
</a>
|
||||
</p>
|
||||
@ -10,7 +10,7 @@
|
||||
<a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
|
||||
<a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
|
||||
<a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
|
||||
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.17-blue.svg" /></a>
|
||||
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.18.8-blue.svg" /></a>
|
||||
<br>
|
||||
</p>
|
||||
|
||||
@ -67,14 +67,14 @@ Fedora:
|
||||
sudo dnf -y install gcc make git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm wget hwloc hwloc-devel
|
||||
```
|
||||
|
||||
For other distributions you can find the required dependencies [here.](https://docs.filecoin.io/get-started/lotus/installation/#system-specific) For instructions specific to macOS, you can find them [here.](https://docs.filecoin.io/get-started/lotus/installation/#macos)
|
||||
For other distributions you can find the required dependencies [here.](https://lotus.filecoin.io/lotus/install/prerequisites/#supported-platforms) For instructions specific to macOS, you can find them [here.](https://lotus.filecoin.io/lotus/install/macos/)
|
||||
|
||||
#### Go
|
||||
|
||||
To build Lotus, you need a working installation of [Go 1.18.1 or higher](https://golang.org/dl/):
|
||||
To build Lotus, you need a working installation of [Go 1.18.8 or higher](https://golang.org/dl/):
|
||||
|
||||
```bash
|
||||
wget -c https://golang.org/dl/go1.18.1.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
|
||||
wget -c https://golang.org/dl/go1.18.8.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
|
||||
```
|
||||
|
||||
**TIP:**
|
||||
@ -101,7 +101,7 @@ Note: The default branch `master` is the dev branch where the latest new feature
|
||||
|
||||
2. To join mainnet, checkout the [latest release](https://github.com/filecoin-project/lotus/releases).
|
||||
|
||||
If you are changing networks from a previous Lotus installation or there has been a network reset, read the [Switch networks guide](https://docs.filecoin.io/get-started/lotus/switch-networks/) before proceeding.
|
||||
If you are changing networks from a previous Lotus installation or there has been a network reset, read the [Switch networks guide](https://lotus.filecoin.io/lotus/manage/switch-networks/) before proceeding.
|
||||
|
||||
For networks other than mainnet, look up the current branch or tag/commit for the network you want to join in the [Filecoin networks dashboard](https://network.filecoin.io), then build Lotus for your specific network below.
|
||||
|
||||
@ -113,8 +113,8 @@ Note: The default branch `master` is the dev branch where the latest new feature
|
||||
|
||||
Currently, the latest code on the _master_ branch corresponds to mainnet.
|
||||
|
||||
3. If you are in China, see "[Lotus: tips when running in China](https://docs.filecoin.io/get-started/lotus/tips-running-in-china/)".
|
||||
4. This build instruction uses the prebuilt proofs binaries. If you want to build the proof binaries from source check the [complete instructions](https://docs.filecoin.io/get-started/lotus/installation/#build-and-install-lotus). Note, if you are building the proof binaries from source, [installing rustup](https://docs.filecoin.io/get-started/lotus/installation/#rustup) is also needed.
|
||||
3. If you are in China, see "[Lotus: tips when running in China](https://lotus.filecoin.io/lotus/configure/nodes-in-china/)".
|
||||
4. This build instruction uses the prebuilt proofs binaries. If you want to build the proof binaries from source check the [complete instructions](https://lotus.filecoin.io/lotus/install/prerequisites/). Note, if you are building the proof binaries from source, [installing rustup](https://lotus.filecoin.io/lotus/install/linux/#rustup) is also needed.
|
||||
|
||||
5. Build and install Lotus:
|
||||
|
||||
@ -129,9 +129,9 @@ Note: The default branch `master` is the dev branch where the latest new feature
|
||||
|
||||
This will put `lotus`, `lotus-miner` and `lotus-worker` in `/usr/local/bin`.
|
||||
|
||||
`lotus` will use the `$HOME/.lotus` folder by default for storage (configuration, chain data, wallets, etc). See [advanced options](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/) for information on how to customize the Lotus folder.
|
||||
`lotus` will use the `$HOME/.lotus` folder by default for storage (configuration, chain data, wallets, etc). See [advanced options](https://lotus.filecoin.io/lotus/configure/defaults/#environment-variables) for information on how to customize the Lotus folder.
|
||||
|
||||
6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://docs.filecoin.io/get-started/lotus/installation/#start-the-lotus-daemon-and-sync-the-chain).
|
||||
6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://lotus.filecoin.io/lotus/install/linux/#start-the-lotus-daemon-and-sync-the-chain).
|
||||
|
||||
## License
|
||||
|
||||
|
@ -3,6 +3,7 @@ package api
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
@ -49,6 +50,9 @@ type Common interface {
|
||||
// trigger graceful shutdown
|
||||
Shutdown(context.Context) error //perm:admin
|
||||
|
||||
// StartTime returns node start time
|
||||
StartTime(context.Context) (time.Time, error) //perm:read
|
||||
|
||||
// Session returns a random UUID of api provider session
|
||||
Session(context.Context) (uuid.UUID, error) //perm:read
|
||||
|
||||
|
42
api/api_errors.go
Normal file
@ -0,0 +1,42 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
|
||||
"github.com/filecoin-project/go-jsonrpc"
|
||||
)
|
||||
|
||||
const (
|
||||
EOutOfGas = iota + jsonrpc.FirstUserCode
|
||||
EActorNotFound
|
||||
)
|
||||
|
||||
type ErrOutOfGas struct{}
|
||||
|
||||
func (e *ErrOutOfGas) Error() string {
|
||||
return "call ran out of gas"
|
||||
}
|
||||
|
||||
type ErrActorNotFound struct{}
|
||||
|
||||
func (e *ErrActorNotFound) Error() string {
|
||||
return "actor not found"
|
||||
}
|
||||
|
||||
var RPCErrors = jsonrpc.NewErrors()
|
||||
|
||||
func ErrorIsIn(err error, errorTypes []error) bool {
|
||||
for _, etype := range errorTypes {
|
||||
tmp := reflect.New(reflect.PointerTo(reflect.ValueOf(etype).Elem().Type())).Interface()
|
||||
if errors.As(err, tmp) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func init() {
|
||||
RPCErrors.Register(EOutOfGas, new(*ErrOutOfGas))
|
||||
RPCErrors.Register(EActorNotFound, new(*ErrActorNotFound))
|
||||
}
|
@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@ -17,9 +18,10 @@ import (
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/market"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/paych"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/market"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
abinetwork "github.com/filecoin-project/go-state-types/network"
|
||||
@ -527,6 +529,17 @@ type FullNode interface {
|
||||
StateMarketDeals(context.Context, types.TipSetKey) (map[string]*MarketDeal, error) //perm:read
|
||||
// StateMarketStorageDeal returns information about the indicated deal
|
||||
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) //perm:read
|
||||
// StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal. Returns nil if
|
||||
// pending allocation is not found.
|
||||
StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
|
||||
// StateGetAllocation returns the allocation for a given address and allocation ID.
|
||||
StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
|
||||
// StateGetAllocations returns the all the allocations for a given client.
|
||||
StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read
|
||||
// StateGetClaim returns the claim for a given address and claim ID.
|
||||
StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read
|
||||
// StateGetClaims returns the all the claims for a given provider.
|
||||
StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read
|
||||
// StateComputeDataCID computes DataCID from a set of on-chain deals
|
||||
StateComputeDataCID(ctx context.Context, maddr address.Address, sectorType abi.RegisteredSealProof, deals []abi.DealID, tsk types.TipSetKey) (cid.Cid, error) //perm:read
|
||||
// StateLookupID retrieves the ID address of the given address
|
||||
@ -751,6 +764,9 @@ type FullNode interface {
|
||||
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
|
||||
// the path specified when calling CreateBackup is within the base path
|
||||
CreateBackup(ctx context.Context, fpath string) error //perm:admin
|
||||
|
||||
RaftState(ctx context.Context) (*RaftStateData, error) //perm:read
|
||||
RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
|
||||
}
|
||||
|
||||
type StorageAsk struct {
|
||||
@ -1000,8 +1016,12 @@ type RetrievalOrder struct {
|
||||
Client address.Address
|
||||
Miner address.Address
|
||||
MinerPeer *retrievalmarket.RetrievalPeer
|
||||
|
||||
RemoteStore *RemoteStoreID `json:"RemoteStore,omitempty"`
|
||||
}
|
||||
|
||||
type RemoteStoreID = uuid.UUID
|
||||
|
||||
type InvocResult struct {
|
||||
MsgCid cid.Cid
|
||||
Msg *types.Message
|
||||
|
@ -8,7 +8,7 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
|
||||
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||
|
@ -17,8 +17,9 @@ import (
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/market"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/market"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
abinetwork "github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
@ -54,6 +55,11 @@ type StorageMiner interface {
|
||||
// and does not wait for message execution
|
||||
ActorWithdrawBalance(ctx context.Context, amount abi.TokenAmount) (cid.Cid, error) //perm:admin
|
||||
|
||||
// BeneficiaryWithdrawBalance allows the beneficiary of a miner to withdraw balance from miner actor
|
||||
// Specify amount as "0" to withdraw full balance. This method returns a message CID
|
||||
// and does not wait for message execution
|
||||
BeneficiaryWithdrawBalance(context.Context, abi.TokenAmount) (cid.Cid, error) //perm:admin
|
||||
|
||||
MiningBase(context.Context) (*types.TipSet, error) //perm:read
|
||||
|
||||
ComputeWindowPoSt(ctx context.Context, dlIdx uint64, tsk types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) //perm:admin
|
||||
@ -139,6 +145,8 @@ type StorageMiner interface {
|
||||
// SectorNumFree drops a sector reservation
|
||||
SectorNumFree(ctx context.Context, name string) error //perm:admin
|
||||
|
||||
SectorReceive(ctx context.Context, meta RemoteSectorMeta) error //perm:admin
|
||||
|
||||
// WorkerConnect tells the node to connect to workers RPC
|
||||
WorkerConnect(context.Context, string) error //perm:admin retry:true
|
||||
WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) //perm:admin
|
||||
@ -161,6 +169,7 @@ type StorageMiner interface {
|
||||
ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
|
||||
ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
|
||||
ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true
|
||||
ReturnDownloadSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
|
||||
ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
|
||||
|
||||
// SealingSchedDiag dumps internal sealing scheduler state
|
||||
@ -311,7 +320,7 @@ type StorageMiner interface {
|
||||
// the path specified when calling CreateBackup is within the base path
|
||||
CreateBackup(ctx context.Context, fpath string) error //perm:admin
|
||||
|
||||
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
|
||||
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef) (map[abi.SectorNumber]string, error) //perm:admin
|
||||
|
||||
ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read
|
||||
|
||||
@ -408,6 +417,10 @@ func (st *SealSeed) Equals(ost *SealSeed) bool {
|
||||
|
||||
type SectorState string
|
||||
|
||||
func (s *SectorState) String() string {
|
||||
return string(*s)
|
||||
}
|
||||
|
||||
type AddrUse int
|
||||
|
||||
const (
|
||||
@ -499,3 +512,109 @@ type NumAssignerMeta struct {
|
||||
|
||||
Next abi.SectorNumber
|
||||
}
|
||||
|
||||
type RemoteSectorMeta struct {
|
||||
////////
|
||||
// BASIC SECTOR INFORMATION
|
||||
|
||||
// State specifies the first state the sector will enter after being imported
|
||||
// Must be one of the following states:
|
||||
// * Packing
|
||||
// * GetTicket
|
||||
// * PreCommitting
|
||||
// * SubmitCommit
|
||||
// * Proving/Available
|
||||
State SectorState
|
||||
|
||||
Sector abi.SectorID
|
||||
Type abi.RegisteredSealProof
|
||||
|
||||
////////
|
||||
// SEALING METADATA
|
||||
// (allows lotus to continue the sealing process)
|
||||
|
||||
// Required in Packing and later
|
||||
Pieces []SectorPiece // todo better type?
|
||||
|
||||
// Required in PreCommitting and later
|
||||
TicketValue abi.SealRandomness
|
||||
TicketEpoch abi.ChainEpoch
|
||||
PreCommit1Out storiface.PreCommit1Out // todo specify better
|
||||
|
||||
CommD *cid.Cid
|
||||
CommR *cid.Cid // SectorKey
|
||||
|
||||
// Required in SubmitCommit and later
|
||||
PreCommitInfo *miner.SectorPreCommitInfo
|
||||
PreCommitDeposit *big.Int
|
||||
PreCommitMessage *cid.Cid
|
||||
PreCommitTipSet types.TipSetKey
|
||||
|
||||
SeedValue abi.InteractiveSealRandomness
|
||||
SeedEpoch abi.ChainEpoch
|
||||
|
||||
CommitProof []byte
|
||||
|
||||
// Required in Proving/Available
|
||||
CommitMessage *cid.Cid
|
||||
|
||||
// Optional sector metadata to import
|
||||
Log []SectorLog
|
||||
|
||||
////////
|
||||
// SECTOR DATA SOURCE
|
||||
|
||||
// Sector urls - lotus will use those for fetching files into local storage
|
||||
|
||||
// Required in all states
|
||||
DataUnsealed *storiface.SectorLocation
|
||||
|
||||
// Required in PreCommitting and later
|
||||
DataSealed *storiface.SectorLocation
|
||||
DataCache *storiface.SectorLocation
|
||||
|
||||
////////
|
||||
// SEALING SERVICE HOOKS
|
||||
|
||||
// URL
|
||||
// RemoteCommit1Endpoint is an URL of POST endpoint which lotus will call requesting Commit1 (seal_commit_phase1)
|
||||
// request body will be json-serialized RemoteCommit1Params struct
|
||||
RemoteCommit1Endpoint string
|
||||
|
||||
// RemoteCommit2Endpoint is an URL of POST endpoint which lotus will call requesting Commit2 (seal_commit_phase2)
|
||||
// request body will be json-serialized RemoteCommit2Params struct
|
||||
RemoteCommit2Endpoint string
|
||||
|
||||
// RemoteSealingDoneEndpoint is called after the sector exists the sealing pipeline
|
||||
// request body will be json-serialized RemoteSealingDoneParams struct
|
||||
RemoteSealingDoneEndpoint string
|
||||
}
|
||||
|
||||
type RemoteCommit1Params struct {
|
||||
Ticket, Seed []byte
|
||||
|
||||
Unsealed cid.Cid
|
||||
Sealed cid.Cid
|
||||
|
||||
ProofType abi.RegisteredSealProof
|
||||
}
|
||||
|
||||
type RemoteCommit2Params struct {
|
||||
Sector abi.SectorID
|
||||
ProofType abi.RegisteredSealProof
|
||||
|
||||
// todo spec better
|
||||
Commit1Out storiface.Commit1Out
|
||||
}
|
||||
|
||||
type RemoteSealingDoneParams struct {
|
||||
// Successful is true if the sector has entered state considered as "successfully sealed"
|
||||
Successful bool
|
||||
|
||||
// State is the state the sector has entered
|
||||
// For example "Proving" / "Removing"
|
||||
State string
|
||||
|
||||
// Optional commit message CID
|
||||
CommitMessage *cid.Cid
|
||||
}
|
||||
|
@ -12,6 +12,9 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-jsonrpc"
|
||||
)
|
||||
|
||||
func goCmd() string {
|
||||
@ -124,3 +127,18 @@ func TestPermTags(t *testing.T) {
|
||||
_ = PermissionedStorMinerAPI(&StorageMinerStruct{})
|
||||
_ = PermissionedWorkerAPI(&WorkerStruct{})
|
||||
}
|
||||
|
||||
func TestRetryErrorIsInTrue(t *testing.T) {
|
||||
errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}
|
||||
require.True(t, ErrorIsIn(&jsonrpc.RPCConnectionError{}, errorsToRetry))
|
||||
}
|
||||
|
||||
func TestRetryErrorIsInFalse(t *testing.T) {
|
||||
errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}
|
||||
require.False(t, ErrorIsIn(xerrors.Errorf("random error"), errorsToRetry))
|
||||
}
|
||||
|
||||
func TestRetryWrappedErrorIsInTrue(t *testing.T) {
|
||||
errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}
|
||||
require.True(t, ErrorIsIn(xerrors.Errorf("wrapped: %w", &jsonrpc.RPCConnectionError{}), errorsToRetry))
|
||||
}
|
||||
|
@ -39,16 +39,17 @@ type Worker interface {
|
||||
SealPreCommit2(ctx context.Context, sector storiface.SectorRef, pc1o storiface.PreCommit1Out) (storiface.CallID, error) //perm:admin
|
||||
SealCommit1(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storiface.SectorCids) (storiface.CallID, error) //perm:admin
|
||||
SealCommit2(ctx context.Context, sector storiface.SectorRef, c1o storiface.Commit1Out) (storiface.CallID, error) //perm:admin
|
||||
FinalizeSector(ctx context.Context, sector storiface.SectorRef, keepUnsealed []storiface.Range) (storiface.CallID, error) //perm:admin
|
||||
FinalizeReplicaUpdate(ctx context.Context, sector storiface.SectorRef, keepUnsealed []storiface.Range) (storiface.CallID, error) //perm:admin
|
||||
FinalizeSector(ctx context.Context, sector storiface.SectorRef) (storiface.CallID, error) //perm:admin
|
||||
FinalizeReplicaUpdate(ctx context.Context, sector storiface.SectorRef) (storiface.CallID, error) //perm:admin
|
||||
ReplicaUpdate(ctx context.Context, sector storiface.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
|
||||
ProveReplicaUpdate1(ctx context.Context, sector storiface.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin
|
||||
ProveReplicaUpdate2(ctx context.Context, sector storiface.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storiface.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin
|
||||
GenerateSectorKeyFromData(ctx context.Context, sector storiface.SectorRef, commD cid.Cid) (storiface.CallID, error) //perm:admin
|
||||
ReleaseUnsealed(ctx context.Context, sector storiface.SectorRef, safeToFree []storiface.Range) (storiface.CallID, error) //perm:admin
|
||||
ReleaseUnsealed(ctx context.Context, sector storiface.SectorRef, keepUnsealed []storiface.Range) (storiface.CallID, error) //perm:admin
|
||||
MoveStorage(ctx context.Context, sector storiface.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin
|
||||
UnsealPiece(context.Context, storiface.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin
|
||||
Fetch(context.Context, storiface.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin
|
||||
DownloadSectorData(ctx context.Context, sector storiface.SectorRef, finalized bool, src map[storiface.SectorFileType]storiface.SectorLocation) (storiface.CallID, error) //perm:admin
|
||||
|
||||
GenerateWinningPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, randomness abi.PoStRandomness) ([]proof.PoStProof, error) //perm:admin
|
||||
GenerateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness) (storiface.WindowPoStResult, error) //perm:admin
|
||||
|
125
api/cbor_gen.go
125
api/cbor_gen.go
@ -13,8 +13,8 @@ import (
|
||||
xerrors "golang.org/x/xerrors"
|
||||
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
market "github.com/filecoin-project/go-state-types/builtin/v8/market"
|
||||
paych "github.com/filecoin-project/go-state-types/builtin/v8/paych"
|
||||
market "github.com/filecoin-project/go-state-types/builtin/v9/market"
|
||||
)
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
@ -1005,6 +1005,129 @@ func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
|
||||
return nil
|
||||
}
|
||||
func (t *SectorPiece) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
|
||||
cw := cbg.NewCborWriter(w)
|
||||
|
||||
if _, err := cw.Write([]byte{162}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Piece (abi.PieceInfo) (struct)
|
||||
if len("Piece") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Piece\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("Piece"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("Piece")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := t.Piece.MarshalCBOR(cw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.DealInfo (api.PieceDealInfo) (struct)
|
||||
if len("DealInfo") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"DealInfo\" was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("DealInfo"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("DealInfo")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := t.DealInfo.MarshalCBOR(cw); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *SectorPiece) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
*t = SectorPiece{}
|
||||
|
||||
cr := cbg.NewCborReader(r)
|
||||
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
if maj != cbg.MajMap {
|
||||
return fmt.Errorf("cbor input should be of type map")
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("SectorPiece: map struct too large (%d)", extra)
|
||||
}
|
||||
|
||||
var name string
|
||||
n := extra
|
||||
|
||||
for i := uint64(0); i < n; i++ {
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadString(cr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.Piece (abi.PieceInfo) (struct)
|
||||
case "Piece":
|
||||
|
||||
{
|
||||
|
||||
if err := t.Piece.UnmarshalCBOR(cr); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.Piece: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
// t.DealInfo (api.PieceDealInfo) (struct)
|
||||
case "DealInfo":
|
||||
|
||||
{
|
||||
|
||||
b, err := cr.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b != cbg.CborNull[0] {
|
||||
if err := cr.UnreadByte(); err != nil {
|
||||
return err
|
||||
}
|
||||
t.DealInfo = new(PieceDealInfo)
|
||||
if err := t.DealInfo.UnmarshalCBOR(cr); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.DealInfo pointer: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
default:
|
||||
// Field doesn't exist on this type, so ignore it
|
||||
cbg.ScanForLinks(r, func(cid.Cid) {})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
|
@ -19,7 +19,7 @@ import (
|
||||
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
|
||||
var res v0api.CommonNetStruct
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
api.GetInternalStructs(&res), requestHeader)
|
||||
api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
|
||||
|
||||
return &res, closer, err
|
||||
}
|
||||
@ -29,7 +29,7 @@ func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Heade
|
||||
var res v0api.FullNodeStruct
|
||||
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
api.GetInternalStructs(&res), requestHeader)
|
||||
api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
|
||||
|
||||
return &res, closer, err
|
||||
}
|
||||
@ -38,7 +38,7 @@ func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Heade
|
||||
func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
var res v1api.FullNodeStruct
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
api.GetInternalStructs(&res), requestHeader)
|
||||
api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
|
||||
|
||||
return &res, closer, err
|
||||
}
|
||||
@ -72,6 +72,7 @@ func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.H
|
||||
api.GetInternalStructs(&res), requestHeader,
|
||||
append([]jsonrpc.Option{
|
||||
rpcenc.ReaderParamEncoder(pushUrl),
|
||||
jsonrpc.WithErrors(api.RPCErrors),
|
||||
}, opts...)...)
|
||||
|
||||
return &res, closer, err
|
||||
@ -90,6 +91,7 @@ func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header)
|
||||
rpcenc.ReaderParamEncoder(pushUrl),
|
||||
jsonrpc.WithNoReconnect(),
|
||||
jsonrpc.WithTimeout(30*time.Second),
|
||||
jsonrpc.WithErrors(api.RPCErrors),
|
||||
)
|
||||
|
||||
return &res, closer, err
|
||||
@ -101,7 +103,7 @@ func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
api.GetInternalStructs(&res),
|
||||
requestHeader,
|
||||
opts...,
|
||||
append(opts, jsonrpc.WithErrors(api.RPCErrors))...,
|
||||
)
|
||||
|
||||
return &res, closer, err
|
||||
@ -113,7 +115,7 @@ func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
api.GetInternalStructs(&res),
|
||||
requestHeader,
|
||||
opts...,
|
||||
append(opts, jsonrpc.WithErrors(api.RPCErrors))...,
|
||||
)
|
||||
|
||||
return &res, closer, err
|
||||
@ -124,6 +126,7 @@ func NewWalletRPCV0(ctx context.Context, addr string, requestHeader http.Header)
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
api.GetInternalStructs(&res),
|
||||
requestHeader,
|
||||
jsonrpc.WithErrors(api.RPCErrors),
|
||||
)
|
||||
|
||||
return &res, closer, err
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
@ -23,6 +24,8 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
"golang.org/x/text/cases"
|
||||
"golang.org/x/text/language"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
@ -31,6 +34,7 @@ import (
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
|
||||
@ -136,7 +140,15 @@ func init() {
|
||||
addExample(&textSelExample)
|
||||
addExample(&apiSelExample)
|
||||
addExample(network.ReachabilityPublic)
|
||||
addExample(build.NewestNetworkVersion)
|
||||
addExample(build.TestNetworkVersion)
|
||||
allocationId := verifreg.AllocationId(0)
|
||||
addExample(allocationId)
|
||||
addExample(&allocationId)
|
||||
addExample(map[verifreg.AllocationId]verifreg.Allocation{})
|
||||
claimId := verifreg.ClaimId(0)
|
||||
addExample(claimId)
|
||||
addExample(&claimId)
|
||||
addExample(map[verifreg.ClaimId]verifreg.Claim{})
|
||||
addExample(map[string]int{"name": 42})
|
||||
addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()})
|
||||
addExample(&types.ExecutionTrace{
|
||||
@ -339,7 +351,23 @@ func init() {
|
||||
addExample(map[string]bitfield.BitField{
|
||||
"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
|
||||
})
|
||||
addExample(&api.RaftStateData{
|
||||
NonceMap: make(map[address.Address]uint64),
|
||||
MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
|
||||
})
|
||||
|
||||
addExample(http.Header{
|
||||
"Authorization": []string{"Bearer ey.."},
|
||||
})
|
||||
|
||||
addExample(map[storiface.SectorFileType]storiface.SectorLocation{
|
||||
storiface.FTSealed: {
|
||||
Local: false,
|
||||
URL: "https://example.com/sealingservice/sectors/s-f0123-12345",
|
||||
Headers: nil,
|
||||
},
|
||||
})
|
||||
addExample(&uuid.UUID{})
|
||||
}
|
||||
|
||||
func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
|
||||
@ -430,7 +458,8 @@ func exampleStruct(method string, t, parent reflect.Type) interface{} {
|
||||
if f.Type == parent {
|
||||
continue
|
||||
}
|
||||
if strings.Title(f.Name) == f.Name {
|
||||
caser := cases.Title(language.English)
|
||||
if caser.String(f.Name) == f.Name {
|
||||
ns.Elem().Field(i).Set(reflect.ValueOf(ExampleValue(method, f.Type, t)))
|
||||
}
|
||||
}
|
||||
|
@ -26,8 +26,9 @@ import (
|
||||
auth "github.com/filecoin-project/go-jsonrpc/auth"
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
big "github.com/filecoin-project/go-state-types/big"
|
||||
miner "github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
paych "github.com/filecoin-project/go-state-types/builtin/v8/paych"
|
||||
miner "github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||
crypto "github.com/filecoin-project/go-state-types/crypto"
|
||||
dline "github.com/filecoin-project/go-state-types/dline"
|
||||
network "github.com/filecoin-project/go-state-types/network"
|
||||
@ -2243,6 +2244,36 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, a
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
|
||||
}
|
||||
|
||||
// RaftLeader mocks base method.
|
||||
func (m *MockFullNode) RaftLeader(arg0 context.Context) (peer.ID, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "RaftLeader", arg0)
|
||||
ret0, _ := ret[0].(peer.ID)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// RaftLeader indicates an expected call of RaftLeader.
|
||||
func (mr *MockFullNodeMockRecorder) RaftLeader(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftLeader", reflect.TypeOf((*MockFullNode)(nil).RaftLeader), arg0)
|
||||
}
|
||||
|
||||
// RaftState mocks base method.
|
||||
func (m *MockFullNode) RaftState(arg0 context.Context) (*api.RaftStateData, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "RaftState", arg0)
|
||||
ret0, _ := ret[0].(*api.RaftStateData)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// RaftState indicates an expected call of RaftState.
|
||||
func (mr *MockFullNodeMockRecorder) RaftState(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftState", reflect.TypeOf((*MockFullNode)(nil).RaftState), arg0)
|
||||
}
|
||||
|
||||
// Session mocks base method.
|
||||
func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -2272,6 +2303,21 @@ func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0)
|
||||
}
|
||||
|
||||
// StartTime mocks base method.
|
||||
func (m *MockFullNode) StartTime(arg0 context.Context) (time.Time, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StartTime", arg0)
|
||||
ret0, _ := ret[0].(time.Time)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StartTime indicates an expected call of StartTime.
|
||||
func (mr *MockFullNodeMockRecorder) StartTime(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockFullNode)(nil).StartTime), arg0)
|
||||
}
|
||||
|
||||
// StateAccountKey mocks base method.
|
||||
func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -2348,10 +2394,10 @@ func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gom
|
||||
}
|
||||
|
||||
// StateChangedActors mocks base method.
|
||||
func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.Actor, error) {
|
||||
func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.ActorV5, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateChangedActors", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(map[string]types.Actor)
|
||||
ret0, _ := ret[0].(map[string]types.ActorV5)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@ -2453,10 +2499,10 @@ func (mr *MockFullNodeMockRecorder) StateEncodeParams(arg0, arg1, arg2, arg3 int
|
||||
}
|
||||
|
||||
// StateGetActor mocks base method.
|
||||
func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.Actor, error) {
|
||||
func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.ActorV5, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(*types.Actor)
|
||||
ret0, _ := ret[0].(*types.ActorV5)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@ -2467,6 +2513,51 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{})
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetAllocation mocks base method.
|
||||
func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetAllocation", arg0, arg1, arg2, arg3)
|
||||
ret0, _ := ret[0].(*verifreg.Allocation)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetAllocation indicates an expected call of StateGetAllocation.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetAllocation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocation", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocation), arg0, arg1, arg2, arg3)
|
||||
}
|
||||
|
||||
// StateGetAllocationForPendingDeal mocks base method.
|
||||
func (m *MockFullNode) StateGetAllocationForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*verifreg.Allocation, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetAllocationForPendingDeal", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(*verifreg.Allocation)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetAllocationForPendingDeal indicates an expected call of StateGetAllocationForPendingDeal.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetAllocationForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetAllocations mocks base method.
|
||||
func (m *MockFullNode) StateGetAllocations(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetAllocations", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(map[verifreg.AllocationId]verifreg.Allocation)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetAllocations indicates an expected call of StateGetAllocations.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetAllocations(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocations", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocations), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetBeaconEntry mocks base method.
|
||||
func (m *MockFullNode) StateGetBeaconEntry(arg0 context.Context, arg1 abi.ChainEpoch) (*types.BeaconEntry, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -2482,6 +2573,36 @@ func (mr *MockFullNodeMockRecorder) StateGetBeaconEntry(arg0, arg1 interface{})
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetBeaconEntry", reflect.TypeOf((*MockFullNode)(nil).StateGetBeaconEntry), arg0, arg1)
|
||||
}
|
||||
|
||||
// StateGetClaim mocks base method.
|
||||
func (m *MockFullNode) StateGetClaim(arg0 context.Context, arg1 address.Address, arg2 verifreg.ClaimId, arg3 types.TipSetKey) (*verifreg.Claim, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetClaim", arg0, arg1, arg2, arg3)
|
||||
ret0, _ := ret[0].(*verifreg.Claim)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetClaim indicates an expected call of StateGetClaim.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetClaim(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetClaim", reflect.TypeOf((*MockFullNode)(nil).StateGetClaim), arg0, arg1, arg2, arg3)
|
||||
}
|
||||
|
||||
// StateGetClaims mocks base method.
|
||||
func (m *MockFullNode) StateGetClaims(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetClaims", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetClaims indicates an expected call of StateGetClaims.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetClaims(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetClaims", reflect.TypeOf((*MockFullNode)(nil).StateGetClaims), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetNetworkParams mocks base method.
|
||||
func (m *MockFullNode) StateGetNetworkParams(arg0 context.Context) (*api.NetworkParams, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
183
api/proxy_gen.go
183
api/proxy_gen.go
@ -24,8 +24,9 @@ import (
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/paych"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
abinetwork "github.com/filecoin-project/go-state-types/network"
|
||||
@ -79,6 +80,8 @@ type CommonStruct struct {
|
||||
|
||||
Shutdown func(p0 context.Context) error `perm:"admin"`
|
||||
|
||||
StartTime func(p0 context.Context) (time.Time, error) `perm:"read"`
|
||||
|
||||
Version func(p0 context.Context) (APIVersion, error) `perm:"read"`
|
||||
}
|
||||
}
|
||||
@ -339,6 +342,10 @@ type FullNodeStruct struct {
|
||||
|
||||
PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`
|
||||
|
||||
RaftLeader func(p0 context.Context) (peer.ID, error) `perm:"read"`
|
||||
|
||||
RaftState func(p0 context.Context) (*RaftStateData, error) `perm:"read"`
|
||||
|
||||
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
|
||||
|
||||
StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"`
|
||||
@ -365,8 +372,18 @@ type FullNodeStruct struct {
|
||||
|
||||
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
|
||||
|
||||
StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
|
||||
|
||||
StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
|
||||
|
||||
StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"`
|
||||
|
||||
StateGetBeaconEntry func(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) `perm:"read"`
|
||||
|
||||
StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) `perm:"read"`
|
||||
|
||||
StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) `perm:"read"`
|
||||
|
||||
StateGetNetworkParams func(p0 context.Context) (*NetworkParams, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessFromBeacon func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
@ -664,7 +681,9 @@ type StorageMinerStruct struct {
|
||||
|
||||
ActorWithdrawBalance func(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) `perm:"admin"`
|
||||
|
||||
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"`
|
||||
BeneficiaryWithdrawBalance func(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) `perm:"admin"`
|
||||
|
||||
CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef) (map[abi.SectorNumber]string, error) `perm:"admin"`
|
||||
|
||||
ComputeDataCid func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data) (abi.PieceInfo, error) `perm:"admin"`
|
||||
|
||||
@ -776,6 +795,8 @@ type StorageMinerStruct struct {
|
||||
|
||||
ReturnDataCid func(p0 context.Context, p1 storiface.CallID, p2 abi.PieceInfo, p3 *storiface.CallError) error `perm:"admin"`
|
||||
|
||||
ReturnDownloadSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
|
||||
|
||||
ReturnFetch func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
|
||||
|
||||
ReturnFinalizeReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"`
|
||||
@ -844,6 +865,8 @@ type StorageMinerStruct struct {
|
||||
|
||||
SectorPreCommitPending func(p0 context.Context) ([]abi.SectorID, error) `perm:"admin"`
|
||||
|
||||
SectorReceive func(p0 context.Context, p1 RemoteSectorMeta) error `perm:"admin"`
|
||||
|
||||
SectorRemove func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"`
|
||||
|
||||
SectorSetExpectedSealDuration func(p0 context.Context, p1 time.Duration) error `perm:"write"`
|
||||
@ -949,13 +972,15 @@ type WorkerStruct struct {
|
||||
|
||||
DataCid func(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 storiface.Data) (storiface.CallID, error) `perm:"admin"`
|
||||
|
||||
DownloadSectorData func(p0 context.Context, p1 storiface.SectorRef, p2 bool, p3 map[storiface.SectorFileType]storiface.SectorLocation) (storiface.CallID, error) `perm:"admin"`
|
||||
|
||||
Enabled func(p0 context.Context) (bool, error) `perm:"admin"`
|
||||
|
||||
Fetch func(p0 context.Context, p1 storiface.SectorRef, p2 storiface.SectorFileType, p3 storiface.PathType, p4 storiface.AcquireMode) (storiface.CallID, error) `perm:"admin"`
|
||||
|
||||
FinalizeReplicaUpdate func(p0 context.Context, p1 storiface.SectorRef, p2 []storiface.Range) (storiface.CallID, error) `perm:"admin"`
|
||||
FinalizeReplicaUpdate func(p0 context.Context, p1 storiface.SectorRef) (storiface.CallID, error) `perm:"admin"`
|
||||
|
||||
FinalizeSector func(p0 context.Context, p1 storiface.SectorRef, p2 []storiface.Range) (storiface.CallID, error) `perm:"admin"`
|
||||
FinalizeSector func(p0 context.Context, p1 storiface.SectorRef) (storiface.CallID, error) `perm:"admin"`
|
||||
|
||||
GenerateSectorKeyFromData func(p0 context.Context, p1 storiface.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"`
|
||||
|
||||
@ -1154,6 +1179,17 @@ func (s *CommonStub) Shutdown(p0 context.Context) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *CommonStruct) StartTime(p0 context.Context) (time.Time, error) {
|
||||
if s.Internal.StartTime == nil {
|
||||
return *new(time.Time), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StartTime(p0)
|
||||
}
|
||||
|
||||
func (s *CommonStub) StartTime(p0 context.Context) (time.Time, error) {
|
||||
return *new(time.Time), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *CommonStruct) Version(p0 context.Context) (APIVersion, error) {
|
||||
if s.Internal.Version == nil {
|
||||
return *new(APIVersion), ErrNotSupported
|
||||
@ -2441,6 +2477,28 @@ func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address
|
||||
return *new(cid.Cid), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) RaftLeader(p0 context.Context) (peer.ID, error) {
|
||||
if s.Internal.RaftLeader == nil {
|
||||
return *new(peer.ID), ErrNotSupported
|
||||
}
|
||||
return s.Internal.RaftLeader(p0)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) RaftLeader(p0 context.Context) (peer.ID, error) {
|
||||
return *new(peer.ID), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) RaftState(p0 context.Context) (*RaftStateData, error) {
|
||||
if s.Internal.RaftState == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.RaftState(p0)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) RaftState(p0 context.Context) (*RaftStateData, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
|
||||
if s.Internal.StateAccountKey == nil {
|
||||
return *new(address.Address), ErrNotSupported
|
||||
@ -2584,6 +2642,39 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocation == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocation(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocationForPendingDeal == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocations == nil {
|
||||
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocations(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
|
||||
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetBeaconEntry(p0 context.Context, p1 abi.ChainEpoch) (*types.BeaconEntry, error) {
|
||||
if s.Internal.StateGetBeaconEntry == nil {
|
||||
return nil, ErrNotSupported
|
||||
@ -2595,6 +2686,28 @@ func (s *FullNodeStub) StateGetBeaconEntry(p0 context.Context, p1 abi.ChainEpoch
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
|
||||
if s.Internal.StateGetClaim == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetClaim(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
|
||||
if s.Internal.StateGetClaims == nil {
|
||||
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetClaims(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
|
||||
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetNetworkParams(p0 context.Context) (*NetworkParams, error) {
|
||||
if s.Internal.StateGetNetworkParams == nil {
|
||||
return nil, ErrNotSupported
|
||||
@ -4036,14 +4149,25 @@ func (s *StorageMinerStub) ActorWithdrawBalance(p0 context.Context, p1 abi.Token
|
||||
return *new(cid.Cid), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
|
||||
func (s *StorageMinerStruct) BeneficiaryWithdrawBalance(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) {
|
||||
if s.Internal.BeneficiaryWithdrawBalance == nil {
|
||||
return *new(cid.Cid), ErrNotSupported
|
||||
}
|
||||
return s.Internal.BeneficiaryWithdrawBalance(p0, p1)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) BeneficiaryWithdrawBalance(p0 context.Context, p1 abi.TokenAmount) (cid.Cid, error) {
|
||||
return *new(cid.Cid), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef) (map[abi.SectorNumber]string, error) {
|
||||
if s.Internal.CheckProvable == nil {
|
||||
return *new(map[abi.SectorNumber]string), ErrNotSupported
|
||||
}
|
||||
return s.Internal.CheckProvable(p0, p1, p2, p3)
|
||||
return s.Internal.CheckProvable(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) {
|
||||
func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storiface.SectorRef) (map[abi.SectorNumber]string, error) {
|
||||
return *new(map[abi.SectorNumber]string), ErrNotSupported
|
||||
}
|
||||
|
||||
@ -4652,6 +4776,17 @@ func (s *StorageMinerStub) ReturnDataCid(p0 context.Context, p1 storiface.CallID
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) ReturnDownloadSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
|
||||
if s.Internal.ReturnDownloadSector == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.ReturnDownloadSector(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) ReturnDownloadSector(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) ReturnFetch(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error {
|
||||
if s.Internal.ReturnFetch == nil {
|
||||
return ErrNotSupported
|
||||
@ -5026,6 +5161,17 @@ func (s *StorageMinerStub) SectorPreCommitPending(p0 context.Context) ([]abi.Sec
|
||||
return *new([]abi.SectorID), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) SectorReceive(p0 context.Context, p1 RemoteSectorMeta) error {
|
||||
if s.Internal.SectorReceive == nil {
|
||||
return ErrNotSupported
|
||||
}
|
||||
return s.Internal.SectorReceive(p0, p1)
|
||||
}
|
||||
|
||||
func (s *StorageMinerStub) SectorReceive(p0 context.Context, p1 RemoteSectorMeta) error {
|
||||
return ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *StorageMinerStruct) SectorRemove(p0 context.Context, p1 abi.SectorNumber) error {
|
||||
if s.Internal.SectorRemove == nil {
|
||||
return ErrNotSupported
|
||||
@ -5510,6 +5656,17 @@ func (s *WorkerStub) DataCid(p0 context.Context, p1 abi.UnpaddedPieceSize, p2 st
|
||||
return *new(storiface.CallID), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *WorkerStruct) DownloadSectorData(p0 context.Context, p1 storiface.SectorRef, p2 bool, p3 map[storiface.SectorFileType]storiface.SectorLocation) (storiface.CallID, error) {
|
||||
if s.Internal.DownloadSectorData == nil {
|
||||
return *new(storiface.CallID), ErrNotSupported
|
||||
}
|
||||
return s.Internal.DownloadSectorData(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *WorkerStub) DownloadSectorData(p0 context.Context, p1 storiface.SectorRef, p2 bool, p3 map[storiface.SectorFileType]storiface.SectorLocation) (storiface.CallID, error) {
|
||||
return *new(storiface.CallID), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *WorkerStruct) Enabled(p0 context.Context) (bool, error) {
|
||||
if s.Internal.Enabled == nil {
|
||||
return false, ErrNotSupported
|
||||
@ -5532,25 +5689,25 @@ func (s *WorkerStub) Fetch(p0 context.Context, p1 storiface.SectorRef, p2 storif
|
||||
return *new(storiface.CallID), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *WorkerStruct) FinalizeReplicaUpdate(p0 context.Context, p1 storiface.SectorRef, p2 []storiface.Range) (storiface.CallID, error) {
|
||||
func (s *WorkerStruct) FinalizeReplicaUpdate(p0 context.Context, p1 storiface.SectorRef) (storiface.CallID, error) {
|
||||
if s.Internal.FinalizeReplicaUpdate == nil {
|
||||
return *new(storiface.CallID), ErrNotSupported
|
||||
}
|
||||
return s.Internal.FinalizeReplicaUpdate(p0, p1, p2)
|
||||
return s.Internal.FinalizeReplicaUpdate(p0, p1)
|
||||
}
|
||||
|
||||
func (s *WorkerStub) FinalizeReplicaUpdate(p0 context.Context, p1 storiface.SectorRef, p2 []storiface.Range) (storiface.CallID, error) {
|
||||
func (s *WorkerStub) FinalizeReplicaUpdate(p0 context.Context, p1 storiface.SectorRef) (storiface.CallID, error) {
|
||||
return *new(storiface.CallID), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *WorkerStruct) FinalizeSector(p0 context.Context, p1 storiface.SectorRef, p2 []storiface.Range) (storiface.CallID, error) {
|
||||
func (s *WorkerStruct) FinalizeSector(p0 context.Context, p1 storiface.SectorRef) (storiface.CallID, error) {
|
||||
if s.Internal.FinalizeSector == nil {
|
||||
return *new(storiface.CallID), ErrNotSupported
|
||||
}
|
||||
return s.Internal.FinalizeSector(p0, p1, p2)
|
||||
return s.Internal.FinalizeSector(p0, p1)
|
||||
}
|
||||
|
||||
func (s *WorkerStub) FinalizeSector(p0 context.Context, p1 storiface.SectorRef, p2 []storiface.Range) (storiface.CallID, error) {
|
||||
func (s *WorkerStub) FinalizeSector(p0 context.Context, p1 storiface.SectorRef) (storiface.CallID, error) {
|
||||
return *new(storiface.CallID), ErrNotSupported
|
||||
}
|
||||
|
||||
|
69
api/types.go
69
api/types.go
@ -17,6 +17,7 @@ import (
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
@ -58,6 +59,11 @@ type MessageSendSpec struct {
|
||||
MsgUuid uuid.UUID
|
||||
}
|
||||
|
||||
type MpoolMessageWhole struct {
|
||||
Msg *types.Message
|
||||
Spec *MessageSendSpec
|
||||
}
|
||||
|
||||
// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
|
||||
type GraphSyncDataTransfer struct {
|
||||
// GraphSync request id for this transfer
|
||||
@ -295,6 +301,9 @@ type MinerInfo struct {
|
||||
SectorSize abi.SectorSize
|
||||
WindowPoStPartitionSectors uint64
|
||||
ConsensusFaultElapsed abi.ChainEpoch
|
||||
Beneficiary address.Address
|
||||
BeneficiaryTerm *miner.BeneficiaryTerm
|
||||
PendingBeneficiaryTerm *miner.PendingBeneficiaryChange
|
||||
}
|
||||
|
||||
type NetworkParams struct {
|
||||
@ -327,4 +336,64 @@ type ForkUpgradeParams struct {
|
||||
UpgradeHyperdriveHeight abi.ChainEpoch
|
||||
UpgradeChocolateHeight abi.ChainEpoch
|
||||
UpgradeOhSnapHeight abi.ChainEpoch
|
||||
UpgradeSkyrHeight abi.ChainEpoch
|
||||
UpgradeSharkHeight abi.ChainEpoch
|
||||
}
|
||||
|
||||
type NonceMapType map[address.Address]uint64
|
||||
type MsgUuidMapType map[uuid.UUID]*types.SignedMessage
|
||||
|
||||
type RaftStateData struct {
|
||||
NonceMap NonceMapType
|
||||
MsgUuids MsgUuidMapType
|
||||
}
|
||||
|
||||
func (n *NonceMapType) MarshalJSON() ([]byte, error) {
|
||||
marshalled := make(map[string]uint64)
|
||||
for a, n := range *n {
|
||||
marshalled[a.String()] = n
|
||||
}
|
||||
return json.Marshal(marshalled)
|
||||
}
|
||||
|
||||
func (n *NonceMapType) UnmarshalJSON(b []byte) error {
|
||||
unmarshalled := make(map[string]uint64)
|
||||
err := json.Unmarshal(b, &unmarshalled)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*n = make(map[address.Address]uint64)
|
||||
for saddr, nonce := range unmarshalled {
|
||||
a, err := address.NewFromString(saddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
(*n)[a] = nonce
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) {
|
||||
marshalled := make(map[string]*types.SignedMessage)
|
||||
for u, msg := range *m {
|
||||
marshalled[u.String()] = msg
|
||||
}
|
||||
return json.Marshal(marshalled)
|
||||
}
|
||||
|
||||
func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
|
||||
unmarshalled := make(map[string]*types.SignedMessage)
|
||||
err := json.Unmarshal(b, &unmarshalled)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*m = make(map[uuid.UUID]*types.SignedMessage)
|
||||
for suid, msg := range unmarshalled {
|
||||
u, err := uuid.Parse(suid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
(*m)[u] = msg
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -14,8 +14,9 @@ import (
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/paych"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
abinetwork "github.com/filecoin-project/go-state-types/network"
|
||||
@ -531,6 +532,16 @@ type FullNode interface {
|
||||
StateMarketDeals(context.Context, types.TipSetKey) (map[string]*api.MarketDeal, error) //perm:read
|
||||
// StateMarketStorageDeal returns information about the indicated deal
|
||||
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) //perm:read
|
||||
// StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal.
|
||||
StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
|
||||
// StateGetAllocation returns the allocation for a given address and allocation ID.
|
||||
StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
|
||||
// StateGetAllocations returns all the allocations for a given client.
|
||||
StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read
|
||||
// StateGetClaim returns the claim for a given address and claim ID.
|
||||
StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read
|
||||
// StateGetClaims returns all the claims for a given provider.
|
||||
StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read
|
||||
// StateLookupID retrieves the ID address of the given address
|
||||
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
|
||||
// StateAccountKey returns the public key address of the given ID address
|
||||
|
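A usage sketch for the new verified-registry accessors, not part of this diff; it assumes node is an already-connected api.FullNode and clientAddr is the client's address:

allocs, err := node.StateGetAllocations(ctx, clientAddr, types.EmptyTSK)
if err != nil {
	return err
}
for id, alloc := range allocs {
	fmt.Printf("allocation %d: provider %d, size %d\n", id, alloc.Provider, alloc.Size)
}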
@ -8,7 +8,7 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
abinetwork "github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
|
@ -16,8 +16,9 @@ import (
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/paych"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
abinetwork "github.com/filecoin-project/go-state-types/network"
|
||||
@ -277,6 +278,16 @@ type FullNodeStruct struct {
|
||||
|
||||
StateGetActor func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*types.Actor, error) `perm:"read"`
|
||||
|
||||
StateGetAllocation func(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
|
||||
|
||||
StateGetAllocationForPendingDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) `perm:"read"`
|
||||
|
||||
StateGetAllocations func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) `perm:"read"`
|
||||
|
||||
StateGetClaim func(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) `perm:"read"`
|
||||
|
||||
StateGetClaims func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) `perm:"read"`
|
||||
|
||||
StateGetNetworkParams func(p0 context.Context) (*api.NetworkParams, error) `perm:"read"`
|
||||
|
||||
StateGetRandomnessFromBeacon func(p0 context.Context, p1 crypto.DomainSeparationTag, p2 abi.ChainEpoch, p3 []byte, p4 types.TipSetKey) (abi.Randomness, error) `perm:"read"`
|
||||
@ -1793,6 +1804,61 @@ func (s *FullNodeStub) StateGetActor(p0 context.Context, p1 address.Address, p2
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocation == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocation(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetAllocation(p0 context.Context, p1 address.Address, p2 verifregtypes.AllocationId, p3 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocationForPendingDeal == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocationForPendingDeal(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetAllocationForPendingDeal(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*verifregtypes.Allocation, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
|
||||
if s.Internal.StateGetAllocations == nil {
|
||||
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetAllocations(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetAllocations(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) {
|
||||
return *new(map[verifregtypes.AllocationId]verifregtypes.Allocation), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
|
||||
if s.Internal.StateGetClaim == nil {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetClaim(p0, p1, p2, p3)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetClaim(p0 context.Context, p1 address.Address, p2 verifregtypes.ClaimId, p3 types.TipSetKey) (*verifregtypes.Claim, error) {
|
||||
return nil, ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
|
||||
if s.Internal.StateGetClaims == nil {
|
||||
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
|
||||
}
|
||||
return s.Internal.StateGetClaims(p0, p1, p2)
|
||||
}
|
||||
|
||||
func (s *FullNodeStub) StateGetClaims(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) {
|
||||
return *new(map[verifregtypes.ClaimId]verifregtypes.Claim), ErrNotSupported
|
||||
}
|
||||
|
||||
func (s *FullNodeStruct) StateGetNetworkParams(p0 context.Context) (*api.NetworkParams, error) {
|
||||
if s.Internal.StateGetNetworkParams == nil {
|
||||
return nil, ErrNotSupported
|
||||
|
@ -26,8 +26,9 @@ import (
|
||||
auth "github.com/filecoin-project/go-jsonrpc/auth"
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
big "github.com/filecoin-project/go-state-types/big"
|
||||
miner "github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
paych "github.com/filecoin-project/go-state-types/builtin/v8/paych"
|
||||
miner "github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
|
||||
crypto "github.com/filecoin-project/go-state-types/crypto"
|
||||
dline "github.com/filecoin-project/go-state-types/dline"
|
||||
network "github.com/filecoin-project/go-state-types/network"
|
||||
@ -2157,6 +2158,21 @@ func (mr *MockFullNodeMockRecorder) Shutdown(arg0 interface{}) *gomock.Call {
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockFullNode)(nil).Shutdown), arg0)
|
||||
}
|
||||
|
||||
// StartTime mocks base method.
|
||||
func (m *MockFullNode) StartTime(arg0 context.Context) (time.Time, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StartTime", arg0)
|
||||
ret0, _ := ret[0].(time.Time)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StartTime indicates an expected call of StartTime.
|
||||
func (mr *MockFullNodeMockRecorder) StartTime(arg0 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockFullNode)(nil).StartTime), arg0)
|
||||
}
|
||||
|
||||
// StateAccountKey mocks base method.
|
||||
func (m *MockFullNode) StateAccountKey(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (address.Address, error) {
|
||||
m.ctrl.T.Helper()
|
||||
@ -2233,10 +2249,10 @@ func (mr *MockFullNodeMockRecorder) StateCall(arg0, arg1, arg2 interface{}) *gom
|
||||
}
|
||||
|
||||
// StateChangedActors mocks base method.
|
||||
func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.Actor, error) {
|
||||
func (m *MockFullNode) StateChangedActors(arg0 context.Context, arg1, arg2 cid.Cid) (map[string]types.ActorV5, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateChangedActors", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(map[string]types.Actor)
|
||||
ret0, _ := ret[0].(map[string]types.ActorV5)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@ -2308,10 +2324,10 @@ func (mr *MockFullNodeMockRecorder) StateDecodeParams(arg0, arg1, arg2, arg3, ar
|
||||
}
|
||||
|
||||
// StateGetActor mocks base method.
|
||||
func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.Actor, error) {
|
||||
func (m *MockFullNode) StateGetActor(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (*types.ActorV5, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetActor", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(*types.Actor)
|
||||
ret0, _ := ret[0].(*types.ActorV5)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
@ -2322,6 +2338,81 @@ func (mr *MockFullNodeMockRecorder) StateGetActor(arg0, arg1, arg2 interface{})
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetActor", reflect.TypeOf((*MockFullNode)(nil).StateGetActor), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetAllocation mocks base method.
|
||||
func (m *MockFullNode) StateGetAllocation(arg0 context.Context, arg1 address.Address, arg2 verifreg.AllocationId, arg3 types.TipSetKey) (*verifreg.Allocation, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetAllocation", arg0, arg1, arg2, arg3)
|
||||
ret0, _ := ret[0].(*verifreg.Allocation)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetAllocation indicates an expected call of StateGetAllocation.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetAllocation(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocation", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocation), arg0, arg1, arg2, arg3)
|
||||
}
|
||||
|
||||
// StateGetAllocationForPendingDeal mocks base method.
|
||||
func (m *MockFullNode) StateGetAllocationForPendingDeal(arg0 context.Context, arg1 abi.DealID, arg2 types.TipSetKey) (*verifreg.Allocation, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetAllocationForPendingDeal", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(*verifreg.Allocation)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetAllocationForPendingDeal indicates an expected call of StateGetAllocationForPendingDeal.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetAllocationForPendingDeal(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocationForPendingDeal", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocationForPendingDeal), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetAllocations mocks base method.
|
||||
func (m *MockFullNode) StateGetAllocations(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.AllocationId]verifreg.Allocation, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetAllocations", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(map[verifreg.AllocationId]verifreg.Allocation)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetAllocations indicates an expected call of StateGetAllocations.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetAllocations(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetAllocations", reflect.TypeOf((*MockFullNode)(nil).StateGetAllocations), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetClaim mocks base method.
|
||||
func (m *MockFullNode) StateGetClaim(arg0 context.Context, arg1 address.Address, arg2 verifreg.ClaimId, arg3 types.TipSetKey) (*verifreg.Claim, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetClaim", arg0, arg1, arg2, arg3)
|
||||
ret0, _ := ret[0].(*verifreg.Claim)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetClaim indicates an expected call of StateGetClaim.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetClaim(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetClaim", reflect.TypeOf((*MockFullNode)(nil).StateGetClaim), arg0, arg1, arg2, arg3)
|
||||
}
|
||||
|
||||
// StateGetClaims mocks base method.
|
||||
func (m *MockFullNode) StateGetClaims(arg0 context.Context, arg1 address.Address, arg2 types.TipSetKey) (map[verifreg.ClaimId]verifreg.Claim, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "StateGetClaims", arg0, arg1, arg2)
|
||||
ret0, _ := ret[0].(map[verifreg.ClaimId]verifreg.Claim)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// StateGetClaims indicates an expected call of StateGetClaims.
|
||||
func (mr *MockFullNodeMockRecorder) StateGetClaims(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateGetClaims", reflect.TypeOf((*MockFullNode)(nil).StateGetClaims), arg0, arg1, arg2)
|
||||
}
|
||||
|
||||
// StateGetNetworkParams mocks base method.
|
||||
func (m *MockFullNode) StateGetNetworkParams(arg0 context.Context) (*api.NetworkParams, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
@ -12,7 +12,7 @@ import (
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
|
@ -58,7 +58,7 @@ var (
|
||||
FullAPIVersion1 = newVer(2, 3, 0)
|
||||
|
||||
MinerAPIVersion0 = newVer(1, 5, 0)
|
||||
WorkerAPIVersion0 = newVer(1, 6, 0)
|
||||
WorkerAPIVersion0 = newVer(1, 7, 0)
|
||||
)
|
||||
|
||||
//nolint:varcheck,deadcode
|
||||
|
@ -181,18 +181,22 @@ func (bs *AutobatchBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block,
|
||||
}
|
||||
|
||||
bs.stateLock.Lock()
|
||||
defer bs.stateLock.Unlock()
|
||||
v, ok := bs.flushingBatch.blockMap[c]
|
||||
if ok {
|
||||
bs.stateLock.Unlock()
|
||||
return v, nil
|
||||
}
|
||||
|
||||
v, ok = bs.bufferedBatch.blockMap[c]
|
||||
if ok {
|
||||
bs.stateLock.Unlock()
|
||||
return v, nil
|
||||
}
|
||||
bs.stateLock.Unlock()
|
||||
|
||||
return bs.Get(ctx, c)
|
||||
// We have to check the backing store one more time because it may have been flushed by the
|
||||
// time we were able to take the lock above.
|
||||
return bs.backingBs.Get(ctx, c)
|
||||
}
|
||||
|
||||
func (bs *AutobatchBlockstore) DeleteBlock(context.Context, cid.Cid) error {
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@ -29,6 +30,10 @@ func TestAutobatchBlockstore(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, b2.RawData(), v2.RawData())
|
||||
|
||||
// Regression test for a deadlock.
|
||||
_, err = ab.Get(ctx, b3.Cid())
|
||||
require.True(t, ipld.IsNotFound(err))
|
||||
|
||||
require.NoError(t, ab.Flush(ctx))
|
||||
require.NoError(t, ab.Shutdown(ctx))
|
||||
}
|
||||
|
441
blockstore/cbor_gen.go
Normal file
@ -0,0 +1,441 @@
|
||||
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
|
||||
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
cid "github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
xerrors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
var _ = cid.Undef
|
||||
var _ = math.E
|
||||
var _ = sort.Sort
|
||||
|
||||
var lengthBufNetRpcReq = []byte{132}
|
||||
|
||||
func (t *NetRpcReq) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
|
||||
cw := cbg.NewCborWriter(w)
|
||||
|
||||
if _, err := cw.Write(lengthBufNetRpcReq); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCReqType) (uint8)
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.ID (uint64) (uint64)
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Cid ([]cid.Cid) (slice)
|
||||
if len(t.Cid) > cbg.MaxLength {
|
||||
return xerrors.Errorf("Slice value in field t.Cid was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Cid))); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, v := range t.Cid {
|
||||
if err := cbg.WriteCid(w, v); err != nil {
|
||||
return xerrors.Errorf("failed writing cid field t.Cid: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// t.Data ([][]uint8) (slice)
|
||||
if len(t.Data) > cbg.MaxLength {
|
||||
return xerrors.Errorf("Slice value in field t.Data was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Data))); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, v := range t.Data {
|
||||
if len(v) > cbg.ByteArrayMaxLen {
|
||||
return xerrors.Errorf("Byte array in field v was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(v))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := cw.Write(v[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
*t = NetRpcReq{}
|
||||
|
||||
cr := cbg.NewCborReader(r)
|
||||
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("cbor input should be of type array")
|
||||
}
|
||||
|
||||
if extra != 4 {
|
||||
return fmt.Errorf("cbor input had wrong number of fields")
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCReqType) (uint8)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint8 field")
|
||||
}
|
||||
if extra > math.MaxUint8 {
|
||||
return fmt.Errorf("integer in input was too large for uint8 field")
|
||||
}
|
||||
t.Type = NetRPCReqType(extra)
|
||||
// t.ID (uint64) (uint64)
|
||||
|
||||
{
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.ID = uint64(extra)
|
||||
|
||||
}
|
||||
// t.Cid ([]cid.Cid) (slice)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("t.Cid: array too large (%d)", extra)
|
||||
}
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("expected cbor array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Cid = make([]cid.Cid, extra)
|
||||
}
|
||||
|
||||
for i := 0; i < int(extra); i++ {
|
||||
|
||||
c, err := cbg.ReadCid(cr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("reading cid field t.Cid failed: %w", err)
|
||||
}
|
||||
t.Cid[i] = c
|
||||
}
|
||||
|
||||
// t.Data ([][]uint8) (slice)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("t.Data: array too large (%d)", extra)
|
||||
}
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("expected cbor array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Data = make([][]uint8, extra)
|
||||
}
|
||||
|
||||
for i := 0; i < int(extra); i++ {
|
||||
{
|
||||
var maj byte
|
||||
var extra uint64
|
||||
var err error
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.ByteArrayMaxLen {
|
||||
return fmt.Errorf("t.Data[i]: byte array too large (%d)", extra)
|
||||
}
|
||||
if maj != cbg.MajByteString {
|
||||
return fmt.Errorf("expected byte array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Data[i] = make([]uint8, extra)
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(cr, t.Data[i][:]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var lengthBufNetRpcResp = []byte{131}
|
||||
|
||||
func (t *NetRpcResp) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
|
||||
cw := cbg.NewCborWriter(w)
|
||||
|
||||
if _, err := cw.Write(lengthBufNetRpcResp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCRespType) (uint8)
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.ID (uint64) (uint64)
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Data ([]uint8) (slice)
|
||||
if len(t.Data) > cbg.ByteArrayMaxLen {
|
||||
return xerrors.Errorf("Byte array in field t.Data was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Data))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := cw.Write(t.Data[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *NetRpcResp) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
*t = NetRpcResp{}
|
||||
|
||||
cr := cbg.NewCborReader(r)
|
||||
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("cbor input should be of type array")
|
||||
}
|
||||
|
||||
if extra != 3 {
|
||||
return fmt.Errorf("cbor input had wrong number of fields")
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCRespType) (uint8)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint8 field")
|
||||
}
|
||||
if extra > math.MaxUint8 {
|
||||
return fmt.Errorf("integer in input was too large for uint8 field")
|
||||
}
|
||||
t.Type = NetRPCRespType(extra)
|
||||
// t.ID (uint64) (uint64)
|
||||
|
||||
{
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.ID = uint64(extra)
|
||||
|
||||
}
|
||||
// t.Data ([]uint8) (slice)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.ByteArrayMaxLen {
|
||||
return fmt.Errorf("t.Data: byte array too large (%d)", extra)
|
||||
}
|
||||
if maj != cbg.MajByteString {
|
||||
return fmt.Errorf("expected byte array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Data = make([]uint8, extra)
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(cr, t.Data[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var lengthBufNetRpcErr = []byte{131}
|
||||
|
||||
func (t *NetRpcErr) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
|
||||
cw := cbg.NewCborWriter(w)
|
||||
|
||||
if _, err := cw.Write(lengthBufNetRpcErr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCErrType) (uint8)
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Msg (string) (string)
|
||||
if len(t.Msg) > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field t.Msg was too long")
|
||||
}
|
||||
|
||||
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Msg))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string(t.Msg)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Cid (cid.Cid) (struct)
|
||||
|
||||
if t.Cid == nil {
|
||||
if _, err := cw.Write(cbg.CborNull); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteCid(cw, *t.Cid); err != nil {
|
||||
return xerrors.Errorf("failed to write cid field t.Cid: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *NetRpcErr) UnmarshalCBOR(r io.Reader) (err error) {
|
||||
*t = NetRpcErr{}
|
||||
|
||||
cr := cbg.NewCborReader(r)
|
||||
|
||||
maj, extra, err := cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("cbor input should be of type array")
|
||||
}
|
||||
|
||||
if extra != 3 {
|
||||
return fmt.Errorf("cbor input had wrong number of fields")
|
||||
}
|
||||
|
||||
// t.Type (blockstore.NetRPCErrType) (uint8)
|
||||
|
||||
maj, extra, err = cr.ReadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint8 field")
|
||||
}
|
||||
if extra > math.MaxUint8 {
|
||||
return fmt.Errorf("integer in input was too large for uint8 field")
|
||||
}
|
||||
t.Type = NetRPCErrType(extra)
|
||||
// t.Msg (string) (string)
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadString(cr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.Msg = string(sval)
|
||||
}
|
||||
// t.Cid (cid.Cid) (struct)
|
||||
|
||||
{
|
||||
|
||||
b, err := cr.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b != cbg.CborNull[0] {
|
||||
if err := cr.UnreadByte(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := cbg.ReadCid(cr)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read cid field t.Cid: %w", err)
|
||||
}
|
||||
|
||||
t.Cid = &c
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
@ -47,6 +47,9 @@ func (m MemBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error)
|
||||
if !ok {
|
||||
return nil, ipld.ErrNotFound{Cid: k}
|
||||
}
|
||||
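// The stored block may carry a CID with a different codec than the one requested;
// re-wrap the raw data under the requested CID so the returned block matches the request.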
if b.Cid().Prefix().Codec != k.Prefix().Codec {
|
||||
return blocks.NewBlockWithCid(b.RawData(), k)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
|
45
blockstore/mem_test.go
Normal file
@ -0,0 +1,45 @@
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMemGetCodec(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
bs := NewMemory()
|
||||
|
||||
cborArr := []byte{0x82, 1, 2}
|
||||
|
||||
h, err := mh.Sum(cborArr, mh.SHA2_256, -1)
|
||||
require.NoError(t, err)
|
||||
|
||||
rawCid := cid.NewCidV1(cid.Raw, h)
|
||||
rawBlk, err := blocks.NewBlockWithCid(cborArr, rawCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = bs.Put(ctx, rawBlk)
|
||||
require.NoError(t, err)
|
||||
|
||||
cborCid := cid.NewCidV1(cid.DagCBOR, h)
|
||||
|
||||
cborBlk, err := bs.Get(ctx, cborCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, cborCid.Prefix(), cborBlk.Cid().Prefix())
|
||||
require.EqualValues(t, cborArr, cborBlk.RawData())
|
||||
|
||||
// was allocated
|
||||
require.NotEqual(t, cborBlk, rawBlk)
|
||||
|
||||
gotRawBlk, err := bs.Get(ctx, rawCid)
|
||||
require.NoError(t, err)
|
||||
|
||||
// not allocated
|
||||
require.Equal(t, rawBlk, gotRawBlk)
|
||||
}
|
424
blockstore/net.go
Normal file
@ -0,0 +1,424 @@
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
"github.com/libp2p/go-msgio"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type NetRPCReqType byte
|
||||
|
||||
const (
|
||||
NRpcHas NetRPCReqType = iota
|
||||
NRpcGet
|
||||
NRpcGetSize
|
||||
NRpcPut
|
||||
NRpcDelete
|
||||
|
||||
// todo cancel req
|
||||
)
|
||||
|
||||
type NetRPCRespType byte
|
||||
|
||||
const (
|
||||
NRpcOK NetRPCRespType = iota
|
||||
NRpcErr
|
||||
NRpcMore
|
||||
)
|
||||
|
||||
type NetRPCErrType byte
|
||||
|
||||
const (
|
||||
NRpcErrGeneric NetRPCErrType = iota
|
||||
NRpcErrNotFound
|
||||
)
|
||||
|
||||
type NetRpcReq struct {
|
||||
Type NetRPCReqType
|
||||
ID uint64
|
||||
|
||||
Cid []cid.Cid // todo maxsize?
|
||||
Data [][]byte // todo maxsize?
|
||||
}
|
||||
|
||||
type NetRpcResp struct {
|
||||
Type NetRPCRespType
|
||||
ID uint64
|
||||
|
||||
// error or cids in allkeys
|
||||
Data []byte // todo maxsize?
|
||||
|
||||
next <-chan NetRpcResp
|
||||
}
|
||||
|
||||
type NetRpcErr struct {
|
||||
Type NetRPCErrType
|
||||
|
||||
Msg string
|
||||
|
||||
// in case of NRpcErrNotFound
|
||||
Cid *cid.Cid
|
||||
}
|
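Every request and response is one msgio-framed message holding a single CBOR-encoded struct of the types above. A sketch of producing the bytes for a Has request, not part of this diff; it assumes a cid c obtained elsewhere:

req := NetRpcReq{Type: NRpcHas, ID: 1, Cid: []cid.Cid{c}}
var buf bytes.Buffer
if err := req.MarshalCBOR(&buf); err != nil {
	return err
}
// buf.Bytes() is exactly what sendRpc passes to msgStream.WriteMsg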
||||
|
||||
type NetworkStore struct {
|
||||
// note: writer is thread-safe
|
||||
msgStream msgio.ReadWriteCloser
|
||||
|
||||
// atomic
|
||||
reqCount uint64
|
||||
|
||||
respLk sync.Mutex
|
||||
|
||||
// respMap is nil after store closes
|
||||
respMap map[uint64]chan<- NetRpcResp
|
||||
|
||||
closing chan struct{}
|
||||
closed chan struct{}
|
||||
|
||||
closeLk sync.Mutex
|
||||
onClose []func()
|
||||
}
|
||||
|
||||
func NewNetworkStore(mss msgio.ReadWriteCloser) *NetworkStore {
|
||||
ns := &NetworkStore{
|
||||
msgStream: mss,
|
||||
|
||||
respMap: map[uint64]chan<- NetRpcResp{},
|
||||
|
||||
closing: make(chan struct{}),
|
||||
closed: make(chan struct{}),
|
||||
}
|
||||
|
||||
go ns.receive()
|
||||
|
||||
return ns
|
||||
}
|
||||
|
||||
func (n *NetworkStore) shutdown(msg string) {
|
||||
if err := n.msgStream.Close(); err != nil {
|
||||
log.Errorw("closing netstore msg stream", "error", err)
|
||||
}
|
||||
|
||||
nerr := NetRpcErr{
|
||||
Type: NRpcErrGeneric,
|
||||
Msg: msg,
|
||||
Cid: nil,
|
||||
}
|
||||
|
||||
var errb bytes.Buffer
|
||||
if err := nerr.MarshalCBOR(&errb); err != nil {
|
||||
log.Errorw("netstore shutdown: error marshaling error", "err", err)
|
||||
}
|
||||
|
||||
n.respLk.Lock()
|
||||
for id, resps := range n.respMap {
|
||||
resps <- NetRpcResp{
|
||||
Type: NRpcErr,
|
||||
ID: id,
|
||||
Data: errb.Bytes(),
|
||||
}
|
||||
}
|
||||
|
||||
n.respMap = nil
|
||||
|
||||
n.respLk.Unlock()
|
||||
}
|
||||
|
||||
func (n *NetworkStore) OnClose(cb func()) {
|
||||
n.closeLk.Lock()
|
||||
defer n.closeLk.Unlock()
|
||||
|
||||
select {
|
||||
case <-n.closed:
|
||||
cb()
|
||||
default:
|
||||
n.onClose = append(n.onClose, cb)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NetworkStore) receive() {
|
||||
defer func() {
|
||||
n.closeLk.Lock()
|
||||
defer n.closeLk.Unlock()
|
||||
|
||||
close(n.closed)
|
||||
if n.onClose != nil {
|
||||
for _, f := range n.onClose {
|
||||
f()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-n.closing:
|
||||
n.shutdown("netstore stopping")
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
msg, err := n.msgStream.ReadMsg()
|
||||
if err != nil {
|
||||
n.shutdown(fmt.Sprintf("netstore ReadMsg: %s", err))
|
||||
return
|
||||
}
|
||||
|
||||
var resp NetRpcResp
|
||||
if err := resp.UnmarshalCBOR(bytes.NewReader(msg)); err != nil {
|
||||
n.shutdown(fmt.Sprintf("unmarshaling netstore response: %s", err))
|
||||
return
|
||||
}
|
||||
|
||||
n.msgStream.ReleaseMsg(msg)
|
||||
|
||||
n.respLk.Lock()
|
||||
if ch, ok := n.respMap[resp.ID]; ok {
|
||||
if resp.Type == NRpcMore {
|
||||
nch := make(chan NetRpcResp, 1)
|
||||
resp.next = nch
|
||||
n.respMap[resp.ID] = nch
|
||||
} else {
|
||||
delete(n.respMap, resp.ID)
|
||||
}
|
||||
|
||||
ch <- resp
|
||||
}
|
||||
n.respLk.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NetworkStore) sendRpc(rt NetRPCReqType, cids []cid.Cid, data [][]byte) (uint64, <-chan NetRpcResp, error) {
|
||||
rid := atomic.AddUint64(&n.reqCount, 1)
|
||||
|
||||
respCh := make(chan NetRpcResp, 1) // todo pool?
|
||||
|
||||
n.respLk.Lock()
|
||||
if n.respMap == nil {
|
||||
n.respLk.Unlock()
|
||||
return 0, nil, xerrors.Errorf("netstore closed")
|
||||
}
|
||||
n.respMap[rid] = respCh
|
||||
n.respLk.Unlock()
|
||||
|
||||
req := NetRpcReq{
|
||||
Type: rt,
|
||||
ID: rid,
|
||||
Cid: cids,
|
||||
Data: data,
|
||||
}
|
||||
|
||||
var rbuf bytes.Buffer // todo buffer pool
|
||||
if err := req.MarshalCBOR(&rbuf); err != nil {
|
||||
n.respLk.Lock()
|
||||
defer n.respLk.Unlock()
|
||||
|
||||
if n.respMap == nil {
|
||||
return 0, nil, xerrors.Errorf("netstore closed")
|
||||
}
|
||||
delete(n.respMap, rid)
|
||||
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
if err := n.msgStream.WriteMsg(rbuf.Bytes()); err != nil {
|
||||
n.respLk.Lock()
|
||||
defer n.respLk.Unlock()
|
||||
|
||||
if n.respMap == nil {
|
||||
return 0, nil, xerrors.Errorf("netstore closed")
|
||||
}
|
||||
delete(n.respMap, rid)
|
||||
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
return rid, respCh, nil
|
||||
}
|
||||
|
||||
func (n *NetworkStore) waitResp(ctx context.Context, rch <-chan NetRpcResp, rid uint64) (NetRpcResp, error) {
|
||||
select {
|
||||
case resp := <-rch:
|
||||
if resp.Type == NRpcErr {
|
||||
var e NetRpcErr
|
||||
if err := e.UnmarshalCBOR(bytes.NewReader(resp.Data)); err != nil {
|
||||
return NetRpcResp{}, xerrors.Errorf("unmarshaling error data: %w", err)
|
||||
}
|
||||
|
||||
var err error
|
||||
switch e.Type {
|
||||
case NRpcErrNotFound:
|
||||
if e.Cid != nil {
|
||||
err = ipld.ErrNotFound{
|
||||
Cid: *e.Cid,
|
||||
}
|
||||
} else {
|
||||
err = xerrors.Errorf("block not found, but cid was null")
|
||||
}
|
||||
case NRpcErrGeneric:
|
||||
err = xerrors.Errorf("generic error")
|
||||
default:
|
||||
err = xerrors.Errorf("unknown error type")
|
||||
}
|
||||
|
||||
return NetRpcResp{}, xerrors.Errorf("netstore error response: %s (%w)", e.Msg, err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
case <-ctx.Done():
|
||||
// todo send cancel req
|
||||
|
||||
n.respLk.Lock()
|
||||
if n.respMap != nil {
|
||||
delete(n.respMap, rid)
|
||||
}
|
||||
n.respLk.Unlock()
|
||||
|
||||
return NetRpcResp{}, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NetworkStore) Has(ctx context.Context, c cid.Cid) (bool, error) {
|
||||
req, rch, err := n.sendRpc(NRpcHas, []cid.Cid{c}, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
resp, err := n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(resp.Data) != 1 {
|
||||
return false, xerrors.Errorf("expected reposnse length to be 1 byte")
|
||||
}
|
||||
switch resp.Data[0] {
|
||||
case cbg.CborBoolTrue[0]:
|
||||
return true, nil
|
||||
case cbg.CborBoolFalse[0]:
|
||||
return false, nil
|
||||
default:
|
||||
return false, xerrors.Errorf("has: bad response: %x", resp.Data[0])
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NetworkStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
|
||||
req, rch, err := n.sendRpc(NRpcGet, []cid.Cid{c}, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return blocks.NewBlockWithCid(resp.Data, c)
|
||||
}
|
||||
|
||||
func (n *NetworkStore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error {
|
||||
req, rch, err := n.sendRpc(NRpcGet, []cid.Cid{c}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return callback(resp.Data) // todo return buf to pool
|
||||
}
|
||||
|
||||
func (n *NetworkStore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
|
||||
req, rch, err := n.sendRpc(NRpcGetSize, []cid.Cid{c}, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
resp, err := n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
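// net_serve.go encodes the size as a 4-byte little-endian uint32, so any other length is a protocol error.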
if len(resp.Data) != 4 {
|
||||
return 0, xerrors.Errorf("expected getsize response to be 4 bytes, was %d", resp.Data)
|
||||
}
|
||||
|
||||
return int(binary.LittleEndian.Uint32(resp.Data)), nil
|
||||
}
|
||||
|
||||
func (n *NetworkStore) Put(ctx context.Context, block blocks.Block) error {
|
||||
return n.PutMany(ctx, []blocks.Block{block})
|
||||
}
|
||||
|
||||
func (n *NetworkStore) PutMany(ctx context.Context, blocks []blocks.Block) error {
|
||||
// todo pool
|
||||
cids := make([]cid.Cid, len(blocks))
|
||||
blkDatas := make([][]byte, len(blocks))
|
||||
for i, block := range blocks {
|
||||
cids[i] = block.Cid()
|
||||
blkDatas[i] = block.RawData()
|
||||
}
|
||||
|
||||
req, rch, err := n.sendRpc(NRpcPut, cids, blkDatas)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NetworkStore) DeleteBlock(ctx context.Context, c cid.Cid) error {
|
||||
return n.DeleteMany(ctx, []cid.Cid{c})
|
||||
}
|
||||
|
||||
func (n *NetworkStore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
|
||||
req, rch, err := n.sendRpc(NRpcDelete, cids, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = n.waitResp(ctx, rch, req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NetworkStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
|
||||
return nil, xerrors.Errorf("not supported")
|
||||
}
|
||||
|
||||
func (n *NetworkStore) HashOnRead(enabled bool) {
|
||||
// todo
|
||||
return
|
||||
}
|
||||
|
||||
func (n *NetworkStore) Stop(ctx context.Context) error {
|
||||
close(n.closing)
|
||||
|
||||
select {
|
||||
case <-n.closed:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
var _ Blockstore = &NetworkStore{}
|
237
blockstore/net_serve.go
Normal file
@ -0,0 +1,237 @@
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
|
||||
block "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
"github.com/libp2p/go-msgio"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type NetworkStoreHandler struct {
|
||||
msgStream msgio.ReadWriteCloser
|
||||
|
||||
bs Blockstore
|
||||
}
|
||||
|
||||
// NOTE: This code isn't yet hardened to accept untrusted input. See TODOs here and in net.go
|
||||
func HandleNetBstoreStream(ctx context.Context, bs Blockstore, mss msgio.ReadWriteCloser) *NetworkStoreHandler {
|
||||
ns := &NetworkStoreHandler{
|
||||
msgStream: mss,
|
||||
bs: bs,
|
||||
}
|
||||
|
||||
go ns.handle(ctx)
|
||||
|
||||
return ns
|
||||
}
|
||||
|
||||
func (h *NetworkStoreHandler) handle(ctx context.Context) {
|
||||
defer func() {
|
||||
if err := h.msgStream.Close(); err != nil {
|
||||
log.Errorw("error closing blockstore stream", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
var req NetRpcReq
|
||||
|
||||
ms, err := h.msgStream.ReadMsg()
|
||||
if err != nil {
|
||||
log.Warnw("bstore stream err", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := req.UnmarshalCBOR(bytes.NewReader(ms)); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
h.msgStream.ReleaseMsg(ms)
|
||||
|
||||
switch req.Type {
|
||||
case NRpcHas:
|
||||
if len(req.Cid) != 1 {
|
||||
if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
res, err := h.bs.Has(ctx, req.Cid[0])
|
||||
if err != nil {
|
||||
if err := h.respondError(req.ID, err, req.Cid[0]); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var resData [1]byte
|
||||
if res {
|
||||
resData[0] = cbg.CborBoolTrue[0]
|
||||
} else {
|
||||
resData[0] = cbg.CborBoolFalse[0]
|
||||
}
|
||||
|
||||
if err := h.respond(req.ID, NRpcOK, resData[:]); err != nil {
|
||||
log.Warnw("writing response", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
case NRpcGet:
|
||||
if len(req.Cid) != 1 {
|
||||
if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err := h.bs.View(ctx, req.Cid[0], func(bdata []byte) error {
|
||||
return h.respond(req.ID, NRpcOK, bdata)
|
||||
})
|
||||
if err != nil {
|
||||
if err := h.respondError(req.ID, err, req.Cid[0]); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
case NRpcGetSize:
|
||||
if len(req.Cid) != 1 {
|
||||
if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
sz, err := h.bs.GetSize(ctx, req.Cid[0])
|
||||
if err != nil {
|
||||
if err := h.respondError(req.ID, err, req.Cid[0]); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var resData [4]byte
|
||||
binary.LittleEndian.PutUint32(resData[:], uint32(sz))
|
||||
|
||||
if err := h.respond(req.ID, NRpcOK, resData[:]); err != nil {
|
||||
log.Warnw("writing response", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
case NRpcPut:
|
||||
blocks := make([]block.Block, len(req.Cid))
|
||||
|
||||
if len(req.Cid) != len(req.Data) {
|
||||
if err := h.respondError(req.ID, xerrors.New("cid count didn't match data count"), cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for i := range req.Cid {
|
||||
blocks[i], err = block.NewBlockWithCid(req.Data[i], req.Cid[i])
|
||||
if err != nil {
|
||||
log.Warnw("make block", "error", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
err := h.bs.PutMany(ctx, blocks)
|
||||
if err != nil {
|
||||
if err := h.respondError(req.ID, err, cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err := h.respond(req.ID, NRpcOK, []byte{}); err != nil {
|
||||
log.Warnw("writing response", "error", err)
|
||||
return
|
||||
}
|
||||
case NRpcDelete:
|
||||
err := h.bs.DeleteMany(ctx, req.Cid)
|
||||
if err != nil {
|
||||
if err := h.respondError(req.ID, err, cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err := h.respond(req.ID, NRpcOK, []byte{}); err != nil {
|
||||
log.Warnw("writing response", "error", err)
|
||||
return
|
||||
}
|
||||
default:
|
||||
if err := h.respondError(req.ID, xerrors.New("unsupported request type"), cid.Undef); err != nil {
|
||||
log.Warnw("writing error response", "error", err)
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *NetworkStoreHandler) respondError(req uint64, uerr error, c cid.Cid) error {
|
||||
var resp NetRpcResp
|
||||
resp.ID = req
|
||||
resp.Type = NRpcErr
|
||||
|
||||
nerr := NetRpcErr{
|
||||
Type: NRpcErrGeneric,
|
||||
Msg: uerr.Error(),
|
||||
}
|
||||
if ipld.IsNotFound(uerr) {
|
||||
nerr.Type = NRpcErrNotFound
|
||||
nerr.Cid = &c
|
||||
}
|
||||
|
||||
var edata bytes.Buffer
|
||||
if err := nerr.MarshalCBOR(&edata); err != nil {
|
||||
return xerrors.Errorf("marshaling error data: %w", err)
|
||||
}
|
||||
|
||||
resp.Data = edata.Bytes()
|
||||
|
||||
var msg bytes.Buffer
|
||||
if err := resp.MarshalCBOR(&msg); err != nil {
|
||||
return xerrors.Errorf("marshaling error response: %w", err)
|
||||
}
|
||||
|
||||
if err := h.msgStream.WriteMsg(msg.Bytes()); err != nil {
|
||||
return xerrors.Errorf("write error response: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *NetworkStoreHandler) respond(req uint64, rt NetRPCRespType, data []byte) error {
|
||||
var resp NetRpcResp
|
||||
resp.ID = req
|
||||
resp.Type = rt
|
||||
resp.Data = data
|
||||
|
||||
var msg bytes.Buffer
|
||||
if err := resp.MarshalCBOR(&msg); err != nil {
|
||||
return xerrors.Errorf("marshaling response: %w", err)
|
||||
}
|
||||
|
||||
if err := h.msgStream.WriteMsg(msg.Bytes()); err != nil {
|
||||
return xerrors.Errorf("write response: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
63
blockstore/net_test.go
Normal file
@ -0,0 +1,63 @@
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
block "github.com/ipfs/go-block-format"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
"github.com/libp2p/go-msgio"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNetBstore(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
cr, sw := io.Pipe()
|
||||
sr, cw := io.Pipe()
|
||||
|
||||
cm := msgio.Combine(msgio.NewWriter(cw), msgio.NewReader(cr))
|
||||
sm := msgio.Combine(msgio.NewWriter(sw), msgio.NewReader(sr))
|
||||
|
||||
bbs := NewMemorySync()
|
||||
_ = HandleNetBstoreStream(ctx, bbs, sm)
|
||||
|
||||
nbs := NewNetworkStore(cm)
|
||||
|
||||
tb1 := block.NewBlock([]byte("aoeu"))
|
||||
|
||||
h, err := nbs.Has(ctx, tb1.Cid())
|
||||
require.NoError(t, err)
|
||||
require.False(t, h)
|
||||
|
||||
err = nbs.Put(ctx, tb1)
|
||||
require.NoError(t, err)
|
||||
|
||||
h, err = nbs.Has(ctx, tb1.Cid())
|
||||
require.NoError(t, err)
|
||||
require.True(t, h)
|
||||
|
||||
sz, err := nbs.GetSize(ctx, tb1.Cid())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 4, sz)
|
||||
|
||||
err = nbs.DeleteBlock(ctx, tb1.Cid())
|
||||
require.NoError(t, err)
|
||||
|
||||
h, err = nbs.Has(ctx, tb1.Cid())
|
||||
require.NoError(t, err)
|
||||
require.False(t, h)
|
||||
|
||||
_, err = nbs.Get(ctx, tb1.Cid())
|
||||
fmt.Println(err)
|
||||
require.True(t, ipld.IsNotFound(err))
|
||||
|
||||
err = nbs.Put(ctx, tb1)
|
||||
require.NoError(t, err)
|
||||
|
||||
b, err := nbs.Get(ctx, tb1.Cid())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "aoeu", string(b.RawData()))
|
||||
}
|
100
blockstore/net_ws.go
Normal file
@ -0,0 +1,100 @@
|
||||
package blockstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/libp2p/go-msgio"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type wsWrapper struct {
|
||||
wc *websocket.Conn
|
||||
|
||||
nextMsg []byte
|
||||
}
|
||||
|
||||
func (w *wsWrapper) Read(b []byte) (int, error) {
|
||||
return 0, xerrors.New("read unsupported")
|
||||
}
|
||||
|
||||
func (w *wsWrapper) ReadMsg() ([]byte, error) {
|
||||
if w.nextMsg != nil {
|
||||
nm := w.nextMsg
|
||||
w.nextMsg = nil
|
||||
return nm, nil
|
||||
}
|
||||
|
||||
mt, r, err := w.wc.NextReader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch mt {
|
||||
case websocket.BinaryMessage, websocket.TextMessage:
|
||||
default:
|
||||
return nil, xerrors.Errorf("unexpected message type")
|
||||
}
|
||||
|
||||
// todo pool
|
||||
// todo limit sizes
|
||||
var mbuf bytes.Buffer
|
||||
if _, err := mbuf.ReadFrom(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mbuf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (w *wsWrapper) ReleaseMsg(bytes []byte) {
|
||||
// todo use a pool
|
||||
}
|
||||
|
||||
func (w *wsWrapper) NextMsgLen() (int, error) {
|
||||
if w.nextMsg != nil {
|
||||
return len(w.nextMsg), nil
|
||||
}
|
||||
|
||||
mt, msg, err := w.wc.ReadMessage()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch mt {
|
||||
case websocket.BinaryMessage, websocket.TextMessage:
|
||||
default:
|
||||
return 0, xerrors.Errorf("unexpected message type")
|
||||
}
|
||||
|
||||
w.nextMsg = msg
|
||||
return len(w.nextMsg), nil
|
||||
}
|
||||
|
||||
func (w *wsWrapper) Write(bytes []byte) (int, error) {
|
||||
return 0, xerrors.New("write unsupported")
|
||||
}
|
||||
|
||||
func (w *wsWrapper) WriteMsg(bytes []byte) error {
|
||||
return w.wc.WriteMessage(websocket.BinaryMessage, bytes)
|
||||
}
|
||||
|
||||
func (w *wsWrapper) Close() error {
|
||||
return w.wc.Close()
|
||||
}
|
||||
|
||||
var _ msgio.ReadWriteCloser = &wsWrapper{}
|
||||
|
||||
func wsConnToMio(wc *websocket.Conn) msgio.ReadWriteCloser {
|
||||
return &wsWrapper{
|
||||
wc: wc,
|
||||
}
|
||||
}
|
||||
|
||||
func HandleNetBstoreWS(ctx context.Context, bs Blockstore, wc *websocket.Conn) *NetworkStoreHandler {
|
||||
return HandleNetBstoreStream(ctx, bs, wsConnToMio(wc))
|
||||
}
|
||||
|
||||
func NewNetworkStoreWS(wc *websocket.Conn) *NetworkStore {
|
||||
return NewNetworkStore(wsConnToMio(wc))
|
||||
}
|
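An end-to-end wiring sketch for the websocket helpers above, not part of this diff; the HTTP route, the Upgrader settings and the dial URL are assumptions, and it presumes code in the blockstore package with net/http imported:

var upgrader = websocket.Upgrader{}

// serving side: upgrade an incoming HTTP request and serve bs over it
func serveBstore(ctx context.Context, bs Blockstore) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		wc, err := upgrader.Upgrade(w, r, nil)
		if err != nil {
			return
		}
		_ = HandleNetBstoreWS(ctx, bs, wc)
	}
}

// client side: dial the same endpoint and use the result as a Blockstore
func dialBstore(url string) (*NetworkStore, error) {
	wc, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		return nil, err
	}
	return NewNetworkStoreWS(wc), nil
}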
@ -98,6 +98,10 @@ type Config struct {
|
||||
// and directly purges cold blocks.
|
||||
DiscardColdBlocks bool
|
||||
|
||||
// UniversalColdBlocks indicates whether all blocks being garbage collected and purged
|
||||
// from the hotstore should be written to the cold store
|
||||
UniversalColdBlocks bool
|
||||
|
||||
// HotstoreMessageRetention indicates the hotstore retention policy for messages.
|
||||
// It has the following semantics:
|
||||
// - a value of 0 will only retain messages within the compaction boundary (4 finalities)
|
||||
@ -111,21 +115,6 @@ type Config struct {
|
||||
// A positive value is the number of compactions before a full GC is performed;
|
||||
// a value of 1 will perform full GC in every compaction.
|
||||
HotStoreFullGCFrequency uint64
|
||||
|
||||
// EnableColdStoreAutoPrune turns on compaction of the cold store i.e. pruning
|
||||
// where hotstore compaction occurs every finality epochs pruning happens every 3 finalities
|
||||
// Default is false
|
||||
EnableColdStoreAutoPrune bool
|
||||
|
||||
// ColdStoreFullGCFrequency specifies how often to perform a full (moving) GC on the coldstore.
|
||||
// Only applies if auto prune is enabled. A value of 0 disables while a value of 1 will do
|
||||
// full GC in every prune.
|
||||
// Default is 7 (about once a week)
|
||||
ColdStoreFullGCFrequency uint64
|
||||
|
||||
// ColdStoreRetention specifies the retention policy for data reachable from the chain, in
|
||||
// finalities beyond the compaction boundary, default is 0, -1 retains everything
|
||||
ColdStoreRetention int64
|
||||
}
|
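A minimal sketch of the two configurations these flags describe, not part of this diff; the field names come from the struct above and every other field keeps its zero value:

// discard mode: cold blocks are purged from the hotstore without being copied anywhere
cfgDiscard := Config{DiscardColdBlocks: true}

// universal mode: every block purged from the hotstore is first written to the coldstore
cfgUniversal := Config{UniversalColdBlocks: true}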
||||
|
||||
// ChainAccessor allows the Splitstore to access the chain. It will most likely
|
||||
|
@ -125,7 +125,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error {
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}, func(cid.Cid) error { return nil })
|
||||
|
||||
if err != nil {
|
||||
err = xerrors.Errorf("error walking chain: %w", err)
|
||||
|
@ -20,7 +20,6 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
bstore "github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/metrics"
|
||||
@ -134,39 +133,6 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
|
||||
log.Infow("compaction done", "took", time.Since(start))
|
||||
}()
|
||||
// only prune if auto prune is enabled and after at least one compaction
|
||||
} else if s.cfg.EnableColdStoreAutoPrune && epoch-s.pruneEpoch > PruneThreshold && s.compactionIndex > 0 {
|
||||
s.beginTxnProtect()
|
||||
s.compactType = cold
|
||||
go func() {
|
||||
defer atomic.StoreInt32(&s.compacting, 0)
|
||||
defer s.endTxnProtect()
|
||||
|
||||
log.Info("pruning splitstore")
|
||||
start := time.Now()
|
||||
|
||||
var retainP func(int64) bool
|
||||
switch {
|
||||
case s.cfg.ColdStoreRetention > int64(0):
|
||||
retainP = func(depth int64) bool {
|
||||
return depth <= int64(CompactionBoundary)+s.cfg.ColdStoreRetention*int64(build.Finality)
|
||||
}
|
||||
case s.cfg.ColdStoreRetention < 0:
|
||||
retainP = func(_ int64) bool { return true }
|
||||
default:
|
||||
retainP = func(depth int64) bool {
|
||||
return depth <= int64(CompactionBoundary)
|
||||
}
|
||||
}
|
||||
movingGC := s.cfg.ColdStoreFullGCFrequency > 0 && s.pruneIndex%int64(s.cfg.ColdStoreFullGCFrequency) == 0
|
||||
var gcOpts []bstore.BlockstoreGCOption
|
||||
if movingGC {
|
||||
gcOpts = append(gcOpts, bstore.WithFullGC(true))
|
||||
}
|
||||
doGC := func() error { return s.gcBlockstore(s.cold, gcOpts) }
|
||||
|
||||
s.prune(curTs, retainP, doGC)
|
||||
log.Infow("prune done", "took", time.Since(start))
|
||||
}()
|
||||
} else {
|
||||
// no compaction necessary
|
||||
atomic.StoreInt32(&s.compacting, 0)
|
||||
@ -562,6 +528,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
defer markSet.Close() //nolint:errcheck
defer s.debug.Flush()

coldSet, err := s.markSetEnv.New("cold", s.markSetSize)
if err != nil {
return xerrors.Errorf("error creating cold mark set: %w", err)
}
defer coldSet.Close() //nolint:errcheck

if err := s.checkClosing(); err != nil {
return err
}
@ -580,8 +552,34 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
startMark := time.Now()

count := new(int64)
err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{},
func(c cid.Cid) error {

coldCount := new(int64)
fCold := func(c cid.Cid) error {
// Writes to cold set optimized away in universal and discard mode
//
// Nothing gets written to cold store in discard mode so no cold objects to write
// Everything not marked hot gets written to cold store in universal mode so no need to track cold objects separately
if s.cfg.DiscardColdBlocks || s.cfg.UniversalColdBlocks {
return nil
}

if isUnitaryObject(c) {
return errStopWalk
}

visit, err := coldSet.Visit(c)
if err != nil {
return xerrors.Errorf("error visiting object: %w", err)
}

if !visit {
return errStopWalk
}

atomic.AddInt64(coldCount, 1)
return nil
}
fHot := func(c cid.Cid) error {
if isUnitaryObject(c) {
return errStopWalk
}
@ -597,7 +595,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {

atomic.AddInt64(count, 1)
return nil
})
}

err = s.walkChain(curTs, boundaryEpoch, inclMsgsEpoch, &noopVisitor{}, fHot, fCold)

if err != nil {
return xerrors.Errorf("error marking: %w", err)
@ -631,8 +631,14 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
}
defer coldw.Close() //nolint:errcheck

purgew, err := NewColdSetWriter(s.discardSetPath())
if err != nil {
return xerrors.Errorf("error creating deadset: %w", err)
}
defer purgew.Close() //nolint:errcheck

// some stats for logging
var hotCnt, coldCnt int
var hotCnt, coldCnt, purgeCnt int
err = s.hot.ForEachKey(func(c cid.Cid) error {
// was it marked?
mark, err := markSet.Has(c)
@ -645,9 +651,27 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
return nil
}

// it's cold, mark it as candidate for move
// it needs to be removed from hot store, mark it as candidate for purge
if err := purgew.Write(c); err != nil {
return xerrors.Errorf("error writing cid to purge set: %w", err)
}
purgeCnt++

coldMark, err := coldSet.Has(c)
if err != nil {
return xerrors.Errorf("error checking cold mark set for %s: %w", c, err)
}

// Discard mode: coldMark == false, s.cfg.UniversalColdBlocks == false, always return here, no writes to cold store
// Universal mode: coldMark == false, s.cfg.UniversalColdBlocks == true, never stop here, all writes to cold store
// Otherwise: s.cfg.UniversalColdBlocks == false, if !coldMark stop here and don't write to cold store, if coldMark continue and write to cold store
if !coldMark && !s.cfg.UniversalColdBlocks { // universal mode means mark everything as cold
return nil
}

// it's cold, mark as candidate for move
if err := coldw.Write(c); err != nil {
return xerrors.Errorf("error writing cid to coldstore: %w", err)
return xerrors.Errorf("error writing cid to cold set")
}
coldCnt++

@ -656,7 +680,9 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
if err != nil {
return xerrors.Errorf("error collecting cold objects: %w", err)
}
if err := purgew.Close(); err != nil {
return xerrors.Errorf("error closing purge set: %w", err)
}
if err := coldw.Close(); err != nil {
return xerrors.Errorf("error closing coldset: %w", err)
}
@ -705,6 +731,12 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
}
}

purger, err := NewColdSetReader(s.discardSetPath())
if err != nil {
return xerrors.Errorf("error opening coldset: %w", err)
}
defer purger.Close() //nolint:errcheck

// 4. Purge cold objects with checkpointing for recovery.
// This is the critical section of compaction, whereby any cold object not in the markSet is
// considered already deleted.
@ -736,7 +768,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error {
// 5. purge cold objects from the hotstore, taking protected references into account
log.Info("purging cold objects from the hotstore")
startPurge := time.Now()
err = s.purge(coldr, checkpoint, markSet)
err = s.purge(purger, checkpoint, markSet)
if err != nil {
return xerrors.Errorf("error purging cold objects: %w", err)
}
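Editor's note: a hypothetical distillation of the hot-store sweep and purge introduced above, showing how a key's fate is decided from the hot mark set, the cold mark set, and the UniversalColdBlocks flag (helper and return values are illustrative, not the lotus API):

package main

import "fmt"

// classify is a hypothetical distillation of the ForEachKey body above: keys
// in the hot mark set stay put, everything else is queued for purge from the
// hotstore, and a purged key is also copied to the cold store when it is in
// the cold mark set or when universal cold blocks are enabled.
func classify(hotMarked, coldMarked, universalColdBlocks bool) string {
	if hotMarked {
		return "keep hot"
	}
	if !coldMarked && !universalColdBlocks {
		return "purge only"
	}
	return "purge and move to cold store"
}

func main() {
	fmt.Println(classify(true, false, false))  // keep hot
	fmt.Println(classify(false, true, false))  // purge and move to cold store
	fmt.Println(classify(false, false, false)) // purge only (discard-style)
	fmt.Println(classify(false, false, true))  // purge and move (universal mode)
}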
@ -864,7 +896,7 @@ func (s *SplitStore) endCriticalSection() {
}

func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch,
visitor ObjectVisitor, f func(cid.Cid) error) error {
visitor ObjectVisitor, fHot, fCold func(cid.Cid) error) error {
var walked ObjectVisitor
var mx sync.Mutex
// we copy the tipset first into a new slice, which allows us to reuse it in every epoch.
@ -886,7 +918,7 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp

atomic.AddInt64(walkCnt, 1)

if err := f(c); err != nil {
if err := fHot(c); err != nil {
return err
}

@ -904,27 +936,37 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp
if inclMsgs < inclState {
// we need to use walkObjectIncomplete here, as messages/receipts may be missing early on if we
// synced from snapshot and have a long HotStoreMessageRetentionPolicy.
if err := s.walkObjectIncomplete(hdr.Messages, visitor, f, stopWalk); err != nil {
if err := s.walkObjectIncomplete(hdr.Messages, visitor, fHot, stopWalk); err != nil {
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
}

if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, f, stopWalk); err != nil {
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fHot, stopWalk); err != nil {
return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}
} else {
if err := s.walkObject(hdr.Messages, visitor, f); err != nil {
if err := s.walkObject(hdr.Messages, visitor, fHot); err != nil {
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
}

if err := s.walkObject(hdr.ParentMessageReceipts, visitor, f); err != nil {
if err := s.walkObject(hdr.ParentMessageReceipts, visitor, fHot); err != nil {
return xerrors.Errorf("error walking message receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}
}
}

// messages and receipts outside of inclMsgs are included in the cold store
if hdr.Height < inclMsgs && hdr.Height > 0 {
if err := s.walkObjectIncomplete(hdr.Messages, visitor, fCold, stopWalk); err != nil {
return xerrors.Errorf("error walking messages (cid: %s): %w", hdr.Messages, err)
}
if err := s.walkObjectIncomplete(hdr.ParentMessageReceipts, visitor, fCold, stopWalk); err != nil {
return xerrors.Errorf("error walking messages receipts (cid: %s): %w", hdr.ParentMessageReceipts, err)
}
}

// state is only retained if within the inclState boundary, with the exception of genesis
if hdr.Height >= inclState || hdr.Height == 0 {
if err := s.walkObject(hdr.ParentStateRoot, visitor, f); err != nil {
if err := s.walkObject(hdr.ParentStateRoot, visitor, fHot); err != nil {
return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err)
}
atomic.AddInt64(scanCnt, 1)
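Editor's note: with walkChain now taking separate hot and cold callbacks, the hunk above routes a header's linked objects by height. A compressed, hypothetical sketch of just that routing (traversal, visitors, and CID handling omitted; the genesis special case for messages is assumed from the surrounding code):

package main

import "fmt"

// route says which callback a block header's linked objects would feed in the
// walk sketched above; it is an illustration of the boundary checks only.
func route(height, inclState, inclMsgs int64) (msgsTo, stateTo string) {
	msgsTo, stateTo = "skipped", "skipped"
	if height >= inclMsgs || height == 0 {
		msgsTo = "fHot" // messages/receipts inside the boundary are marked hot
	} else if height > 0 {
		msgsTo = "fCold" // older messages/receipts are still tracked for the cold store
	}
	if height >= inclState || height == 0 {
		stateTo = "fHot" // state roots within the state boundary (or genesis) stay hot
	}
	return
}

func main() {
	fmt.Println(route(100, 50, 80)) // fHot fHot
	fmt.Println(route(60, 50, 80))  // fCold fHot
	fmt.Println(route(10, 50, 80))  // fCold skipped
}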
@ -1296,7 +1338,7 @@ func (s *SplitStore) coldSetPath() string {
return filepath.Join(s.path, "coldset")
}

func (s *SplitStore) deadSetPath() string {
func (s *SplitStore) discardSetPath() string {
return filepath.Join(s.path, "deadset")
}
|
||||
|
@ -208,7 +208,7 @@ func (s *SplitStore) doPrune(curTs *types.TipSet, retainStateP func(int64) bool,
|
||||
log.Info("collecting dead objects")
|
||||
startCollect := time.Now()
|
||||
|
||||
deadw, err := NewColdSetWriter(s.deadSetPath())
|
||||
deadw, err := NewColdSetWriter(s.discardSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error creating coldset: %w", err)
|
||||
}
|
||||
@ -267,7 +267,7 @@ func (s *SplitStore) doPrune(curTs *types.TipSet, retainStateP func(int64) bool,
|
||||
return err
|
||||
}
|
||||
|
||||
deadr, err := NewColdSetReader(s.deadSetPath())
|
||||
deadr, err := NewColdSetReader(s.discardSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error opening deadset: %w", err)
|
||||
}
|
||||
@ -311,10 +311,10 @@ func (s *SplitStore) doPrune(curTs *types.TipSet, retainStateP func(int64) bool,
|
||||
log.Warnf("error removing checkpoint: %s", err)
|
||||
}
|
||||
if err := deadr.Close(); err != nil {
|
||||
log.Warnf("error closing deadset: %s", err)
|
||||
log.Warnf("error closing discard set: %s", err)
|
||||
}
|
||||
if err := os.Remove(s.deadSetPath()); err != nil {
|
||||
log.Warnf("error removing deadset: %s", err)
|
||||
if err := os.Remove(s.discardSetPath()); err != nil {
|
||||
log.Warnf("error removing discard set: %s", err)
|
||||
}
|
||||
|
||||
// we are done; do some housekeeping
|
||||
@ -344,7 +344,7 @@ func (s *SplitStore) completePrune() error {
|
||||
}
|
||||
defer checkpoint.Close() //nolint:errcheck
|
||||
|
||||
deadr, err := NewColdSetReader(s.deadSetPath())
|
||||
deadr, err := NewColdSetReader(s.discardSetPath())
|
||||
if err != nil {
|
||||
return xerrors.Errorf("error opening deadset: %w", err)
|
||||
}
|
||||
@ -378,7 +378,7 @@ func (s *SplitStore) completePrune() error {
|
||||
if err := deadr.Close(); err != nil {
|
||||
log.Warnf("error closing deadset: %s", err)
|
||||
}
|
||||
if err := os.Remove(s.deadSetPath()); err != nil {
|
||||
if err := os.Remove(s.discardSetPath()); err != nil {
|
||||
log.Warnf("error removing deadset: %s", err)
|
||||
}
|
||||
|
||||
|
@ -38,6 +38,7 @@ func init() {
|
||||
func testSplitStore(t *testing.T, cfg *Config) {
|
||||
ctx := context.Background()
|
||||
chain := &mockChain{t: t}
|
||||
fmt.Printf("Config: %v\n", cfg)
|
||||
|
||||
// the myriads of stores
|
||||
ds := dssync.MutexWrap(datastore.NewMapDatastore())
|
||||
@ -225,7 +226,7 @@ func TestSplitStoreCompaction(t *testing.T) {
|
||||
//stm: @SPLITSTORE_SPLITSTORE_OPEN_001, @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_PUT_001, @SPLITSTORE_SPLITSTORE_ADD_PROTECTOR_001
|
||||
//stm: @SPLITSTORE_SPLITSTORE_CLOSE_001
|
||||
testSplitStore(t, &Config{MarkSetType: "map"})
|
||||
testSplitStore(t, &Config{MarkSetType: "map", UniversalColdBlocks: true})
|
||||
}
|
||||
|
||||
func TestSplitStoreCompactionWithBadger(t *testing.T) {
|
||||
@ -237,7 +238,7 @@ func TestSplitStoreCompactionWithBadger(t *testing.T) {
|
||||
t.Cleanup(func() {
|
||||
badgerMarkSetBatchSize = bs
|
||||
})
|
||||
testSplitStore(t, &Config{MarkSetType: "badger"})
|
||||
testSplitStore(t, &Config{MarkSetType: "badger", UniversalColdBlocks: true})
|
||||
}
|
||||
|
||||
func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
|
||||
@ -283,7 +284,7 @@ func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) {
|
||||
path := t.TempDir()
|
||||
|
||||
// open the splitstore
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map", UniversalColdBlocks: true})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -422,7 +423,7 @@ func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore.
|
||||
|
||||
path := t.TempDir()
|
||||
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map", UniversalColdBlocks: true})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -522,7 +523,7 @@ func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blocks
|
||||
|
||||
path := t.TempDir()
|
||||
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map"})
|
||||
ss, err := Open(path, ds, hot, cold, &Config{MarkSetType: "map", UniversalColdBlocks: true})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
@ -110,7 +110,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
|
||||
mx.Unlock()
|
||||
|
||||
return nil
|
||||
})
|
||||
}, func(cid.Cid) error { return nil })
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -13,6 +13,7 @@ var (
|
||||
b0 = blocks.NewBlock([]byte("abc"))
|
||||
b1 = blocks.NewBlock([]byte("foo"))
|
||||
b2 = blocks.NewBlock([]byte("bar"))
|
||||
b3 = blocks.NewBlock([]byte("baz"))
|
||||
)
|
||||
|
||||
func TestUnionBlockstore_Get(t *testing.T) {
|
||||
|
BIN build/actors/v10.tar.zst (new file, binary not shown)
BIN build/actors/v9.tar.zst (new file, binary not shown)
@ -1,2 +1,2 @@
|
||||
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWSUZhAY3eyoPUboJ1ZWe4dNPFWTr1EPoDjbTDSAN15uhY
|
||||
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWDfvNrSRVGWAGbn3sm9C8z98W2x25qCZjaXGHXmGiH24e
|
||||
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWKeDMuJbouvypr1nL2qRruhNVXzv4QiLsZRh6gnvLkc7p
|
||||
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWSsACNHLGoJbPqeitNY7tom19Nxq8x5ag36eTwmgcAeLo
|
||||
|
@ -1,4 +1,4 @@
|
||||
/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWJkikQQkxS58spo76BYzFt4fotaT5NpV2zngvrqm4u5ow
|
||||
/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWLce5FDHR4EX4CrYavphA5xS3uDsX6aoowXh5tzDUxJav
|
||||
/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWA9hFfQG9GjP6bHeuQQbMD3FDtZLdW1NayxKXUT26PQZu
|
||||
/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWMHDi3LVTFG8Szqogt7RkNXvonbQYqSazxBx41A5aeuVz
|
||||
/dns4/bootstrap-0.calibration.fildev.network/tcp/1347/p2p/12D3KooWCi2w8U4DDB9xqrejb5KYHaQv2iA2AJJ6uzG3iQxNLBMy
|
||||
/dns4/bootstrap-1.calibration.fildev.network/tcp/1347/p2p/12D3KooWDTayrBojBn9jWNNUih4nNQQBGJD7Zo3gQCKgBkUsS6dp
|
||||
/dns4/bootstrap-2.calibration.fildev.network/tcp/1347/p2p/12D3KooWNRxTHUn8bf7jz1KEUPMc2dMgGfa4f8ZJTsquVSn3vHCG
|
||||
/dns4/bootstrap-3.calibration.fildev.network/tcp/1347/p2p/12D3KooWFWUqE9jgXvcKHWieYs9nhyp6NF4ftwLGAHm4sCv73jjK
|
||||
|
@ -18,6 +18,8 @@ import (
|
||||
"github.com/ipld/go-car"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
|
||||
"github.com/filecoin-project/lotus/blockstore"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
@ -28,14 +30,14 @@ var embeddedBuiltinActorReleases embed.FS
|
||||
|
||||
func init() {
|
||||
if BundleOverrides == nil {
|
||||
BundleOverrides = make(map[actors.Version]string)
|
||||
BundleOverrides = make(map[actorstypes.Version]string)
|
||||
}
|
||||
for _, av := range actors.Versions {
|
||||
path := os.Getenv(fmt.Sprintf("LOTUS_BUILTIN_ACTORS_V%d_BUNDLE", av))
|
||||
if path == "" {
|
||||
continue
|
||||
}
|
||||
BundleOverrides[actors.Version(av)] = path
|
||||
BundleOverrides[actorstypes.Version(av)] = path
|
||||
}
|
||||
if err := loadManifests(NetworkBundle); err != nil {
|
||||
panic(err)
|
||||
@ -55,7 +57,7 @@ func UseNetworkBundle(netw string) error {
|
||||
}
|
||||
|
||||
func loadManifests(netw string) error {
|
||||
overridden := make(map[actors.Version]struct{})
|
||||
overridden := make(map[actorstypes.Version]struct{})
|
||||
var newMetadata []*BuiltinActorsMetadata
|
||||
// First, prefer overrides.
|
||||
for av, path := range BundleOverrides {
|
||||
@ -94,7 +96,7 @@ func loadManifests(netw string) error {
|
||||
|
||||
type BuiltinActorsMetadata struct {
|
||||
Network string
|
||||
Version actors.Version
|
||||
Version actorstypes.Version
|
||||
ManifestCid cid.Cid
|
||||
Actors map[string]cid.Cid
|
||||
}
|
||||
@ -182,7 +184,7 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata,
|
||||
}
|
||||
bundles = append(bundles, &BuiltinActorsMetadata{
|
||||
Network: name,
|
||||
Version: actors.Version(version),
|
||||
Version: actorstypes.Version(version),
|
||||
ManifestCid: root,
|
||||
Actors: actorCids,
|
||||
})
|
||||
@ -229,7 +231,7 @@ func readBundleManifest(r io.Reader) (cid.Cid, map[string]cid.Cid, error) {
|
||||
}
|
||||
|
||||
// GetEmbeddedBuiltinActorsBundle returns the builtin-actors bundle for the given actors version.
|
||||
func GetEmbeddedBuiltinActorsBundle(version actors.Version) ([]byte, bool) {
|
||||
func GetEmbeddedBuiltinActorsBundle(version actorstypes.Version) ([]byte, bool) {
|
||||
fi, err := embeddedBuiltinActorReleases.Open(fmt.Sprintf("actors/v%d.tar.zst", version))
|
||||
if err != nil {
|
||||
return nil, false
|
||||
|
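Editor's note: the init path above honors per-version bundle overrides through LOTUS_BUILTIN_ACTORS_V<N>_BUNDLE environment variables. A self-contained sketch of how such an override map can be populated; only the env var naming pattern comes from the diff, the rest is illustrative:

package main

import (
	"fmt"
	"os"
)

// Version is a stand-in for actorstypes.Version.
type Version int

// collectBundleOverrides scans the environment for per-version bundle paths,
// mirroring the loop in the diff: any LOTUS_BUILTIN_ACTORS_V<N>_BUNDLE that is
// set points the corresponding actors version at a local bundle file.
func collectBundleOverrides(versions []Version) map[Version]string {
	overrides := make(map[Version]string)
	for _, av := range versions {
		path := os.Getenv(fmt.Sprintf("LOTUS_BUILTIN_ACTORS_V%d_BUNDLE", av))
		if path == "" {
			continue
		}
		overrides[av] = path
	}
	return overrides
}

func main() {
	_ = os.Setenv("LOTUS_BUILTIN_ACTORS_V9_BUNDLE", "/tmp/builtin-actors-v9.car")
	fmt.Println(collectBundleOverrides([]Version{8, 9, 10}))
}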
@ -23,6 +23,45 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzacec6xctjxybp7r3kkhase56o6jsaiua7ure5ttu2xfuojt4jhlsoa6"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacec2hcqlqcfacylfcrhhliwkisvh4y3adwt47xkf2gdvodwu6ccepc"),
|
||||
},
|
||||
}, {
|
||||
Network: "butterflynet",
|
||||
Version: 9,
|
||||
ManifestCid: MustParseCid("bafy2bzacec35by4erhcdgcsgzp7yb3j57utydlxxfc73m3k5pep67ehvvyv6i"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceajsdln7v4chxqoukiw7lxw6aexg5qdsaex2hgelz2sbu24iblhzg"),
|
||||
"cron": MustParseCid("bafk2bzacecgrwmgnqhybn3l23uvwf2n2vrcfjrprfzgd44uxers2pgr5mhsue"),
|
||||
"datacap": MustParseCid("bafk2bzacebyier2ceh27acbrq2ccv4efvzotl6qntnlrxdsrik6i4tembz6qw"),
|
||||
"init": MustParseCid("bafk2bzaceberhto43wnf4pklkd4c7d36kzslngyzyms4op7shxuswv3dtvfxu"),
|
||||
"multisig": MustParseCid("bafk2bzaceaclpbrhoqdruvsuqqgknvy2k5dywzmjoehk4uarce3uvt3w2rewu"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacedzp56g5cg73oilloak3kf7u667rdkd5pgnhe2cljmr3o7ykcrzuk"),
|
||||
"reward": MustParseCid("bafk2bzacebczbwfbbi6mvppbjcozatasjiaohvjjiqcy65ccuuyyw3xiixhk2"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceawqexy6t2ybzh3jjwhbs7icbg5vqnedbbge4e4r4pfp7spkcadsu"),
|
||||
"storageminer": MustParseCid("bafk2bzacearemd7pn2jj26fdtqd4di27lfhpng3vp5chepm7qnmdzgiqr6wfi"),
|
||||
"storagepower": MustParseCid("bafk2bzaceddc7fiaxfobfegqaobf5xinjgmhsa5iu4yi6klvc3jmjimcdvgyg"),
|
||||
"system": MustParseCid("bafk2bzacedylltr57b2n6zpadh4i2c2kis4fzzvhao3kgvfaggrrbqyacew7q"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecjkesz766626ab4svnzpq3jfs26a75vfktlfaku5fjdao2eyiqyq"),
|
||||
},
|
||||
}, {
|
||||
Network: "butterflynet",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzaceciz4ytt5gnn6gc4epez7v6xeg6efkgbvwfxkoa34o2gj3hp5f7zc"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacedavorwsriewoddjlaganjpsk3o7zfts2wyid3clv5xnctacg37j2"),
|
||||
"cron": MustParseCid("bafk2bzacebtauucwaewxuzgxfpjtmn6xt3kya4om4ugyprlkhhkde76h7fkqg"),
|
||||
"datacap": MustParseCid("bafk2bzacebzdjapqwasq6woxkgq2nm2nre3v7cl2754xwiuo2cfhvsceq4cba"),
|
||||
"eam": MustParseCid("bafk2bzacecmr4zdbpfnemvgo446qby7x4y4v5cbfespt3f6ousv2hxnflyrlk"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzacebuewexvig54cuvsvwn4k4zr36tm2q5fel4ezq4v7363n2lmn362k"),
|
||||
"init": MustParseCid("bafk2bzacebww5gsctsk5hack2alkt4kh55bmpb4ywzbyyhoaskryymjj3snj6"),
|
||||
"multisig": MustParseCid("bafk2bzacec5k4wxvou34pyjd5kcsrbsfnlk4k753kkscg3ron2r7tsxollfsq"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebzdeaxglaqpmegalakmxr6secjd24mu5llo4ctoy7pvom5upyuvs"),
|
||||
"reward": MustParseCid("bafk2bzaceb4hyabxnyrrsno5erqqwk5ynnjibblzfcaq3aotlz3ek4uu6dyla"),
|
||||
"storagemarket": MustParseCid("bafk2bzacedpocbf2lg2x2jg6arw2argnwmvo2hyjqvpkrgfu4khz5mtlzxz2o"),
|
||||
"storageminer": MustParseCid("bafk2bzaceacrumah7jdfc62bmvemob4lsh5yiohwodest2cgxakgnn24cenlk"),
|
||||
"storagepower": MustParseCid("bafk2bzaceaxz6n5nywermfptnz6dc53vqsa42lic4rf66l4irm3mqfj4ak5ps"),
|
||||
"system": MustParseCid("bafk2bzaceb4w5bblgyu25ylytpmfrixjsk2ra6emd44j4mv42xfxbwnqloyzi"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedbz2koeb6teewobcjdpgfv7qdae7utgoka6wzlkf6gronnis2nn2"),
|
||||
},
|
||||
}, {
|
||||
Network: "calibrationnet",
|
||||
Version: 8,
|
||||
@ -40,6 +79,45 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzaceaqrkllksxv2jsfgjvmuewx5vbzrammw5mdscod6gkdr3ijih2q64"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceaihibfu625lbtzdp3tcftscshrmbgghgrc7kzqhxn4455pycpdkm"),
|
||||
},
|
||||
}, {
|
||||
Network: "calibrationnet",
|
||||
Version: 9,
|
||||
ManifestCid: MustParseCid("bafy2bzacedbedgynklc4dgpyxippkxmba2mgtw7ecntoneclsvvl4klqwuyyy"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceavfgpiw6whqigmskk74z4blm22nwjfnzxb4unlqz2e4wg3c5ujpw"),
|
||||
"cron": MustParseCid("bafk2bzaceb7hxmudhvkizszbmmf2ur2qfnfxfkok3xmbrlifylx6huw4bb3s4"),
|
||||
"datacap": MustParseCid("bafk2bzaceanmwcfjfj65xy275rrfqqgoblnuqirdg6zwhc6qhbfhpphomvceu"),
|
||||
"init": MustParseCid("bafk2bzaceczqxpivlxifdo5ohr2rx5ny4uyvssm6tkf7am357xm47x472yxu2"),
|
||||
"multisig": MustParseCid("bafk2bzacec6gmi7ucukr3bk67akaxwngohw3lsg3obvdazhmfhdzflkszk3tg"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacec4kg3bfjtssvv2b4wizlbdk3pdtrg5aknzgeb3a6rmksgurpynca"),
|
||||
"reward": MustParseCid("bafk2bzacebpptqhcw6mcwdj576dgpryapdd2zfexxvqzlh3aoc24mabwgmcss"),
|
||||
"storagemarket": MustParseCid("bafk2bzacebkfcnc27d3agm2bhzzbvvtbqahmvy2b2nf5xyj4aoxehow3bules"),
|
||||
"storageminer": MustParseCid("bafk2bzacebz4na3nq4gmumghegtkaofrv4nffiihd7sxntrryfneusqkuqodm"),
|
||||
"storagepower": MustParseCid("bafk2bzaceburxajojmywawjudovqvigmos4dlu4ifdikogumhso2ca2ccaleo"),
|
||||
"system": MustParseCid("bafk2bzaceaue3nzucbom3tcclgyaahy3iwvbqejsxrohiquakvvsjgbw3shac"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebh7dj6j7yi5vadh7lgqjtq42qi2uq4n6zy2g5vjeathacwn2tscu"),
|
||||
},
|
||||
}, {
|
||||
Network: "calibrationnet",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzaced7wbd43lvgc55xb37mkoo4ppev6ig4jj4j7dtswtjfjq4u5qmpck"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacecq4owv5begvryvpsy4atfb2jnf7g7o4hxovtdb5a4jfkzacownli"),
|
||||
"cron": MustParseCid("bafk2bzaced4uz5w5h5wksx4end27lphd4qc4kh7q336uyt46lba5ddynwftya"),
|
||||
"datacap": MustParseCid("bafk2bzacedoc7y4s5n3p2zo4bcmafcrellkakn2e3uyf5wb3mtbuqhvwqn2l4"),
|
||||
"eam": MustParseCid("bafk2bzacealpqjgz5qmucm3v6z6hn36igx7zijixhqrxwoj3g4bdgvyml3adi"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzacedmlmyy2efbt4qk5ighawiychklhzc6pzyiwvpijwvxoq3xyxlgxw"),
|
||||
"init": MustParseCid("bafk2bzaceaqcfmfylwdemq5bdcelydpf6iqfct4p7b2zwtmqyhuxn522yvic2"),
|
||||
"multisig": MustParseCid("bafk2bzacebuh55hkbkobmmoaoduruss5nsh6e2gtqtdbqsmw6e7k5vg6heyrm"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacedcpzw7prdoxnaclcvmtwr6yf54zi4bzzwe5w3xknh72ji6p3qfc6"),
|
||||
"reward": MustParseCid("bafk2bzaced74ym6j424zzbr6millasfcyl3r4zm5fnauasrwn3ti6fdarbkym"),
|
||||
"storagemarket": MustParseCid("bafk2bzacec7delr2q42yj4wu3daa5xjz4zezeivphtx3xwyvpgwpdnfoevhh2"),
|
||||
"storageminer": MustParseCid("bafk2bzaced7isnew5lhu237pdtwaqmbv65qqvfmmnve2c5yfobtfqw2fptuvc"),
|
||||
"storagepower": MustParseCid("bafk2bzacebe5frk6gcgzcvzkxavhhbs3id3iyacybn7y7gxwzgl5t6zawzswg"),
|
||||
"system": MustParseCid("bafk2bzacectivaezqijucle5s2f7xeui5uxig7bnk7fe4vsvz3xu7agjtb2ge"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceczgwckte4exultjxyzgzoo6m6r5coyphnlappi4clethhhybslxc"),
|
||||
},
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
Version: 8,
|
||||
@ -57,6 +135,45 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzacebu47th3xerlngqavlipb6cfu2utljkxxzgadc3totogto2tmx2jc"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceci3niq3rmbcmepgn27zvlgci6d5t4dvthx3pbmmx3wcu5elova6i"),
|
||||
},
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
Version: 9,
|
||||
ManifestCid: MustParseCid("bafy2bzacedo6tmei6rzjaaddh2yffe5xgr6w4smnadofjhomc3saiv3ubplqe"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacebb32htqlwcwiotyvtbeehfmluu2ubjnepo57gelelwitudrstwba"),
|
||||
"cron": MustParseCid("bafk2bzaceatvkww7soy4a6onu6xhe7pzkdzkqw46ywuu56yv3ncl76xpotzqu"),
|
||||
"datacap": MustParseCid("bafk2bzaced57nk7i7w6qmbosy4gd6atme6yppesdgjllou6nppbti5yw6glcg"),
|
||||
"init": MustParseCid("bafk2bzacedtoputbtz573ytg4yo5wbbg7fbhrzplux4uknxrb2jarifcuxxou"),
|
||||
"multisig": MustParseCid("bafk2bzacec22z3xz45mbwgtliwkj7ngc43bervnt557c6dqsg6aesatpd5isy"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacedym7xnaxr2igfq72rttj2adqyqqfxk3j4qovp2bcwqk5paoe4t7e"),
|
||||
"reward": MustParseCid("bafk2bzacedemsmbmbtk5toprmm6jivjq3wkxumavc65vpvm6ngspgjfkth7z6"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecb53mmklf4rbv263dvufqj3nsf7mi6zk2tjlgwmzbr633kw3ds3w"),
|
||||
"storageminer": MustParseCid("bafk2bzacea3wljpn2ixgnd4lovr6yckiwd652ytcrz5amgj47lg6drjhgggqa"),
|
||||
"storagepower": MustParseCid("bafk2bzaceakvohgvovpeldb6hjfg7readxo37a5h4qauis4nz6pte7mcll6c2"),
|
||||
"system": MustParseCid("bafk2bzacecisuqj2ln7ep72xaejvs2lrgh2logc7retxxpd3qvobymwyz7bxo"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacebyjosiripwqyf56yhjfs5hg26mch7totsqth4rgpt5j32hqg6ric"),
|
||||
},
|
||||
}, {
|
||||
Network: "caterpillarnet",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzacea5csj2os7h76a6yvf6shgpwkysawijxemk5uvvzejxrwjo6ir4yg"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacea7tpruyxdgyz4xa7curiphwdw4abmspft3ee24puruazdcl3tq5c"),
|
||||
"cron": MustParseCid("bafk2bzacebc6kkj7kzsicm5baszjgd37b4b3kijsffqmmkhhjlyd7zhkwfcqm"),
|
||||
"datacap": MustParseCid("bafk2bzaceddcmwl6po2jd3tfkkgv4zvub7i47gsx33pkqdspqhgvhe4npc4as"),
|
||||
"eam": MustParseCid("bafk2bzaceccsvcww2rmqnh4plkq6oapqaeqbhydrtup54z4dwunolz5tpgtb4"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzacea5sig3zpxfkqppoj3t344cvuhzvkx6ge2isgdzc34rfpng2ogdje"),
|
||||
"init": MustParseCid("bafk2bzacedtby353aho7itoyoj7w6moydmigjm3sgy6djgnfxqehlpae4vcc2"),
|
||||
"multisig": MustParseCid("bafk2bzacedyguvwz5zfveqoqicn3j6lkdzipf247nhvdi6dvmahulr7nzgox6"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceavaatmmnsz3v3ksopcbu6jx4iq7u7nnmqbclsiabsfkfu3zfpmka"),
|
||||
"reward": MustParseCid("bafk2bzacecrphs4avteik4yejsqwkpy5bcqramdhnzykbfq3uu2qalj2p26ti"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceajby2jb5m3fenzarum374zxdzuyrpkspfljwovu7c3hvyceqd5sa"),
|
||||
"storageminer": MustParseCid("bafk2bzacebqtn7jdvk756ighri5ajro6gjepnef3c6rxupbbgkth62zytiy5s"),
|
||||
"storagepower": MustParseCid("bafk2bzacedwlo32brlalpovfkkk7qwo3ou2kpgv2bf7fioy5srn7uejmn7n46"),
|
||||
"system": MustParseCid("bafk2bzacebbt63h26x5vw5fdo2pmdb4q65u3t6lilkugvmjar6zfsc7ethxsi"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecr5kbyypdxnxlepzk5sji2k72t454vto5ok4owfcuwfpeyivjtu4"),
|
||||
},
|
||||
}, {
|
||||
Network: "devnet",
|
||||
Version: 8,
|
||||
@ -74,6 +191,45 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzacecf7eta2stfd3cnuxzervd33imbvlaqq6b5tsho7pxmhifrybreru"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceaajgtglewgitshgdi2nzrvq7eihjtyqj5yiamesqun2hujl3xev2"),
|
||||
},
|
||||
}, {
|
||||
Network: "devnet",
|
||||
Version: 9,
|
||||
ManifestCid: MustParseCid("bafy2bzacedozk3jh2j4nobqotkbofodq4chbrabioxbfrygpldgoxs3zwgggk"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaced5llqnqqhypolyuogz3h2wjomugqkrhyhocvly3aoib4c5xiush6"),
|
||||
"cron": MustParseCid("bafk2bzaceahwdt32ji53mo5yz6imvztz3s3g2ra5uz3jdfa77j7hqcnq6r4l2"),
|
||||
"datacap": MustParseCid("bafk2bzaceabcxoy5iscdierasorjoj6xzqgnnb5pmrr7prkuibw4yggx3v2d2"),
|
||||
"init": MustParseCid("bafk2bzaceastwn42kqyztz7uzej7l4lemp5nakqqsfvksry7k75q5ombhprme"),
|
||||
"multisig": MustParseCid("bafk2bzacebeiygkjupkpfxcrsidci4bvn6afkvx4lsj3ut3ywhsj654pzfgk4"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacedhsdoo4ww47rm44pizu5qqpho753cizzbbvnd5yz3nm3347su5cy"),
|
||||
"reward": MustParseCid("bafk2bzacebzqvisqe3iaodtxq7l2lgzwfkxznrnp676ddpllqcpvuae5i33le"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceduauegz4nniegh667btjhg2anipwpxeb664s4ossq2ifvuqwqlso"),
|
||||
"storageminer": MustParseCid("bafk2bzacec23wjdmbm5pt6pqsbjb3w6j7vyrolijz2mysvp6clllfgpmhb6ge"),
|
||||
"storagepower": MustParseCid("bafk2bzacebnyywv46n2ghg62inllwpmnyuwtoz57fn5lpgpf436mahajg4qrg"),
|
||||
"system": MustParseCid("bafk2bzacebgafb6h2o2g5whrujc2uvsttrussyc5t56rvhrjqkqhzdu4jopwa"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacednorhcy446agy7ecpmfms2u4aoa3mj2eqomffuoerbik5yavrxyi"),
|
||||
},
|
||||
}, {
|
||||
Network: "devnet",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzacea73thrlpfejrswlcu5uhe7rcgdewvmrcwoef6jzngsba3i4v5ibi"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceau2o55aripm7kqrbzzog72zcduv5psnxzpohx5rdkykepc4z7aag"),
|
||||
"cron": MustParseCid("bafk2bzacec5qc5xluwikf4lolfa4oe356iwep25tiezbxfdyg5jib54rhlh6q"),
|
||||
"datacap": MustParseCid("bafk2bzacebo47u6q3xou5exsecjpa4rpfqjfm7vyhz4qlr3nk7p46trsk4occ"),
|
||||
"eam": MustParseCid("bafk2bzacea6yeptevserd7ayf4ahokor4sdpizpxpbqwkuvvhzdkon672shsm"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzacebi46zgjili4luu3nqy6mno5k4skvo4cvs7genhkdfaukhtw7xirw"),
|
||||
"init": MustParseCid("bafk2bzacedvf2bij6jovem2dfzkz347yvmydxj7vlgaiagosz5t3c5jyy43zu"),
|
||||
"multisig": MustParseCid("bafk2bzacecukolwx6y5pcajnxg2aawiubgxo5zyj24a23zg5t4qu3k4qbofh4"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacecwyih7nodrwsw5vyl5zk7fapklje76jpowqjr6x6br2bm55smqqy"),
|
||||
"reward": MustParseCid("bafk2bzacea6vfrcprxg2i4l5qnigf4c6pyvnjxpzfqr4pmph3elif7sfidrei"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceahradb3od4ahs46x6yriwvm36iabgtohhoiolubsumto5eravzbu"),
|
||||
"storageminer": MustParseCid("bafk2bzacedekivqgvqapbepvzn6jte3xyymyg5yjuwy42xvboa6rcqnzgo74u"),
|
||||
"storagepower": MustParseCid("bafk2bzacedkmiosllqqqarmr53twspyswdvsm7givwczgo3qqsxzpad4hzjma"),
|
||||
"system": MustParseCid("bafk2bzaceagdymtxb4lxqqjgmnphbgdtdgveuuqaouswpzagj4bpbon3ptop4"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacec556wsqldm22k2abshvvnsrawlm3bbqkwzht6ubcj76m2jsy3azi"),
|
||||
},
|
||||
}, {
|
||||
Network: "mainnet",
|
||||
Version: 8,
|
||||
@ -91,6 +247,45 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzacedwq5uppsw7vp55zpj7jdieizirmldceehu6wvombw3ixq2tcq57w"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceb3zbkjz3auizmoln2unmxep7dyfcmsre64vnqfhdyh7rkqfoxlw4"),
|
||||
},
|
||||
}, {
|
||||
Network: "mainnet",
|
||||
Version: 9,
|
||||
ManifestCid: MustParseCid("bafy2bzaceb6j6666h36xnhksu3ww4kxb6e25niayfgkdnifaqi6m6ooc66i6i"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacect2p7urje3pylrrrjy3tngn6yaih4gtzauuatf2jllk3ksgfiw2y"),
|
||||
"cron": MustParseCid("bafk2bzacebcec3lffmos3nawm5cvwehssxeqwxixoyyfvejy7viszzsxzyu26"),
|
||||
"datacap": MustParseCid("bafk2bzacebb6uy2ys7tapekmtj7apnjg7oyj4ia5t7tlkvbmwtxwv74lb2pug"),
|
||||
"init": MustParseCid("bafk2bzacebtdq4zyuxk2fzbdkva6kc4mx75mkbfmldplfntayhbl5wkqou33i"),
|
||||
"multisig": MustParseCid("bafk2bzacec4va3nmugyqjqrs3lqyr2ij67jhjia5frvx7omnh2isha6abxzya"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebhdvjbjcgupklddfavzef4e4gnkt3xk3rbmgfmk7xhecszhfxeds"),
|
||||
"reward": MustParseCid("bafk2bzacebezgbbmcm2gbcqwisus5fjvpj7hhmu5ubd37phuku3hmkfulxm2o"),
|
||||
"storagemarket": MustParseCid("bafk2bzacec3j7p6gklk64stax5px3xxd7hdtejaepnd4nw7s2adihde6emkcu"),
|
||||
"storageminer": MustParseCid("bafk2bzacedyux5hlrildwutvvjdcsvjtwsoc5xnqdjl73ouiukgklekeuyfl4"),
|
||||
"storagepower": MustParseCid("bafk2bzacedsetphfajgne4qy3vdrpyd6ekcmtfs2zkjut4r34cvnuoqemdrtw"),
|
||||
"system": MustParseCid("bafk2bzaceagvlo2jtahj7dloshrmwfulrd6e2izqev32qm46eumf754weec6c"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacecf3yodlyudzukumehbuabgqljyhjt5ifiv4vetcfohnvsxzynwga"),
|
||||
},
|
||||
}, {
|
||||
Network: "mainnet",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzaceduyggnyqhlr346hfw32tbobzrvhzhill33zhe7jw64pmwjci2xoc"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzacedmr3wxl7qmhquageorrt3aavbzqfpm7eymxidakwuhaobu7dseqs"),
|
||||
"cron": MustParseCid("bafk2bzaceblekxapm5nnqnxmw3mk27236iyutvbhhpsc3fyde7zi7guccn7cc"),
|
||||
"datacap": MustParseCid("bafk2bzacedu4jevyvqsilq7bq4uhegbkm75muwebc5ifqpfaojwhexf2j4i6a"),
|
||||
"eam": MustParseCid("bafk2bzacedc7224twbolvdq6iwc7ybdpah2ywe3ueo33jv67ecimndinle374"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzaceaggldo6wmkvp5innv4pnjv4xnpedspzofvma3dhu7vk45hh5djoq"),
|
||||
"init": MustParseCid("bafk2bzacedutlaebaczkdi4vqvt3xim24u3whleqk2r4lufjd5jnmxcosea6q"),
|
||||
"multisig": MustParseCid("bafk2bzaceatiqxjwtugpzus3s52zoggnrftxqn7kiw3obvjgkjvtd6zr3636q"),
|
||||
"paymentchannel": MustParseCid("bafk2bzacebyviac6i43gtsvmjfg6mzcp6rwgz44axidc7m432btbmvt7i2m2g"),
|
||||
"reward": MustParseCid("bafk2bzacecbcnlvk2izojpfoaksitqenhzaofn6ynxx5pegl4y45wjlouexdi"),
|
||||
"storagemarket": MustParseCid("bafk2bzacebobteeoz2jycplgtydfyltzughegz2sopn6pzy2udjfvuo77joyk"),
|
||||
"storageminer": MustParseCid("bafk2bzacecwcypas3y6u4rya7qolfwmou437xgrjxh7mnnim7bo3nhk4dscxw"),
|
||||
"storagepower": MustParseCid("bafk2bzacec62kids6rcrdmdeqhwiz3s5rs35s5gn25ilwemgmm6jqnr2rnaaq"),
|
||||
"system": MustParseCid("bafk2bzacecj3c4bjbs2xfttn7zqle7yocqh47u2s7hwuxrsn7fi5h74tcyxoc"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacedgf7zbnlste5ukzueduemkimiit64scz7lvebztufx5jxtx6gkz2"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing",
|
||||
Version: 8,
|
||||
@ -108,6 +303,45 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzaced6kjkbv7lrb2qwq5we2hqaxc6ztch5p52g27qtjy45zdemsk4b7m"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacectzxvtoselhnzsair5nv6k5vokvegnht6z2lfee4p3xexo4kg4m6"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing",
|
||||
Version: 9,
|
||||
ManifestCid: MustParseCid("bafy2bzacecnnrmekqw2xvud46g3vo6x26cogh3ydgljqajlxqxzzbuxsjlwjm"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceaiebfiuu76zoywzltelio2zuvsavirka27ur6kspn7scvcl5cuiy"),
|
||||
"cron": MustParseCid("bafk2bzacecla36w3tbwap5jgdtooxsud25mdpc75kgtjs34mi4xhwygph2gki"),
|
||||
"datacap": MustParseCid("bafk2bzaced5h3ct6i7oqpyimkj3hwdywmux5tslu5vs2ywbzruqmxjtqczygs"),
|
||||
"init": MustParseCid("bafk2bzaceauxqpspnvui7dryuvfgzoogatbkbahp4ovaih734blwi4bassnlm"),
|
||||
"multisig": MustParseCid("bafk2bzaceddfagxfpsihjxq7yt4ditv2tcoou5w4hzbsapadlw3v44cxfcqpi"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaced4nc4ofrbqevpwrt7fnf3beshi5ccrecq3zojt2sxgrkz7ebnbh4"),
|
||||
"reward": MustParseCid("bafk2bzacedxleepeg4ei3jnayzcfz6shi25rrvoyhr6fxmkdezq4owrazi7rq"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceakqcjpppg3exrr7dru7jglvno2xyw4hsuebxay4lvrzvmwmv5kvu"),
|
||||
"storageminer": MustParseCid("bafk2bzacealfvphicwnysmmyyerseppyvydy2reisvbft46vdprp2lnfvlgqc"),
|
||||
"storagepower": MustParseCid("bafk2bzaceageil5b5mr5uwo6vqs4nnnmpiwe3fkjffzyngcicuu7gruuwapjm"),
|
||||
"system": MustParseCid("bafk2bzacedo4pu3iwx2gu72hinsstpiokhl5iicnb3rumzffsnhy7zhmnxhyy"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceatmqip2o3ausbntvdhj7yemu6hb3b5yqv6hm42gylbbmz7geocpm"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzacearlgbespxi2zdrybtp2rrbwscmtbyou5qa2egbdvcz6v2yjjqvjo"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceba6me5ipkcijuhyypnzjydhv3ebi2ctailar7mtzlk4vk3rbxfee"),
|
||||
"cron": MustParseCid("bafk2bzacea6k2mai2xnakygqvbigivfrvv5q7d34qrzjv2crkqtwbjxnxmkbe"),
|
||||
"datacap": MustParseCid("bafk2bzaceah4oxcgck6bcfkzctm2klpvmltyidq7uxnlkcap6ypi3lnkcvrqk"),
|
||||
"eam": MustParseCid("bafk2bzacedjtkvocrnkrot2oztsfrxtpwl32wwbmbkrjfbbm4xipwzrhhxn5c"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzaced6vhabkr2ojpjzsybrq5yvksjzpjk6yei6fwobkwwydlj5f473pw"),
|
||||
"init": MustParseCid("bafk2bzaceaib3o5e7wop7kwjirgpferqarmngrgjkur2yhdnwplctidpxsgme"),
|
||||
"multisig": MustParseCid("bafk2bzaced4z3awacxumq6yr33a3adu2legb7colahgvqpmigs3fvvjxs3byc"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceb6mfi24mpzt7qlkratj2tdtqo7aia67zcztuslrxcjaycz6fnai6"),
|
||||
"reward": MustParseCid("bafk2bzacebngh5kwtem4ncarpjtxhs4rwyoficttkgxlsjtiz5ucdi4p3czoc"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecnsibyil62jfq2gbkoe6c2epehfcrxzjmqjnwz7kxab2hkbu3lks"),
|
||||
"storageminer": MustParseCid("bafk2bzacedzw4vkrt3sdkhagpvn62pknyyjkcrzewncvtvae5qgwe6ulzx4a4"),
|
||||
"storagepower": MustParseCid("bafk2bzacedxgadibot6nzvripqt3z5shvjsoscupinejnsvswq4cbeskblwyy"),
|
||||
"system": MustParseCid("bafk2bzacedm24avrmp5o5odhpad43qeglooflygwh4ah7qnzbij2h4c3v6cge"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceapq3j6ww3ofytwq3pz3obumaqsyg3wrm6tksdh7op23a72co3rya"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing-fake-proofs",
|
||||
Version: 8,
|
||||
@ -125,4 +359,43 @@ var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMet
|
||||
"system": MustParseCid("bafk2bzaced6kjkbv7lrb2qwq5we2hqaxc6ztch5p52g27qtjy45zdemsk4b7m"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzacectzxvtoselhnzsair5nv6k5vokvegnht6z2lfee4p3xexo4kg4m6"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing-fake-proofs",
|
||||
Version: 9,
|
||||
ManifestCid: MustParseCid("bafy2bzacecql2gj2tri4fnbznmldue73qzt6zszvugw4exd64mwb52zrhv7k2"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceaiebfiuu76zoywzltelio2zuvsavirka27ur6kspn7scvcl5cuiy"),
|
||||
"cron": MustParseCid("bafk2bzacecla36w3tbwap5jgdtooxsud25mdpc75kgtjs34mi4xhwygph2gki"),
|
||||
"datacap": MustParseCid("bafk2bzaced5h3ct6i7oqpyimkj3hwdywmux5tslu5vs2ywbzruqmxjtqczygs"),
|
||||
"init": MustParseCid("bafk2bzaceauxqpspnvui7dryuvfgzoogatbkbahp4ovaih734blwi4bassnlm"),
|
||||
"multisig": MustParseCid("bafk2bzaceddfagxfpsihjxq7yt4ditv2tcoou5w4hzbsapadlw3v44cxfcqpi"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaced4nc4ofrbqevpwrt7fnf3beshi5ccrecq3zojt2sxgrkz7ebnbh4"),
|
||||
"reward": MustParseCid("bafk2bzacedxleepeg4ei3jnayzcfz6shi25rrvoyhr6fxmkdezq4owrazi7rq"),
|
||||
"storagemarket": MustParseCid("bafk2bzaceakqcjpppg3exrr7dru7jglvno2xyw4hsuebxay4lvrzvmwmv5kvu"),
|
||||
"storageminer": MustParseCid("bafk2bzaceab3cjrwwwfemyc5lw73w6tibpgxtx3wuzjhami6tvhcvetygdm7m"),
|
||||
"storagepower": MustParseCid("bafk2bzaceafemwhsy3e7ueqsrn3f7n53vdqkvfbig3hgbw7eohsefnfvgq7yc"),
|
||||
"system": MustParseCid("bafk2bzacedo4pu3iwx2gu72hinsstpiokhl5iicnb3rumzffsnhy7zhmnxhyy"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceatmqip2o3ausbntvdhj7yemu6hb3b5yqv6hm42gylbbmz7geocpm"),
|
||||
},
|
||||
}, {
|
||||
Network: "testing-fake-proofs",
|
||||
Version: 10,
|
||||
ManifestCid: MustParseCid("bafy2bzacea4irr2oxhclwt4mvtrevbzb7mbqddcebjz7bkqjq6eoflpfhencc"),
|
||||
Actors: map[string]cid.Cid{
|
||||
"account": MustParseCid("bafk2bzaceba6me5ipkcijuhyypnzjydhv3ebi2ctailar7mtzlk4vk3rbxfee"),
|
||||
"cron": MustParseCid("bafk2bzacea6k2mai2xnakygqvbigivfrvv5q7d34qrzjv2crkqtwbjxnxmkbe"),
|
||||
"datacap": MustParseCid("bafk2bzaceah4oxcgck6bcfkzctm2klpvmltyidq7uxnlkcap6ypi3lnkcvrqk"),
|
||||
"eam": MustParseCid("bafk2bzacedjtkvocrnkrot2oztsfrxtpwl32wwbmbkrjfbbm4xipwzrhhxn5c"),
|
||||
"embryo": MustParseCid("bafk2bzacebj2mj5zlcs3yjlgpbznzistfjkdlwaoncjziliqrxqavvz4dcvnk"),
|
||||
"evm": MustParseCid("bafk2bzaced6vhabkr2ojpjzsybrq5yvksjzpjk6yei6fwobkwwydlj5f473pw"),
|
||||
"init": MustParseCid("bafk2bzaceaib3o5e7wop7kwjirgpferqarmngrgjkur2yhdnwplctidpxsgme"),
|
||||
"multisig": MustParseCid("bafk2bzaced4z3awacxumq6yr33a3adu2legb7colahgvqpmigs3fvvjxs3byc"),
|
||||
"paymentchannel": MustParseCid("bafk2bzaceb6mfi24mpzt7qlkratj2tdtqo7aia67zcztuslrxcjaycz6fnai6"),
|
||||
"reward": MustParseCid("bafk2bzacebngh5kwtem4ncarpjtxhs4rwyoficttkgxlsjtiz5ucdi4p3czoc"),
|
||||
"storagemarket": MustParseCid("bafk2bzacecnsibyil62jfq2gbkoe6c2epehfcrxzjmqjnwz7kxab2hkbu3lks"),
|
||||
"storageminer": MustParseCid("bafk2bzaceb4grddnw54gczgcdak5a2gqvwed66mhibbug6qu4jy35bf45jltg"),
|
||||
"storagepower": MustParseCid("bafk2bzacedp2dnbk4bg3hhaeztre4q3jv7eqs267rlafszpggb2njjn3x5eru"),
|
||||
"system": MustParseCid("bafk2bzacedm24avrmp5o5odhpad43qeglooflygwh4ah7qnzbij2h4c3v6cge"),
|
||||
"verifiedregistry": MustParseCid("bafk2bzaceapq3j6ww3ofytwq3pz3obumaqsyg3wrm6tksdh7op23a72co3rya"),
|
||||
},
|
||||
}}
|
||||
|
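Editor's note: the generated table above keys builtin-actors bundles by network, version, and actor name. A hedged sketch of the kind of lookup a consumer of such a table might perform (types simplified; not the lotus API):

package main

import "fmt"

// metadata is a simplified stand-in for BuiltinActorsMetadata.
type metadata struct {
	Network string
	Version int
	Actors  map[string]string // actor name -> CID string, simplified
}

// findActorCid scans the embedded metadata for a given network, version and
// actor name, which is how a table like the one above is typically consumed.
func findActorCid(table []metadata, network string, version int, name string) (string, bool) {
	for _, m := range table {
		if m.Network != network || m.Version != version {
			continue
		}
		c, ok := m.Actors[name]
		return c, ok
	}
	return "", false
}

func main() {
	table := []metadata{{
		Network: "butterflynet",
		Version: 9,
		Actors:  map[string]string{"system": "bafk2bzacedylltr57b2n6zpadh4i2c2kis4fzzvhao3kgvfaggrrbqyacew7q"},
	}}
	fmt.Println(findActorCid(table, "butterflynet", 9, "system"))
}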
@ -5,6 +5,8 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
)
|
||||
@ -19,16 +21,18 @@ func TestEmbeddedMetadata(t *testing.T) {
|
||||
|
||||
// Test that we're registering the manifest correctly.
|
||||
func TestRegistration(t *testing.T) {
|
||||
manifestCid, found := actors.GetManifest(actors.Version8)
|
||||
for _, av := range []actorstypes.Version{actorstypes.Version8, actorstypes.Version9} {
|
||||
manifestCid, found := actors.GetManifest(av)
|
||||
require.True(t, found)
|
||||
require.True(t, manifestCid.Defined())
|
||||
|
||||
for _, key := range actors.GetBuiltinActorsKeys() {
|
||||
actorCid, found := actors.GetActorCodeID(actors.Version8, key)
|
||||
for _, key := range actors.GetBuiltinActorsKeys(av) {
|
||||
actorCid, found := actors.GetActorCodeID(av, key)
|
||||
require.True(t, found)
|
||||
name, version, found := actors.GetActorMetaByCode(actorCid)
|
||||
require.True(t, found)
|
||||
require.Equal(t, actors.Version8, version)
|
||||
require.Equal(t, av, version)
|
||||
require.Equal(t, key, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -69,6 +69,10 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
|
||||
ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`,
|
||||
},
|
||||
DrandIncentinet: {
|
||||
Servers: []string{
|
||||
"https://dev1.drand.sh",
|
||||
"https://dev2.drand.sh",
|
||||
},
|
||||
ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`,
|
||||
},
|
||||
}
|
||||
|
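Editor's note: DrandConfigs above keys beacon configuration by DrandEnum, and the params files below pair it with a DrandSchedule map from activation epoch to network. A hypothetical sketch of selecting the active entry for an epoch (the example schedule values are made up):

package main

import "fmt"

// activeDrandNetwork picks, for a given epoch, the schedule entry whose start
// epoch is the highest one not after it. This is a hypothetical illustration
// of how a schedule map can be consumed, not the lotus API.
func activeDrandNetwork(schedule map[int64]string, epoch int64) string {
	bestStart := int64(-1)
	active := ""
	for start, network := range schedule {
		if start <= epoch && start > bestStart {
			bestStart = start
			active = network
		}
	}
	return active
}

func main() {
	schedule := map[int64]string{0: "mainnet", 51000: "incentinet"}
	fmt.Println(activeDrandNetwork(schedule, 100))   // mainnet
	fmt.Println(activeDrandNetwork(schedule, 60000)) // incentinet
}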
Binary files not shown.
@ -10,9 +10,9 @@ import (
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
)
|
||||
|
||||
@ -20,9 +20,9 @@ const BootstrappersFile = ""
|
||||
const GenesisFile = ""
|
||||
|
||||
var NetworkBundle = "devnet"
|
||||
var BundleOverrides map[actors.Version]string
|
||||
var BundleOverrides map[actorstypes.Version]string
|
||||
|
||||
const GenesisNetworkVersion = network.Version16
|
||||
const GenesisNetworkVersion = network.Version18
|
||||
|
||||
var UpgradeBreezeHeight = abi.ChainEpoch(-1)
|
||||
|
||||
@ -56,6 +56,8 @@ var UpgradeOhSnapHeight = abi.ChainEpoch(-18)
|
||||
|
||||
var UpgradeSkyrHeight = abi.ChainEpoch(-19)
|
||||
|
||||
var UpgradeSharkHeight = abi.ChainEpoch(-20)
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
@ -107,6 +109,7 @@ func init() {
|
||||
UpgradeChocolateHeight = getUpgradeHeight("LOTUS_CHOCOLATE_HEIGHT", UpgradeChocolateHeight)
|
||||
UpgradeOhSnapHeight = getUpgradeHeight("LOTUS_OHSNAP_HEIGHT", UpgradeOhSnapHeight)
|
||||
UpgradeSkyrHeight = getUpgradeHeight("LOTUS_SKYR_HEIGHT", UpgradeSkyrHeight)
|
||||
UpgradeSharkHeight = getUpgradeHeight("LOTUS_SHARK_HEIGHT", UpgradeSharkHeight)
|
||||
|
||||
BuildType |= Build2k
|
||||
|
||||
|
@ -8,10 +8,10 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
)
|
||||
|
||||
@ -19,10 +19,10 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
const GenesisNetworkVersion = network.Version15
|
||||
const GenesisNetworkVersion = network.Version16
|
||||
|
||||
var NetworkBundle = "butterflynet"
|
||||
var BundleOverrides map[actors.Version]string
|
||||
var BundleOverrides map[actorstypes.Version]string
|
||||
|
||||
const BootstrappersFile = "butterflynet.pi"
|
||||
const GenesisFile = "butterflynet.car"
|
||||
@ -47,10 +47,9 @@ const UpgradeNorwegianHeight = -14
|
||||
const UpgradeTurboHeight = -15
|
||||
const UpgradeHyperdriveHeight = -16
|
||||
const UpgradeChocolateHeight = -17
|
||||
|
||||
const UpgradeOhSnapHeight = -18
|
||||
|
||||
const UpgradeSkyrHeight = abi.ChainEpoch(50)
|
||||
const UpgradeSkyrHeight = -19
|
||||
const UpgradeSharkHeight = abi.ChainEpoch(600)
|
||||
|
||||
var SupportedProofTypes = []abi.RegisteredSealProof{
|
||||
abi.RegisteredSealProof_StackedDrg512MiBV1,
|
||||
|
@ -4,14 +4,17 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
)
|
||||
|
||||
@ -22,7 +25,7 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
const GenesisNetworkVersion = network.Version0
|
||||
|
||||
var NetworkBundle = "calibrationnet"
|
||||
var BundleOverrides map[actors.Version]string
|
||||
var BundleOverrides map[actorstypes.Version]string
|
||||
|
||||
const BootstrappersFile = "calibnet.pi"
|
||||
const GenesisFile = "calibnet.car"
|
||||
@ -58,13 +61,13 @@ const UpgradeTurboHeight = 390
|
||||
|
||||
const UpgradeHyperdriveHeight = 420
|
||||
|
||||
const UpgradeChocolateHeight = 312746
|
||||
const UpgradeChocolateHeight = 450
|
||||
|
||||
// 2022-02-10T19:23:00Z
|
||||
const UpgradeOhSnapHeight = 682006
|
||||
const UpgradeOhSnapHeight = 480
|
||||
|
||||
// 2022-06-16T17:30:00Z
|
||||
const UpgradeSkyrHeight = 1044660
|
||||
const UpgradeSkyrHeight = 510
|
||||
|
||||
const UpgradeSharkHeight = 16800 // 6 days after genesis
|
||||
|
||||
var SupportedProofTypes = []abi.RegisteredSealProof{
|
||||
abi.RegisteredSealProof_StackedDrg32GiBV1,
|
||||
@ -84,13 +87,28 @@ func init() {
|
||||
|
||||
Devnet = true
|
||||
|
||||
// NOTE: DO NOT change this unless you REALLY know what you're doing. This is not consensus critical, however,
// setting this value too high may impact your block submission; setting it too low may cause you to miss
// parent tipsets for block forming and mining.
|
||||
if len(os.Getenv("PROPAGATION_DELAY_SECS")) != 0 {
|
||||
pds, err := strconv.ParseUint(os.Getenv("PROPAGATION_DELAY_SECS"), 10, 64)
|
||||
if err != nil {
|
||||
log.Warnw("Error setting PROPAGATION_DELAY_SECS, %v, proceed with default value %s", err,
|
||||
PropagationDelaySecs)
|
||||
} else {
|
||||
PropagationDelaySecs = pds
|
||||
log.Warnw(" !!WARNING!! propagation delay is set to be %s second, "+
|
||||
"this value impacts your message republish interval and block forming - monitor with caution!!", PropagationDelaySecs)
|
||||
}
|
||||
}
|
||||
|
||||
BuildType = BuildCalibnet
|
||||
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
||||
const PropagationDelaySecs = uint64(6)
|
||||
var PropagationDelaySecs = uint64(10)
|
||||
|
||||
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
|
||||
const BootstrapPeerThreshold = 4
|
||||
|
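Editor's note: the calibnet init above lets PROPAGATION_DELAY_SECS override the default propagation delay and falls back, with a warning, when the value does not parse. A stripped-down sketch of that env-override pattern (the helper name is illustrative):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// envUint64 returns the value of an environment variable parsed as uint64, or
// the supplied default when the variable is unset or malformed. This mirrors
// the PROPAGATION_DELAY_SECS handling in the diff in a simplified form.
func envUint64(name string, def uint64) uint64 {
	raw := os.Getenv(name)
	if raw == "" {
		return def
	}
	v, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		fmt.Printf("invalid %s=%q, keeping default %d: %v\n", name, raw, def, err)
		return def
	}
	return v
}

func main() {
	_ = os.Setenv("PROPAGATION_DELAY_SECS", "12")
	fmt.Println(envUint64("PROPAGATION_DELAY_SECS", 10)) // 12
}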
@ -11,20 +11,20 @@ import (
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
)
|
||||
|
||||
var NetworkBundle = "caterpillarnet"
|
||||
var BundleOverrides map[actors.Version]string
|
||||
var BundleOverrides map[actorstypes.Version]string
|
||||
|
||||
const BootstrappersFile = "interopnet.pi"
|
||||
const GenesisFile = "interopnet.car"
|
||||
|
||||
const GenesisNetworkVersion = network.Version15
|
||||
const GenesisNetworkVersion = network.Version16
|
||||
|
||||
var UpgradeBreezeHeight = abi.ChainEpoch(-1)
|
||||
|
||||
@ -34,26 +34,22 @@ var UpgradeSmokeHeight = abi.ChainEpoch(-1)
|
||||
var UpgradeIgnitionHeight = abi.ChainEpoch(-2)
|
||||
var UpgradeRefuelHeight = abi.ChainEpoch(-3)
|
||||
var UpgradeTapeHeight = abi.ChainEpoch(-4)
|
||||
|
||||
var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
|
||||
var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
|
||||
|
||||
var UpgradeKumquatHeight = abi.ChainEpoch(-7)
|
||||
var UpgradeCalicoHeight = abi.ChainEpoch(-9)
|
||||
var UpgradePersianHeight = abi.ChainEpoch(-10)
|
||||
var UpgradeOrangeHeight = abi.ChainEpoch(-11)
|
||||
var UpgradeClausHeight = abi.ChainEpoch(-12)
|
||||
|
||||
var UpgradeTrustHeight = abi.ChainEpoch(-13)
|
||||
|
||||
var UpgradeNorwegianHeight = abi.ChainEpoch(-14)
|
||||
|
||||
var UpgradeTurboHeight = abi.ChainEpoch(-15)
|
||||
|
||||
var UpgradeHyperdriveHeight = abi.ChainEpoch(-16)
|
||||
var UpgradeChocolateHeight = abi.ChainEpoch(-17)
|
||||
var UpgradeOhSnapHeight = abi.ChainEpoch(-18)
|
||||
var UpgradeSkyrHeight = abi.ChainEpoch(100)
|
||||
var UpgradeSkyrHeight = abi.ChainEpoch(-19)
|
||||
|
||||
const UpgradeSharkHeight = abi.ChainEpoch(99999999999999)
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
@ -107,6 +103,7 @@ func init() {
|
||||
UpgradeChocolateHeight = getUpgradeHeight("LOTUS_CHOCOLATE_HEIGHT", UpgradeChocolateHeight)
|
||||
UpgradeOhSnapHeight = getUpgradeHeight("LOTUS_OHSNAP_HEIGHT", UpgradeOhSnapHeight)
|
||||
UpgradeSkyrHeight = getUpgradeHeight("LOTUS_SKYR_HEIGHT", UpgradeSkyrHeight)
|
||||
UpgradeSharkHeight = getUpgradeHeight("LOTUS_SHARK_HEIGHT", UpgradeSharkHeight)
|
||||
|
||||
BuildType |= BuildInteropnet
|
||||
SetAddressNetwork(address.Testnet)
|
||||
|
@ -6,13 +6,13 @@ package build
|
||||
import (
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
)
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
@ -23,7 +23,7 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
var NetworkBundle = "mainnet"
|
||||
|
||||
// NOTE: DO NOT change this unless you REALLY know what you're doing. This is consensus critical.
|
||||
var BundleOverrides map[actors.Version]string
|
||||
var BundleOverrides map[actorstypes.Version]string
|
||||
|
||||
const GenesisNetworkVersion = network.Version0
|
||||
|
||||
@ -77,23 +77,41 @@ const UpgradeChocolateHeight = 1231620
|
||||
const UpgradeOhSnapHeight = 1594680
|
||||
|
||||
// 2022-07-06T14:00:00Z
|
||||
var UpgradeSkyrHeight = abi.ChainEpoch(1960320)
|
||||
const UpgradeSkyrHeight = 1960320
|
||||
|
||||
// 2022-11-30T14:00:00Z
|
||||
var UpgradeSharkHeight = abi.ChainEpoch(2383680)
|
||||
|
||||
var SupportedProofTypes = []abi.RegisteredSealProof{
|
||||
abi.RegisteredSealProof_StackedDrg32GiBV1,
|
||||
abi.RegisteredSealProof_StackedDrg64GiBV1,
|
||||
}
|
||||
var ConsensusMinerMinPower = abi.NewStoragePower(10 << 40)
|
||||
var MinVerifiedDealSize = abi.NewStoragePower(1 << 20)
|
||||
var PreCommitChallengeDelay = abi.ChainEpoch(150)
|
||||
var PropagationDelaySecs = uint64(10)
|
||||
|
||||
func init() {
|
||||
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
|
||||
SetAddressNetwork(address.Mainnet)
|
||||
}
|
||||
|
||||
if os.Getenv("LOTUS_DISABLE_SKYR") == "1" {
|
||||
UpgradeSkyrHeight = math.MaxInt64
|
||||
if os.Getenv("LOTUS_DISABLE_SHARK") == "1" {
|
||||
UpgradeSharkHeight = math.MaxInt64
|
||||
}
|
||||
|
||||
// NOTE: DO NOT change this unless you REALLY know what you're doing. This is not consensus critical, however,
// setting this value too high may impact your block submission; setting it too low may cause you to miss
// parent tipsets for block forming and mining.
|
||||
if len(os.Getenv("PROPAGATION_DELAY_SECS")) != 0 {
|
||||
pds, err := strconv.ParseUint(os.Getenv("PROPAGATION_DELAY_SECS"), 10, 64)
|
||||
if err != nil {
|
||||
log.Warnw("Error setting PROPAGATION_DELAY_SECS, %v, proceed with default value %s", err,
|
||||
PropagationDelaySecs)
|
||||
} else {
|
||||
PropagationDelaySecs = pds
|
||||
log.Warnw(" !!WARNING!! propagation delay is set to be %s second, "+
|
||||
"this value impacts your message republish interval and block forming - monitor with caution!!", PropagationDelaySecs)
|
||||
}
|
||||
}
|
||||
|
||||
Devnet = false
|
||||
@ -103,8 +121,6 @@ func init() {
|
||||
|
||||
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
|
||||
|
||||
const PropagationDelaySecs = uint64(6)
|
||||
|
||||
// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start
|
||||
const BootstrapPeerThreshold = 4
|
||||
|
||||
|
@ -26,14 +26,14 @@ const UnixfsLinksPerLevel = 1024
|
||||
|
||||
const AllowableClockDriftSecs = uint64(1)
|
||||
|
||||
// TODO: This is still terrible...What's the impact of updating this before mainnet actually upgrades
|
||||
// Used by tests and some obscure tooling
|
||||
/* inline-gen template
|
||||
|
||||
const NewestNetworkVersion = network.Version{{.latestNetworkVersion}}
|
||||
const TestNetworkVersion = network.Version{{.latestNetworkVersion}}
|
||||
|
||||
/* inline-gen start */
|
||||
|
||||
const NewestNetworkVersion = network.Version16
|
||||
const TestNetworkVersion = network.Version18
|
||||
|
||||
/* inline-gen end */
|
||||
|
||||
@ -118,8 +118,9 @@ const VerifSigCacheSize = 32000
|
||||
// TODO: If this is gonna stay, it should move to specs-actors
|
||||
const BlockMessageLimit = 10000
|
||||
|
||||
const BlockGasLimit = 10_000_000_000
|
||||
const BlockGasTarget = BlockGasLimit / 2
|
||||
var BlockGasLimit = int64(10_000_000_000)
|
||||
var BlockGasTarget = BlockGasLimit / 2
|
||||
|
||||
const BaseFeeMaxChangeDenom = 8 // 12.5%
|
||||
const InitialBaseFee = 100e6
|
||||
const MinimumBaseFee = 100
|
||||
|
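Editor's note: turning BlockGasLimit and BlockGasTarget into int64 vars, as the hunk above does, allows runtime adjustment, for example by tests that want cheaper blocks. A small sketch of the kind of override this enables (the helper is hypothetical, not part of lotus):

package main

import "fmt"

// Package-level knobs modeled on the diff: vars instead of consts so they can
// be adjusted at runtime.
var (
	BlockGasLimit  = int64(10_000_000_000)
	BlockGasTarget = BlockGasLimit / 2
)

// withBlockGasLimit temporarily overrides the limit, restoring it afterwards.
func withBlockGasLimit(limit int64, fn func()) {
	prevLimit, prevTarget := BlockGasLimit, BlockGasTarget
	BlockGasLimit, BlockGasTarget = limit, limit/2
	defer func() { BlockGasLimit, BlockGasTarget = prevLimit, prevTarget }()
	fn()
}

func main() {
	withBlockGasLimit(1_000_000, func() {
		fmt.Println(BlockGasLimit, BlockGasTarget) // 1000000 500000
	})
	fmt.Println(BlockGasLimit, BlockGasTarget) // back to defaults
}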
@@ -13,10 +13,10 @@ import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-state-types/abi"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	"github.com/filecoin-project/go-state-types/network"
	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"

	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/policy"
)

@@ -107,6 +107,7 @@ var (
	UpgradeChocolateHeight abi.ChainEpoch = -16
	UpgradeOhSnapHeight    abi.ChainEpoch = -17
	UpgradeSkyrHeight      abi.ChainEpoch = -18
	UpgradeSharkHeight     abi.ChainEpoch = -19

	DrandSchedule = map[abi.ChainEpoch]DrandEnum{
		0: DrandMainnet,
@@ -114,10 +115,10 @@ var (

	GenesisNetworkVersion = network.Version0
	NetworkBundle         = "devnet"
	BundleOverrides map[actors.Version]string
	BundleOverrides map[actorstypes.Version]string

	NewestNetworkVersion       = network.Version15
	ActorUpgradeNetworkVersion = network.Version15
	NewestNetworkVersion       = network.Version16
	ActorUpgradeNetworkVersion = network.Version16

	Devnet      = true
	ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
@@ -37,7 +37,7 @@ func BuildTypeString() string {
}

// BuildVersion is the local build version
const BuildVersion = "1.17.2-dev"
const BuildVersion = "1.19.1-dev"

func UserVersion() string {
	if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@@ -4,6 +4,7 @@ import (
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	actorstypes "github.com/filecoin-project/go-state-types/actors"
	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
	builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
@@ -14,10 +15,10 @@ import (
)

// GetActorCodeID looks up a builtin actor's code CID by actor version and canonical actor name.
func GetActorCodeID(av Version, name string) (cid.Cid, bool) {
func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {

	// Actors V8 and above
	if av >= Version8 {
	if av >= actorstypes.Version8 {
		if cids, ok := GetActorCodeIDsFromManifest(av); ok {
			c, ok := cids[name]
			return c, ok
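A hedged usage sketch of the updated signature: resolving a builtin actor's code CID by version and key. The import paths and identifiers are the ones visible in this diff; the surrounding program is illustrative, and it assumes the v8+ actor bundle manifest may not be loaded, hence the ok check.

package main

import (
	"fmt"

	actorstypes "github.com/filecoin-project/go-state-types/actors"

	"github.com/filecoin-project/lotus/chain/actors"
)

func main() {
	// For actors v8 and above the CID comes from the bundled manifest; for
	// v0 to v7 it is the hard-coded specs-actors code ID, as in the switch
	// that follows this hunk.
	code, ok := actors.GetActorCodeID(actorstypes.Version8, actors.AccountKey)
	if !ok {
		fmt.Println("account actor code CID not found for version 8")
		return
	}
	fmt.Println("account actor v8 code CID:", code)
}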
@ -30,275 +31,275 @@ func GetActorCodeID(av Version, name string) (cid.Cid, bool) {
|
||||
case AccountKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.AccountActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.AccountActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.AccountActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.AccountActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.AccountActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.AccountActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.AccountActorCodeID, true
|
||||
}
|
||||
|
||||
case CronKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.CronActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.CronActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.CronActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.CronActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.CronActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.CronActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.CronActorCodeID, true
|
||||
}
|
||||
|
||||
case InitKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.InitActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.InitActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.InitActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.InitActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.InitActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.InitActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.InitActorCodeID, true
|
||||
}
|
||||
|
||||
case MarketKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.StorageMarketActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.StorageMarketActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.StorageMarketActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.StorageMarketActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.StorageMarketActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.StorageMarketActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.StorageMarketActorCodeID, true
|
||||
}
|
||||
|
||||
case MinerKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.StorageMinerActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.StorageMinerActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.StorageMinerActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.StorageMinerActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.StorageMinerActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.StorageMinerActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.StorageMinerActorCodeID, true
|
||||
}
|
||||
|
||||
case MultisigKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.MultisigActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.MultisigActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.MultisigActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.MultisigActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.MultisigActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.MultisigActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.MultisigActorCodeID, true
|
||||
}
|
||||
|
||||
case PaychKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.PaymentChannelActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.PaymentChannelActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.PaymentChannelActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.PaymentChannelActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.PaymentChannelActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.PaymentChannelActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.PaymentChannelActorCodeID, true
|
||||
}
|
||||
|
||||
case PowerKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.StoragePowerActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.StoragePowerActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.StoragePowerActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.StoragePowerActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.StoragePowerActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.StoragePowerActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.StoragePowerActorCodeID, true
|
||||
}
|
||||
|
||||
case RewardKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.RewardActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.RewardActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.RewardActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.RewardActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.RewardActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.RewardActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.RewardActorCodeID, true
|
||||
}
|
||||
|
||||
case SystemKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.SystemActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.SystemActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.SystemActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.SystemActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.SystemActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.SystemActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.SystemActorCodeID, true
|
||||
}
|
||||
|
||||
case VerifregKey:
|
||||
switch av {
|
||||
|
||||
case Version0:
|
||||
case actorstypes.Version0:
|
||||
return builtin0.VerifiedRegistryActorCodeID, true
|
||||
|
||||
case Version2:
|
||||
case actorstypes.Version2:
|
||||
return builtin2.VerifiedRegistryActorCodeID, true
|
||||
|
||||
case Version3:
|
||||
case actorstypes.Version3:
|
||||
return builtin3.VerifiedRegistryActorCodeID, true
|
||||
|
||||
case Version4:
|
||||
case actorstypes.Version4:
|
||||
return builtin4.VerifiedRegistryActorCodeID, true
|
||||
|
||||
case Version5:
|
||||
case actorstypes.Version5:
|
||||
return builtin5.VerifiedRegistryActorCodeID, true
|
||||
|
||||
case Version6:
|
||||
case actorstypes.Version6:
|
||||
return builtin6.VerifiedRegistryActorCodeID, true
|
||||
|
||||
case Version7:
|
||||
case actorstypes.Version7:
|
||||
return builtin7.VerifiedRegistryActorCodeID, true
|
||||
}
|
||||
}
|
||||
@@ -307,13 +308,13 @@ func GetActorCodeID(av Version, name string) (cid.Cid, bool) {
}

// GetActorCodeIDs looks up all builtin actor's code CIDs by actor version.
func GetActorCodeIDs(av Version) (map[string]cid.Cid, error) {
func GetActorCodeIDs(av actorstypes.Version) (map[string]cid.Cid, error) {
	cids, ok := GetActorCodeIDsFromManifest(av)
	if ok {
		return cids, nil
	}

	actorsKeys := GetBuiltinActorsKeys()
	actorsKeys := GetBuiltinActorsKeys(av)
	synthCids := make(map[string]cid.Cid)

	for _, key := range actorsKeys {
@@ -27,6 +27,8 @@ var actors = map[string][]int{
	"system":   lotusactors.Versions,
	"reward":   lotusactors.Versions,
	"verifreg": lotusactors.Versions,
	"datacap":  lotusactors.Versions[8:],
	"evm":      lotusactors.Versions[9:],
}

func main() {
@@ -44,13 +46,18 @@ func main() {
		fmt.Println(err)
		return
	}

	if err := generateRegistry("chain/actors/builtin/registry.go"); err != nil {
		fmt.Println(err)
		return
	}
}

func generateAdapters() error {
	for act, versions := range actors {
		actDir := filepath.Join("chain/actors/builtin", act)

		if err := generateState(actDir); err != nil {
		if err := generateState(actDir, versions); err != nil {
			return err
		}

@@ -92,7 +99,7 @@ func generateAdapters() error {
	return nil
}

func generateState(actDir string) error {
func generateState(actDir string, versions []int) error {
	af, err := ioutil.ReadFile(filepath.Join(actDir, "state.go.template"))
	if err != nil {
		if os.IsNotExist(err) {
@@ -102,7 +109,7 @@ func generateState(actDir string) error {
		return xerrors.Errorf("loading state adapter template: %w", err)
	}

	for _, version := range lotusactors.Versions {
	for _, version := range versions {
		tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af)))

		var b bytes.Buffer
@@ -110,6 +117,7 @@ func generateState(actDir string) error {
		err := tpl.Execute(&b, map[string]interface{}{
			"v":             version,
			"import":        getVersionImports()[version],
			"latestVersion": lotusactors.LatestVersion,
		})
		if err != nil {
			return err
@@ -141,6 +149,7 @@ func generateMessages(actDir string) error {
		err := tpl.Execute(&b, map[string]interface{}{
			"v":             version,
			"import":        getVersionImports()[version],
			"latestVersion": lotusactors.LatestVersion,
		})
		if err != nil {
			return err
@@ -216,6 +225,36 @@ func generateBuiltin(builtinPath string) error {
	return nil
}

func generateRegistry(registryPath string) error {

	bf, err := ioutil.ReadFile(registryPath + ".template")
	if err != nil {
		if os.IsNotExist(err) {
			return nil // skip
		}

		return xerrors.Errorf("loading registry template file: %w", err)
	}

	tpl := template.Must(template.New("").Funcs(template.FuncMap{
		"import": func(v int) string { return getVersionImports()[v] },
	}).Parse(string(bf)))
	var b bytes.Buffer

	err = tpl.Execute(&b, map[string]interface{}{
		"versions": lotusactors.Versions,
	})
	if err != nil {
		return err
	}

	if err := ioutil.WriteFile(registryPath, b.Bytes(), 0666); err != nil {
		return err
	}

	return nil
}

func getVersionImports() map[int]string {
	versionImports := make(map[int]string, lotusactors.LatestVersion)
	for _, v := range lotusactors.Versions {
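generateRegistry above follows the same read-template, execute, write pattern as the other generators. Below is a cut-down, self-contained sketch of that pattern; the template text, the version-list literal, and writing to stdout instead of a gofmt-ed source file are all simplifying assumptions.

package main

import (
	"os"
	"text/template"
)

// A reduced version of the registry generation step: render one template
// once with the full list of actor versions.
const registryTemplate = `// Code generated - DO NOT EDIT.
package builtin

var actorVersions = []int{ {{- range .versions}} {{.}},{{- end}} }
`

func main() {
	tpl := template.Must(template.New("registry").Parse(registryTemplate))
	data := map[string]interface{}{
		// In the real generator this is lotusactors.Versions.
		"versions": []int{0, 2, 3, 4, 5, 6, 7, 8, 9, 10},
	}
	if err := tpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}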
@@ -1,10 +1,12 @@
package account

import (
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	builtin8 "github.com/filecoin-project/go-state-types/builtin"
	actorstypes "github.com/filecoin-project/go-state-types/actors"
	builtin10 "github.com/filecoin-project/go-state-types/builtin"
	"github.com/filecoin-project/go-state-types/cbor"
	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
	builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@@ -19,7 +21,7 @@ import (
	"github.com/filecoin-project/lotus/chain/types"
)

var Methods = builtin8.MethodsAccount
var Methods = builtin10.MethodsAccount

func Load(store adt.Store, act *types.Actor) (State, error) {
	if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
@@ -29,9 +31,15 @@ func Load(store adt.Store, act *types.Actor) (State, error) {

		switch av {

		case actors.Version8:
		case actorstypes.Version8:
			return load8(store, act.Head)

		case actorstypes.Version9:
			return load9(store, act.Head)

		case actorstypes.Version10:
			return load10(store, act.Head)

		}
	}
@@ -63,33 +71,39 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}

func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, error) {
func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (State, error) {
	switch av {

	case actors.Version0:
	case actorstypes.Version0:
		return make0(store, addr)

	case actors.Version2:
	case actorstypes.Version2:
		return make2(store, addr)

	case actors.Version3:
	case actorstypes.Version3:
		return make3(store, addr)

	case actors.Version4:
	case actorstypes.Version4:
		return make4(store, addr)

	case actors.Version5:
	case actorstypes.Version5:
		return make5(store, addr)

	case actors.Version6:
	case actorstypes.Version6:
		return make6(store, addr)

	case actors.Version7:
	case actorstypes.Version7:
		return make7(store, addr)

	case actors.Version8:
	case actorstypes.Version8:
		return make8(store, addr)

	case actorstypes.Version9:
		return make9(store, addr)

	case actorstypes.Version10:
		return make10(store, addr)

	}
	return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -97,6 +111,25 @@ func MakeState(store adt.Store, av actors.Version, addr address.Address) (State,
type State interface {
	cbor.Marshaler

	Code() cid.Cid
	ActorKey() string
	ActorVersion() actorstypes.Version

	PubkeyAddress() (address.Address, error)
	GetState() interface{}
}

func AllCodes() []cid.Cid {
	return []cid.Cid{
		(&state0{}).Code(),
		(&state2{}).Code(),
		(&state3{}).Code(),
		(&state4{}).Code(),
		(&state5{}).Code(),
		(&state6{}).Code(),
		(&state7{}).Code(),
		(&state8{}).Code(),
		(&state9{}).Code(),
		(&state10{}).Code(),
	}
}
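As a usage illustration of MakeState and the widened State interface, here is a hedged sketch of a helper that could sit alongside the code above in the account package; the caller-supplied store and address, the chosen version, and the helper name are assumptions, not part of the diff.

package account

// Illustrative only: construct an account actor state for a given actors
// version and read it back through the version-independent State interface.
// The store and address are assumed to be provided by the caller.
import (
	"github.com/filecoin-project/go-address"
	actorstypes "github.com/filecoin-project/go-state-types/actors"

	"github.com/filecoin-project/lotus/chain/actors/adt"
)

func pubkeyForNewAccount(store adt.Store, addr address.Address) (address.Address, error) {
	st, err := MakeState(store, actorstypes.Version10, addr)
	if err != nil {
		return address.Undef, err
	}
	return st.PubkeyAddress()
}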
@ -1,6 +1,8 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
@ -9,13 +11,13 @@ import (
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
|
||||
{{range .versions}}
|
||||
{{if (le . 7)}}
|
||||
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
|
||||
{{else}}
|
||||
builtin{{.}} "github.com/filecoin-project/go-state-types/builtin"
|
||||
{{end}}
|
||||
{{end}}
|
||||
builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin"
|
||||
)
|
||||
|
||||
var Methods = builtin{{.latestVersion}}.MethodsAccount
|
||||
@ -29,7 +31,7 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
switch av {
|
||||
{{range .versions}}
|
||||
{{if (ge . 8)}}
|
||||
case actors.Version{{.}}:
|
||||
case actorstypes.Version{{.}}:
|
||||
return load{{.}}(store, act.Head)
|
||||
{{end}}
|
||||
{{end}}
|
||||
@ -48,10 +50,10 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
||||
func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, error) {
|
||||
func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (State, error) {
|
||||
switch av {
|
||||
{{range .versions}}
|
||||
case actors.Version{{.}}:
|
||||
case actorstypes.Version{{.}}:
|
||||
return make{{.}}(store, addr)
|
||||
{{end}}
|
||||
}
|
||||
@ -61,6 +63,17 @@ func MakeState(store adt.Store, av actors.Version, addr address.Address) (State,
|
||||
type State interface {
|
||||
cbor.Marshaler
|
||||
|
||||
Code() cid.Cid
|
||||
ActorKey() string
|
||||
ActorVersion() actorstypes.Version
|
||||
|
||||
PubkeyAddress() (address.Address, error)
|
||||
GetState() interface{}
|
||||
}
|
||||
|
||||
func AllCodes() []cid.Cid {
|
||||
return []cid.Cid{ {{range .versions}}
|
||||
(&state{{.}}{}).Code(),
|
||||
{{- end}}
|
||||
}
|
||||
}
|
||||
|
@ -1,10 +1,14 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
|
||||
{{if (le .v 7)}}
|
||||
account{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/account"
|
||||
@ -42,3 +46,20 @@ func (s *state{{.v}}) PubkeyAddress() (address.Address, error) {
|
||||
func (s *state{{.v}}) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state{{.v}}) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state{{.v}}) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version{{.v}}
|
||||
}
|
||||
|
||||
func (s *state{{.v}}) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
21 chain/actors/builtin/account/v0.go (generated)
@ -1,11 +1,15 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account0 "github.com/filecoin-project/specs-actors/actors/builtin/account"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -38,3 +42,20 @@ func (s *state0) PubkeyAddress() (address.Address, error) {
|
||||
func (s *state0) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state0) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state0) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version0
|
||||
}
|
||||
|
||||
func (s *state0) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
61 chain/actors/builtin/account/v10.go (generated, new file)
@ -0,0 +1,61 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account10 "github.com/filecoin-project/go-state-types/builtin/v10/account"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state10)(nil)
|
||||
|
||||
func load10(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state10{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make10(store adt.Store, addr address.Address) (State, error) {
|
||||
out := state10{store: store}
|
||||
out.State = account10.State{Address: addr}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state10 struct {
|
||||
account10.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state10) PubkeyAddress() (address.Address, error) {
|
||||
return s.Address, nil
|
||||
}
|
||||
|
||||
func (s *state10) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state10) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state10) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version10
|
||||
}
|
||||
|
||||
func (s *state10) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
21 chain/actors/builtin/account/v2.go (generated)
@ -1,11 +1,15 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/account"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -38,3 +42,20 @@ func (s *state2) PubkeyAddress() (address.Address, error) {
|
||||
func (s *state2) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state2) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state2) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version2
|
||||
}
|
||||
|
||||
func (s *state2) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/account/v3.go (generated)
@ -1,11 +1,15 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/account"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -38,3 +42,20 @@ func (s *state3) PubkeyAddress() (address.Address, error) {
|
||||
func (s *state3) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state3) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state3) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version3
|
||||
}
|
||||
|
||||
func (s *state3) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/account/v4.go (generated)
@ -1,11 +1,15 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/account"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -38,3 +42,20 @@ func (s *state4) PubkeyAddress() (address.Address, error) {
|
||||
func (s *state4) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state4) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state4) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version4
|
||||
}
|
||||
|
||||
func (s *state4) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/account/v5.go (generated)
@ -1,11 +1,15 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/account"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -38,3 +42,20 @@ func (s *state5) PubkeyAddress() (address.Address, error) {
|
||||
func (s *state5) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state5) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state5) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version5
|
||||
}
|
||||
|
||||
func (s *state5) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/account/v6.go (generated)
@ -1,11 +1,15 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/account"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -38,3 +42,20 @@ func (s *state6) PubkeyAddress() (address.Address, error) {
|
||||
func (s *state6) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state6) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state6) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version6
|
||||
}
|
||||
|
||||
func (s *state6) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/account/v7.go (generated)
@ -1,11 +1,15 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/account"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -38,3 +42,20 @@ func (s *state7) PubkeyAddress() (address.Address, error) {
|
||||
func (s *state7) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state7) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state7) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version7
|
||||
}
|
||||
|
||||
func (s *state7) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/account/v8.go (generated)
@ -1,11 +1,15 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account8 "github.com/filecoin-project/go-state-types/builtin/v8/account"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -38,3 +42,20 @@ func (s *state8) PubkeyAddress() (address.Address, error) {
|
||||
func (s *state8) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state8) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state8) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version8
|
||||
}
|
||||
|
||||
func (s *state8) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
61 chain/actors/builtin/account/v9.go (generated, new file)
@ -0,0 +1,61 @@
|
||||
package account
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
account9 "github.com/filecoin-project/go-state-types/builtin/v9/account"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state9)(nil)
|
||||
|
||||
func load9(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state9{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make9(store adt.Store, addr address.Address) (State, error) {
|
||||
out := state9{store: store}
|
||||
out.State = account9.State{Address: addr}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state9 struct {
|
||||
account9.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state9) PubkeyAddress() (address.Address, error) {
|
||||
return s.Address, nil
|
||||
}
|
||||
|
||||
func (s *state9) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state9) ActorKey() string {
|
||||
return actors.AccountKey
|
||||
}
|
||||
|
||||
func (s *state9) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version9
|
||||
}
|
||||
|
||||
func (s *state9) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
@ -8,8 +8,8 @@ import (
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/builtin"
|
||||
miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
smoothingtypes "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing"
|
||||
minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
"github.com/filecoin-project/go-state-types/proof"
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
@ -18,7 +18,6 @@ import (
|
||||
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
|
||||
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
|
||||
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
|
||||
builtin8 "github.com/filecoin-project/specs-actors/v8/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
)
|
||||
@ -53,7 +52,7 @@ type PoStProof = proof.PoStProof
|
||||
type FilterEstimate = smoothingtypes.FilterEstimate
|
||||
|
||||
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
|
||||
return miner8.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
|
||||
return minertypes.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
|
||||
}
|
||||
|
||||
func ActorNameByCode(c cid.Cid) string {
|
||||
@ -84,9 +83,6 @@ func ActorNameByCode(c cid.Cid) string {
|
||||
case builtin7.IsBuiltinActor(c):
|
||||
return builtin7.ActorNameByCode(c)
|
||||
|
||||
case builtin8.IsBuiltinActor(c):
|
||||
return builtin8.ActorNameByCode(c)
|
||||
|
||||
default:
|
||||
return "<unknown>"
|
||||
}
|
||||
|
@ -7,8 +7,10 @@ import (
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
{{range .versions}}
|
||||
{{if (le . 7)}}
|
||||
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
|
||||
{{end}}
|
||||
{{end}}
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/proof"
|
||||
@ -16,7 +18,7 @@ import (
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
|
||||
miner{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin/v8/miner"
|
||||
minertypes "github.com/filecoin-project/go-state-types/builtin/v9/miner"
|
||||
smoothingtypes "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing"
|
||||
)
|
||||
|
||||
@ -50,7 +52,7 @@ type PoStProof = proof.PoStProof
|
||||
type FilterEstimate = smoothingtypes.FilterEstimate
|
||||
|
||||
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
|
||||
return miner{{.latestVersion}}.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
|
||||
return minertypes.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
|
||||
}
|
||||
|
||||
func ActorNameByCode(c cid.Cid) string {
|
||||
@ -60,9 +62,11 @@ func ActorNameByCode(c cid.Cid) string {
|
||||
|
||||
switch {
|
||||
{{range .versions}}
|
||||
{{if (le . 7)}}
|
||||
case builtin{{.}}.IsBuiltinActor(c):
|
||||
return builtin{{.}}.ActorNameByCode(c)
|
||||
{{end}}
|
||||
{{end}}
|
||||
default:
|
||||
return "<unknown>"
|
||||
}
|
||||
|
@ -1,6 +1,8 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"golang.org/x/xerrors"
|
||||
@ -9,10 +11,9 @@ import (
|
||||
{{range .versions}}
|
||||
{{if (le . 7)}}
|
||||
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
|
||||
{{else}}
|
||||
builtin{{.}} "github.com/filecoin-project/go-state-types/builtin"
|
||||
{{end}}
|
||||
{{end}}
|
||||
builtin{{.latestVersion}} "github.com/filecoin-project/go-state-types/builtin"
|
||||
)
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
@ -24,7 +25,7 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
switch av {
|
||||
{{range .versions}}
|
||||
{{if (ge . 8)}}
|
||||
case actors.Version{{.}}:
|
||||
case actorstypes.Version{{.}}:
|
||||
return load{{.}}(store, act.Head)
|
||||
{{end}}
|
||||
{{end}}
|
||||
@ -43,10 +44,10 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
||||
func MakeState(store adt.Store, av actors.Version) (State, error) {
|
||||
func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
|
||||
switch av {
|
||||
{{range .versions}}
|
||||
case actors.Version{{.}}:
|
||||
case actorstypes.Version{{.}}:
|
||||
return make{{.}}(store)
|
||||
{{end}}
|
||||
}
|
||||
@ -60,5 +61,16 @@ var (
|
||||
|
||||
|
||||
type State interface {
|
||||
Code() cid.Cid
|
||||
ActorKey() string
|
||||
ActorVersion() actorstypes.Version
|
||||
|
||||
GetState() interface{}
|
||||
}
|
||||
|
||||
func AllCodes() []cid.Cid {
|
||||
return []cid.Cid{ {{range .versions}}
|
||||
(&state{{.}}{}).Code(),
|
||||
{{- end}}
|
||||
}
|
||||
}
|
||||
|
@ -1,9 +1,11 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
builtin8 "github.com/filecoin-project/go-state-types/builtin"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
builtin10 "github.com/filecoin-project/go-state-types/builtin"
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
|
||||
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
|
||||
@ -25,9 +27,15 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
|
||||
switch av {
|
||||
|
||||
case actors.Version8:
|
||||
case actorstypes.Version8:
|
||||
return load8(store, act.Head)
|
||||
|
||||
case actorstypes.Version9:
|
||||
return load9(store, act.Head)
|
||||
|
||||
case actorstypes.Version10:
|
||||
return load10(store, act.Head)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@ -59,42 +67,67 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
||||
func MakeState(store adt.Store, av actors.Version) (State, error) {
|
||||
func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
|
||||
switch av {
|
||||
|
||||
case actors.Version0:
|
||||
case actorstypes.Version0:
|
||||
return make0(store)
|
||||
|
||||
case actors.Version2:
|
||||
case actorstypes.Version2:
|
||||
return make2(store)
|
||||
|
||||
case actors.Version3:
|
||||
case actorstypes.Version3:
|
||||
return make3(store)
|
||||
|
||||
case actors.Version4:
|
||||
case actorstypes.Version4:
|
||||
return make4(store)
|
||||
|
||||
case actors.Version5:
|
||||
case actorstypes.Version5:
|
||||
return make5(store)
|
||||
|
||||
case actors.Version6:
|
||||
case actorstypes.Version6:
|
||||
return make6(store)
|
||||
|
||||
case actors.Version7:
|
||||
case actorstypes.Version7:
|
||||
return make7(store)
|
||||
|
||||
case actors.Version8:
|
||||
case actorstypes.Version8:
|
||||
return make8(store)
|
||||
|
||||
case actorstypes.Version9:
|
||||
return make9(store)
|
||||
|
||||
case actorstypes.Version10:
|
||||
return make10(store)
|
||||
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor version %d", av)
|
||||
}
|
||||
|
||||
var (
|
||||
Address = builtin8.CronActorAddr
|
||||
Methods = builtin8.MethodsCron
|
||||
Address = builtin10.CronActorAddr
|
||||
Methods = builtin10.MethodsCron
|
||||
)
|
||||
|
||||
type State interface {
|
||||
Code() cid.Cid
|
||||
ActorKey() string
|
||||
ActorVersion() actorstypes.Version
|
||||
|
||||
GetState() interface{}
|
||||
}
|
||||
|
||||
func AllCodes() []cid.Cid {
|
||||
return []cid.Cid{
|
||||
(&state0{}).Code(),
|
||||
(&state2{}).Code(),
|
||||
(&state3{}).Code(),
|
||||
(&state4{}).Code(),
|
||||
(&state5{}).Code(),
|
||||
(&state6{}).Code(),
|
||||
(&state7{}).Code(),
|
||||
(&state8{}).Code(),
|
||||
(&state9{}).Code(),
|
||||
(&state10{}).Code(),
|
||||
}
|
||||
}
|
||||
|
@ -1,9 +1,12 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
|
||||
{{if (le .v 7)}}
|
||||
cron{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/cron"
|
||||
@ -37,3 +40,20 @@ type state{{.v}} struct {
|
||||
func (s *state{{.v}}) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state{{.v}}) ActorKey() string {
|
||||
return actors.CronKey
|
||||
}
|
||||
|
||||
func (s *state{{.v}}) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version{{.v}}
|
||||
}
|
||||
|
||||
func (s *state{{.v}}) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/cron/v0.go (generated)
@ -1,10 +1,14 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
cron0 "github.com/filecoin-project/specs-actors/actors/builtin/cron"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -33,3 +37,20 @@ type state0 struct {
|
||||
func (s *state0) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state0) ActorKey() string {
|
||||
return actors.CronKey
|
||||
}
|
||||
|
||||
func (s *state0) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version0
|
||||
}
|
||||
|
||||
func (s *state0) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
56 chain/actors/builtin/cron/v10.go (generated, new file)
@ -0,0 +1,56 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
cron10 "github.com/filecoin-project/go-state-types/builtin/v10/cron"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
var _ State = (*state10)(nil)
|
||||
|
||||
func load10(store adt.Store, root cid.Cid) (State, error) {
|
||||
out := state10{store: store}
|
||||
err := store.Get(store.Context(), root, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
func make10(store adt.Store) (State, error) {
|
||||
out := state10{store: store}
|
||||
out.State = *cron10.ConstructState(cron10.BuiltInEntries())
|
||||
return &out, nil
|
||||
}
|
||||
|
||||
type state10 struct {
|
||||
cron10.State
|
||||
store adt.Store
|
||||
}
|
||||
|
||||
func (s *state10) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state10) ActorKey() string {
|
||||
return actors.CronKey
|
||||
}
|
||||
|
||||
func (s *state10) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version10
|
||||
}
|
||||
|
||||
func (s *state10) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
21 chain/actors/builtin/cron/v2.go (generated)
@ -1,10 +1,14 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
cron2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/cron"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -33,3 +37,20 @@ type state2 struct {
|
||||
func (s *state2) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state2) ActorKey() string {
|
||||
return actors.CronKey
|
||||
}
|
||||
|
||||
func (s *state2) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version2
|
||||
}
|
||||
|
||||
func (s *state2) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/cron/v3.go (generated)
@ -1,10 +1,14 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
cron3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/cron"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -33,3 +37,20 @@ type state3 struct {
|
||||
func (s *state3) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state3) ActorKey() string {
|
||||
return actors.CronKey
|
||||
}
|
||||
|
||||
func (s *state3) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version3
|
||||
}
|
||||
|
||||
func (s *state3) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/cron/v4.go (generated)
@ -1,10 +1,14 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
cron4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/cron"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -33,3 +37,20 @@ type state4 struct {
|
||||
func (s *state4) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state4) ActorKey() string {
|
||||
return actors.CronKey
|
||||
}
|
||||
|
||||
func (s *state4) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version4
|
||||
}
|
||||
|
||||
func (s *state4) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/cron/v5.go (generated)
@ -1,10 +1,14 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
cron5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/cron"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -33,3 +37,20 @@ type state5 struct {
|
||||
func (s *state5) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state5) ActorKey() string {
|
||||
return actors.CronKey
|
||||
}
|
||||
|
||||
func (s *state5) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version5
|
||||
}
|
||||
|
||||
func (s *state5) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
21 chain/actors/builtin/cron/v6.go (generated)
@ -1,10 +1,14 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||
cron6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/cron"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
)
|
||||
|
||||
@ -33,3 +37,20 @@ type state6 struct {
|
||||
func (s *state6) GetState() interface{} {
|
||||
return &s.State
|
||||
}
|
||||
|
||||
func (s *state6) ActorKey() string {
|
||||
return actors.CronKey
|
||||
}
|
||||
|
||||
func (s *state6) ActorVersion() actorstypes.Version {
|
||||
return actorstypes.Version6
|
||||
}
|
||||
|
||||
func (s *state6) Code() cid.Cid {
|
||||
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
|
||||
if !ok {
|
||||
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
|
||||
}
|
||||
|
||||
return code
|
||||
}
|
||||
|
Some files were not shown because too many files have changed in this diff.