Merge remote-tracking branch 'origin/master' into feat/compact-sectors-numbers-cmd

commit 0656b62176
.circleci/config.yml
@@ -1,15 +1,17 @@
 version: 2.1
 orbs:
   go: gotest/tools@0.0.13
+  aws-cli: circleci/aws-cli@1.3.2
+  packer: salaxander/packer@0.0.3
 
 executors:
   golang:
     docker:
-      - image: circleci/golang:1.14.6
+      - image: circleci/golang:1.16.4
     resource_class: 2xlarge
   ubuntu:
     docker:
-      - image: ubuntu:19.10
+      - image: ubuntu:20.04
 
 commands:
   install-deps:
@@ -110,7 +112,7 @@ jobs:
       - run:
           command: make debug
 
-  test: &test
+  test:
     description: |
       Run tests with gotestsum.
     parameters: &test-params
@@ -121,20 +123,20 @@ jobs:
         type: string
         default: "-timeout 30m"
         description: Flags passed to go test.
-      packages:
+      target:
         type: string
         default: "./..."
         description: Import paths of packages to be tested.
-      winpost-test:
+      proofs-log-test:
         type: string
         default: "0"
-      test-suite-name:
+      suite:
         type: string
         default: unit
         description: Test suite name to report to CircleCI.
       gotestsum-format:
         type: string
-        default: pkgname-and-test-fails
+        default: standard-verbose
         description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
       coverage:
         type: string
@@ -142,7 +144,7 @@ jobs:
         description: Coverage flag. Set to the empty string to disable.
       codecov-upload:
         type: boolean
-        default: false
+        default: true
         description: |
           Upload coverage report to https://codecov.io/. Requires the codecov API token to be
           set as an environment variable for private projects.
@@ -160,24 +162,24 @@ jobs:
       - run:
           name: go test
           environment:
-            LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >>
+            TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >>
             SKIP_CONFORMANCE: "1"
           command: |
-            mkdir -p /tmp/test-reports/<< parameters.test-suite-name >>
+            mkdir -p /tmp/test-reports/<< parameters.suite >>
             mkdir -p /tmp/test-artifacts
             gotestsum \
               --format << parameters.gotestsum-format >> \
-              --junitfile /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml \
-              --jsonfile /tmp/test-artifacts/<< parameters.test-suite-name >>.json \
+              --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
+              --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
               -- \
               << parameters.coverage >> \
               << parameters.go-test-flags >> \
-              << parameters.packages >>
+              << parameters.target >>
           no_output_timeout: 30m
       - store_test_results:
           path: /tmp/test-reports
       - store_artifacts:
-          path: /tmp/test-artifacts/<< parameters.test-suite-name >>.json
+          path: /tmp/test-artifacts/<< parameters.suite >>.json
       - when:
           condition: << parameters.codecov-upload >>
           steps:
@@ -188,18 +190,6 @@ jobs:
           command: |
             bash <(curl -s https://codecov.io/bash)
 
-  test-chain:
-    <<: *test
-  test-node:
-    <<: *test
-  test-storage:
-    <<: *test
-  test-cli:
-    <<: *test
-  test-short:
-    <<: *test
-  test-window-post:
-    <<: *test
   test-conformance:
     description: |
       Run tests using a corpus of interoperable test vectors for Filecoin
@@ -262,24 +252,97 @@ jobs:
           path: /tmp/test-reports
       - store_artifacts:
           path: /tmp/test-artifacts/conformance-coverage.html
-  build-lotus-soup:
+  build-ntwk-calibration:
     description: |
-      Compile `lotus-soup` Testground test plan using the current version of Lotus.
+      Compile lotus binaries for the calibration network
+    parameters:
+      <<: *test-params
+    executor: << parameters.executor >>
+    steps:
+      - install-deps
+      - prepare
+      - run: make calibnet
+      - run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet
+      - persist_to_workspace:
+          root: "."
+          paths:
+            - linux-calibrationnet
+  build-ntwk-butterfly:
+    description: |
+      Compile lotus binaries for the butterfly network
+    parameters:
+      <<: *test-params
+    executor: << parameters.executor >>
+    steps:
+      - install-deps
+      - prepare
+      - run: make butterflynet
+      - run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet
+      - persist_to_workspace:
+          root: "."
+          paths:
+            - linux-butterflynet
+  build-ntwk-nerpa:
+    description: |
+      Compile lotus binaries for the nerpa network
+    parameters:
+      <<: *test-params
+    executor: << parameters.executor >>
+    steps:
+      - install-deps
+      - prepare
+      - run: make nerpanet
+      - run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet
+      - persist_to_workspace:
+          root: "."
+          paths:
+            - linux-nerpanet
+  build-lotus-soup:
+    description: |
+      Compile `lotus-soup` Testground test plan
     parameters:
       <<: *test-params
     executor: << parameters.executor >>
     steps:
       - install-deps
       - prepare
-      - run: cd extern/oni && git submodule sync
-      - run: cd extern/oni && git submodule update --init
       - run: cd extern/filecoin-ffi && make
       - run:
-          name: "replace lotus, filecoin-ffi, blst and fil-blst deps"
-          command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../fil-blst/blst && go mod edit -replace github.com/filecoin-project/fil-blst=../../fil-blst
+          name: "go get lotus@master"
+          command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy
       - run:
           name: "build lotus-soup testplan"
-          command: pushd extern/oni/lotus-soup && go build -tags=testground .
+          command: pushd testplans/lotus-soup && go build -tags=testground .
+  trigger-testplans:
+    description: |
+      Trigger `lotus-soup` test cases on TaaS
+    parameters:
+      <<: *test-params
+    executor: << parameters.executor >>
+    steps:
+      - install-deps
+      - prepare
+      - run:
+          name: "download testground"
+          command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli
+      - run:
+          name: "prepare .env.toml"
+          command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
+      - run:
+          name: "prepare testground home dir and link test plans"
+          command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync
+      - run:
+          name: "go get lotus@master"
+          command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
+      - run:
+          name: "trigger deals baseline testplan on taas"
+          command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
+      - run:
+          name: "trigger payment channel stress testplan on taas"
+          command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
+      - run:
+          name: "trigger graphsync testplan on taas"
+          command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
 
 
   build-macos:
@@ -294,8 +357,8 @@ jobs:
       - run:
           name: Install go
           command: |
-            curl -O https://dl.google.com/go/go1.14.2.darwin-amd64.pkg && \
-            sudo installer -pkg go1.14.2.darwin-amd64.pkg -target /
+            curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \
+            sudo installer -pkg go1.16.4.darwin-amd64.pkg -target /
       - run:
           name: Install pkg-config
           command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
@@ -309,6 +372,15 @@ jobs:
           command: |
             curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
             chmod +x /usr/local/bin/jq
+      - run:
+          name: Install hwloc
+          command: |
+            mkdir ~/hwloc
+            curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
+            cd ~/hwloc
+            tar -xvzpf hwloc-2.4.1.tar.gz
+            cd hwloc-2.4.1
+            ./configure && make && sudo make install
       - restore_cache:
           name: restore cargo cache
           key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
@@ -334,6 +406,41 @@ jobs:
             - "~/.rustup"
             - "~/.cargo"
 
+  build-appimage:
+    machine:
+      image: ubuntu-2004:202104-01
+    steps:
+      - checkout
+      - attach_workspace:
+          at: "."
+      - run:
+          name: install appimage-builder
+          command: |
+            # docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
+            sudo apt update
+            sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
+            sudo curl -Lo /usr/local/bin/appimagetool https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage
+            sudo chmod +x /usr/local/bin/appimagetool
+            sudo pip3 install appimage-builder
+      - run:
+          name: install lotus dependencies
+          command: sudo apt install ocl-icd-opencl-dev libhwloc-dev
+      - run:
+          name: build appimage
+          command: |
+            sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml
+            make appimage
+      - run:
+          name: prepare workspace
+          command: |
+            mkdir appimage
+            mv Lotus-*.AppImage appimage
+      - persist_to_workspace:
+          root: "."
+          paths:
+            - appimage
+
+
   gofmt:
     executor: golang
     steps:
@@ -342,7 +449,7 @@ jobs:
       - run:
          command: "! go fmt ./... 2>&1 | read"
 
-  cbor-gen-check:
+  gen-check:
     executor: golang
     steps:
       - install-deps
@@ -350,7 +457,10 @@ jobs:
       - run: make deps
       - run: go install golang.org/x/tools/cmd/goimports
       - run: go install github.com/hannahhoward/cbor-gen-for
-      - run: go generate ./...
+      - run: make gen
+      - run: git --no-pager diff
+      - run: git --no-pager diff --quiet
+      - run: make docsgen-cli
       - run: git --no-pager diff
       - run: git --no-pager diff --quiet
 
@@ -359,8 +469,19 @@ jobs:
     steps:
       - install-deps
       - prepare
+      - run: go install golang.org/x/tools/cmd/goimports
+      - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
+      - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
+      - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
+      - run: make deps
       - run: make docsgen
+      - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
+      - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
+      - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
       - run: git --no-pager diff
+      - run: diff ../pre-openrpc-full ../post-openrpc-full
+      - run: diff ../pre-openrpc-miner ../post-openrpc-miner
+      - run: diff ../pre-openrpc-worker ../post-openrpc-worker
       - run: git --no-pager diff --quiet
 
   lint: &lint
@@ -422,6 +543,198 @@ jobs:
           name: Publish release
           command: ./scripts/publish-release.sh
 
+  publish-snapcraft:
+    description: build and push snapcraft
+    machine:
+      image: ubuntu-2004:202104-01
+    resource_class: 2xlarge
+    parameters:
+      channel:
+        type: string
+        default: "edge"
+        description: snapcraft channel
+    steps:
+      - checkout
+      - run:
+          name: install snapcraft
+          command: sudo snap install snapcraft --classic
+      - run:
+          name: create snapcraft config file
+          command: |
+            mkdir -p ~/.config/snapcraft
+            echo "$SNAPCRAFT_LOGIN_FILE" | base64 -d > ~/.config/snapcraft/snapcraft.cfg
+      - run:
+          name: build snap
+          command: snapcraft --use-lxd
+      - run:
+          name: publish snap
+          command: snapcraft push *.snap --release << parameters.channel >>
+
+  build-and-push-image:
+    description: build and push docker images to public AWS ECR registry
+    executor: aws-cli/default
+    parameters:
+      profile-name:
+        type: string
+        default: "default"
+        description: AWS profile name to be configured.
+
+      aws-access-key-id:
+        type: env_var_name
+        default: AWS_ACCESS_KEY_ID
+        description: >
+          AWS access key id for IAM role. Set this to the name of
+          the environment variable you will set to hold this
+          value, i.e. AWS_ACCESS_KEY.
+
+      aws-secret-access-key:
+        type: env_var_name
+        default: AWS_SECRET_ACCESS_KEY
+        description: >
+          AWS secret key for IAM role. Set this to the name of
+          the environment variable you will set to hold this
+          value, i.e. AWS_SECRET_ACCESS_KEY.
+
+      region:
+        type: env_var_name
+        default: AWS_REGION
+        description: >
+          Name of env var storing your AWS region information,
+          defaults to AWS_REGION
+
+      account-url:
+        type: env_var_name
+        default: AWS_ECR_ACCOUNT_URL
+        description: >
+          Env var storing Amazon ECR account URL that maps to an AWS account,
+          e.g. {awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com
+          defaults to AWS_ECR_ACCOUNT_URL
+
+      dockerfile:
+        type: string
+        default: Dockerfile
+        description: Name of dockerfile to use. Defaults to Dockerfile.
+
+      path:
+        type: string
+        default: .
+        description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory).
+
+      extra-build-args:
+        type: string
+        default: ""
+        description: >
+          Extra flags to pass to docker build. For examples, see
+          https://docs.docker.com/engine/reference/commandline/build
+
+      repo:
+        type: string
+        description: Name of an Amazon ECR repository
+
+      tag:
+        type: string
+        default: "latest"
+        description: A comma-separated string containing docker image tags to build and push (default = latest)
+
+    steps:
+      - run:
+          name: Confirm that environment variables are set
+          command: |
+            if [ -z "$AWS_ACCESS_KEY_ID" ]; then
+              echo "No AWS_ACCESS_KEY_ID is set. Skipping build-and-push job ..."
+              circleci-agent step halt
+            fi
+
+      - aws-cli/setup:
+          profile-name: <<parameters.profile-name>>
+          aws-access-key-id: <<parameters.aws-access-key-id>>
+          aws-secret-access-key: <<parameters.aws-secret-access-key>>
+          aws-region: <<parameters.region>>
+
+      - run:
+          name: Log into Amazon ECR
+          command: |
+            aws ecr-public get-login-password --region $<<parameters.region>> --profile <<parameters.profile-name>> | docker login --username AWS --password-stdin $<<parameters.account-url>>
+
+      - checkout
+
+      - setup_remote_docker:
+          version: 19.03.13
+          docker_layer_caching: false
+
+      - run:
+          name: Build docker image
+          command: |
+            registry_id=$(echo $<<parameters.account-url>> | sed "s;\..*;;g")
+
+            docker_tag_args=""
+            IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
+            for tag in "${DOCKER_TAGS[@]}"; do
+              docker_tag_args="$docker_tag_args -t $<<parameters.account-url>>/<<parameters.repo>>:$tag"
+            done
+
+            docker build \
+              <<#parameters.extra-build-args>><<parameters.extra-build-args>><</parameters.extra-build-args>> \
+              -f <<parameters.path>>/<<parameters.dockerfile>> \
+              $docker_tag_args \
+              <<parameters.path>>
+
+      - run:
+          name: Push image to Amazon ECR
+          command: |
+            IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
+            for tag in "${DOCKER_TAGS[@]}"; do
+              docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag}
+            done
+
+  publish-packer-mainnet:
+    description: build and push AWS IAM and DigitalOcean droplet.
+    executor:
+      name: packer/default
+      packer-version: 1.6.6
+    steps:
+      - checkout
+      - attach_workspace:
+          at: "."
+      - packer/build:
+          template: tools/packer/lotus.pkr.hcl
+          args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG"
+  publish-packer-calibrationnet:
+    description: build and push AWS IAM and DigitalOcean droplet.
+    executor:
+      name: packer/default
+      packer-version: 1.6.6
+    steps:
+      - checkout
+      - attach_workspace:
+          at: "."
+      - packer/build:
+          template: tools/packer/lotus.pkr.hcl
+          args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG"
+  publish-packer-butterflynet:
+    description: build and push AWS IAM and DigitalOcean droplet.
+    executor:
+      name: packer/default
+      packer-version: 1.6.6
+    steps:
+      - checkout
+      - attach_workspace:
+          at: "."
+      - packer/build:
+          template: tools/packer/lotus.pkr.hcl
+          args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
+  publish-packer-nerpanet:
+    description: build and push AWS IAM and DigitalOcean droplet.
+    executor:
+      name: packer/default
+      packer-version: 1.6.6
+    steps:
+      - checkout
+      - attach_workspace:
+          at: "."
+      - packer/build:
+          template: tools/packer/lotus.pkr.hcl
+          args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG"
 
 workflows:
   version: 2.1
@@ -431,73 +744,289 @@ workflows:
           concurrency: "16" # expend all docker 2xlarge CPUs.
       - mod-tidy-check
       - gofmt
-      - cbor-gen-check
+      - gen-check
      - docs-check
      - test:
-          codecov-upload: true
-          test-suite-name: full
-      - test-chain:
-          codecov-upload: true
-          test-suite-name: chain
-          packages: "./chain/..."
-      - test-node:
-          codecov-upload: true
-          test-suite-name: node
-          packages: "./node/..."
-      - test-storage:
-          codecov-upload: true
-          test-suite-name: storage
-          packages: "./storage/... ./extern/..."
-      - test-cli:
-          codecov-upload: true
-          test-suite-name: cli
-          packages: "./cli/... ./cmd/... ./api/..."
-      - test-window-post:
-          go-test-flags: "-run=TestWindowedPost"
-          winpost-test: "1"
-          test-suite-name: window-post
-      - test-short:
-          go-test-flags: "--timeout 10m --short"
-          test-suite-name: short
-          filters:
-            tags:
-              only:
-                - /^v\d+\.\d+\.\d+$/
+          name: test-itest-api
+          suite: itest-api
+          target: "./itests/api_test.go"
+
+      - test:
+          name: test-itest-batch_deal
+          suite: itest-batch_deal
+          target: "./itests/batch_deal_test.go"
+
+      - test:
+          name: test-itest-ccupgrade
+          suite: itest-ccupgrade
+          target: "./itests/ccupgrade_test.go"
+
+      - test:
+          name: test-itest-cli
+          suite: itest-cli
+          target: "./itests/cli_test.go"
+
+      - test:
+          name: test-itest-deadlines
+          suite: itest-deadlines
+          target: "./itests/deadlines_test.go"
+
+      - test:
+          name: test-itest-deals_concurrent
+          suite: itest-deals_concurrent
+          target: "./itests/deals_concurrent_test.go"
+
+      - test:
+          name: test-itest-deals_offline
+          suite: itest-deals_offline
+          target: "./itests/deals_offline_test.go"
+
+      - test:
+          name: test-itest-deals_power
+          suite: itest-deals_power
+          target: "./itests/deals_power_test.go"
+
+      - test:
+          name: test-itest-deals_pricing
+          suite: itest-deals_pricing
+          target: "./itests/deals_pricing_test.go"
+
+      - test:
+          name: test-itest-deals_publish
+          suite: itest-deals_publish
+          target: "./itests/deals_publish_test.go"
+
+      - test:
+          name: test-itest-deals
+          suite: itest-deals
+          target: "./itests/deals_test.go"
+
+      - test:
+          name: test-itest-gateway
+          suite: itest-gateway
+          target: "./itests/gateway_test.go"
+
+      - test:
+          name: test-itest-get_messages_in_ts
+          suite: itest-get_messages_in_ts
+          target: "./itests/get_messages_in_ts_test.go"
+
+      - test:
+          name: test-itest-multisig
+          suite: itest-multisig
+          target: "./itests/multisig_test.go"
+
+      - test:
+          name: test-itest-nonce
+          suite: itest-nonce
+          target: "./itests/nonce_test.go"
+
+      - test:
+          name: test-itest-paych_api
+          suite: itest-paych_api
+          target: "./itests/paych_api_test.go"
+
+      - test:
+          name: test-itest-paych_cli
+          suite: itest-paych_cli
+          target: "./itests/paych_cli_test.go"
+
+      - test:
+          name: test-itest-sdr_upgrade
+          suite: itest-sdr_upgrade
+          target: "./itests/sdr_upgrade_test.go"
+
+      - test:
+          name: test-itest-sector_finalize_early
+          suite: itest-sector_finalize_early
+          target: "./itests/sector_finalize_early_test.go"
+
+      - test:
+          name: test-itest-sector_miner_collateral
+          suite: itest-sector_miner_collateral
+          target: "./itests/sector_miner_collateral_test.go"
+
+      - test:
+          name: test-itest-sector_pledge
+          suite: itest-sector_pledge
+          target: "./itests/sector_pledge_test.go"
+
+      - test:
+          name: test-itest-sector_terminate
+          suite: itest-sector_terminate
+          target: "./itests/sector_terminate_test.go"
+
+      - test:
+          name: test-itest-tape
+          suite: itest-tape
+          target: "./itests/tape_test.go"
+
+      - test:
+          name: test-itest-verifreg
+          suite: itest-verifreg
+          target: "./itests/verifreg_test.go"
+
+      - test:
+          name: test-itest-wdpost_dispute
+          suite: itest-wdpost_dispute
+          target: "./itests/wdpost_dispute_test.go"
+
+      - test:
+          name: test-itest-wdpost
+          suite: itest-wdpost
+          target: "./itests/wdpost_test.go"
+
+      - test:
+          name: test-unit-cli
+          suite: utest-unit-cli
+          target: "./cli/... ./cmd/... ./api/..."
+      - test:
+          name: test-unit-node
+          suite: utest-unit-node
+          target: "./node/..."
+      - test:
+          name: test-unit-rest
+          suite: utest-unit-rest
+          target: "./api/... ./blockstore/... ./build/... ./chain/... ./cli/... ./cmd/... ./conformance/... ./extern/... ./gateway/... ./journal/... ./lib/... ./markets/... ./node/... ./paychmgr/... ./storage/... ./tools/..."
+      - test:
+          name: test-unit-storage
+          suite: utest-unit-storage
+          target: "./storage/... ./extern/..."
+      - test:
+          go-test-flags: "-run=TestMulticoreSDR"
+          suite: multicore-sdr-check
+          target: "./extern/sector-storage/ffiwrapper"
+          proofs-log-test: "1"
       - test-conformance:
-          test-suite-name: conformance
-          packages: "./conformance"
+          suite: conformance
+          codecov-upload: false
+          target: "./conformance"
       - test-conformance:
           name: test-conformance-bleeding-edge
-          test-suite-name: conformance-bleeding-edge
-          packages: "./conformance"
+          codecov-upload: false
+          suite: conformance-bleeding-edge
+          target: "./conformance"
           vectors-branch: master
-      - build-lotus-soup
+      - trigger-testplans:
+          filters:
+            branches:
+              only:
+                - master
       - build-debug
       - build-all:
-          requires:
-            - test-short
           filters:
             tags:
               only:
-                - /^v\d+\.\d+\.\d+$/
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-ntwk-calibration:
+          filters:
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-ntwk-butterfly:
+          filters:
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-ntwk-nerpa:
+          filters:
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-lotus-soup
       - build-macos:
-          requires:
-            - test-short
           filters:
             branches:
               ignore:
                 - /.*/
             tags:
               only:
-                - /^v\d+\.\d+\.\d+$/
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-appimage:
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
       - publish:
           requires:
             - build-all
             - build-macos
+            - build-appimage
           filters:
             branches:
               ignore:
                 - /.*/
             tags:
               only:
-                - /^v\d+\.\d+\.\d+$/
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - build-and-push-image:
+          dockerfile: Dockerfile.lotus
+          path: .
+          repo: lotus-dev
+          tag: '${CIRCLE_SHA1:0:8}'
+      - publish-packer-mainnet:
+          requires:
+            - build-all
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - publish-packer-calibrationnet:
+          requires:
+            - build-ntwk-calibration
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - publish-packer-butterflynet:
+          requires:
+            - build-ntwk-butterfly
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - publish-packer-nerpanet:
+          requires:
+            - build-ntwk-nerpa
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+      - publish-snapcraft:
+          name: publish-snapcraft-stable
+          channel: stable
+          filters:
+            branches:
+              ignore:
+                - /.*/
+            tags:
+              only:
+                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
+
+  nightly:
+    triggers:
+      - schedule:
+          cron: "0 0 * * *"
+          filters:
+            branches:
+              only:
+                - master
+    jobs:
+      - publish-snapcraft:
+          name: publish-snapcraft-nightly
+          channel: edge
+
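The regenerated workflow above contains one `- test:` entry per file in `./itests`, plus grouped unit-test suites. The new `.circleci/gen.go` below produces those entries mechanically. As a hedged sketch of the naming pattern only (inferred from the generated entries above and gen.go's stripSuffix helper; this is illustrative code, not part of the commit):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // Sketch: how one itest filename becomes a workflow entry, mirroring
    // the stripSuffix template helper in gen.go below.
    func main() {
    	file := "deals_concurrent_test.go"
    	name := strings.TrimSuffix(file, "_test.go") // -> "deals_concurrent"
    	fmt.Printf("- test:\n")
    	fmt.Printf("    name: test-itest-%s\n", name)
    	fmt.Printf("    suite: itest-%s\n", name)
    	fmt.Printf("    target: \"./itests/%s\"\n", file)
    }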
.circleci/gen.go  (new file, 136 lines)
@@ -0,0 +1,136 @@
+package main
+
+import (
+	"embed"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"text/template"
+)
+
+//go:generate go run ./gen.go ..
+
+//go:embed template.yml
+var templateFile embed.FS
+
+type (
+	dirs  = []string
+	suite = string
+)
+
+// groupedUnitTests maps suite names to top-level directories that should be
+// included in that suite. The program adds an implicit group "rest" that
+// includes all other top-level directories.
+var groupedUnitTests = map[suite]dirs{
+	"unit-node":    {"node"},
+	"unit-storage": {"storage", "extern"},
+	"unit-cli":     {"cli", "cmd", "api"},
+}
+
+func main() {
+	if len(os.Args) != 2 {
+		panic("expected path to repo as argument")
+	}
+
+	repo := os.Args[1]
+
+	tmpl := template.New("template.yml")
+	tmpl.Delims("[[", "]]")
+	tmpl.Funcs(template.FuncMap{
+		"stripSuffix": func(in string) string {
+			return strings.TrimSuffix(in, "_test.go")
+		},
+	})
+	tmpl = template.Must(tmpl.ParseFS(templateFile, "*"))
+
+	// list all itests.
+	itests, err := filepath.Glob(filepath.Join(repo, "./itests/*_test.go"))
+	if err != nil {
+		panic(err)
+	}
+
+	// strip the dir from all entries.
+	for i, f := range itests {
+		itests[i] = filepath.Base(f)
+	}
+
+	// calculate the exclusion set of unit test directories to exclude because
+	// they are already included in a grouped suite.
+	var excluded = map[string]struct{}{}
+	for _, ss := range groupedUnitTests {
+		for _, s := range ss {
+			e, err := filepath.Abs(filepath.Join(repo, s))
+			if err != nil {
+				panic(err)
+			}
+			excluded[e] = struct{}{}
+		}
+	}
+
+	// all unit tests top-level dirs that are not itests, nor included in other suites.
+	var rest = map[string]struct{}{}
+	err = filepath.Walk(repo, func(path string, f os.FileInfo, err error) error {
+		// include all tests that aren't in the itests directory.
+		if strings.Contains(path, "itests") {
+			return filepath.SkipDir
+		}
+		// exclude all tests included in other suites
+		if f.IsDir() {
+			if _, ok := excluded[path]; ok {
+				return filepath.SkipDir
+			}
+		}
+		if strings.HasSuffix(path, "_test.go") {
+			rel, err := filepath.Rel(repo, path)
+			if err != nil {
+				panic(err)
+			}
+			// take the first directory
+			rest[strings.Split(rel, string(os.PathSeparator))[0]] = struct{}{}
+		}
+		return err
+	})
+	if err != nil {
+		panic(err)
+	}
+
+	// add other directories to a 'rest' suite.
+	for k := range rest {
+		groupedUnitTests["unit-rest"] = append(groupedUnitTests["unit-rest"], k)
+	}
+
+	// map iteration guarantees no order, so sort the array in-place.
+	sort.Strings(groupedUnitTests["unit-rest"])
+
+	// form the input data.
+	type data struct {
+		ItestFiles []string
+		UnitSuites map[string]string
+	}
+	in := data{
+		ItestFiles: itests,
+		UnitSuites: func() map[string]string {
+			ret := make(map[string]string)
+			for name, dirs := range groupedUnitTests {
+				for i, d := range dirs {
+					dirs[i] = fmt.Sprintf("./%s/...", d) // turn into package
+				}
+				ret[name] = strings.Join(dirs, " ")
+			}
+			return ret
+		}(),
+	}
+
+	out, err := os.Create("./config.yml")
+	if err != nil {
+		panic(err)
+	}
+	defer out.Close()
+
+	// execute the template.
+	if err := tmpl.Execute(out, in); err != nil {
+		panic(err)
+	}
+}
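gen.go switches text/template to `[[ ]]` delimiters so the template can emit CircleCI's own `<< parameters.x >>` syntax verbatim. A minimal, self-contained sketch of that technique, assuming a hypothetical inline template string rather than the embedded template.yml:

    package main

    import (
    	"os"
    	"text/template"
    )

    func main() {
    	// With [[ ]] as the action delimiters, << ... >> passes through
    	// untouched and only [[ . ]] is expanded by Go's template engine.
    	tmpl := template.New("demo")
    	tmpl.Delims("[[", "]]")
    	tmpl = template.Must(tmpl.Parse("suite: [[ . ]]\ntarget: << parameters.target >>\n"))
    	if err := tmpl.Execute(os.Stdout, "itest-api"); err != nil {
    		panic(err)
    	}
    }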
902
.circleci/template.yml
Normal file
902
.circleci/template.yml
Normal file
@ -0,0 +1,902 @@
|
|||||||
|
version: 2.1
|
||||||
|
orbs:
|
||||||
|
go: gotest/tools@0.0.13
|
||||||
|
aws-cli: circleci/aws-cli@1.3.2
|
||||||
|
packer: salaxander/packer@0.0.3
|
||||||
|
|
||||||
|
executors:
|
||||||
|
golang:
|
||||||
|
docker:
|
||||||
|
- image: circleci/golang:1.16.4
|
||||||
|
resource_class: 2xlarge
|
||||||
|
ubuntu:
|
||||||
|
docker:
|
||||||
|
- image: ubuntu:20.04
|
||||||
|
|
||||||
|
commands:
|
||||||
|
install-deps:
|
||||||
|
steps:
|
||||||
|
- go/install-ssh
|
||||||
|
- go/install: {package: git}
|
||||||
|
prepare:
|
||||||
|
parameters:
|
||||||
|
linux:
|
||||||
|
default: true
|
||||||
|
description: is a linux build environment?
|
||||||
|
type: boolean
|
||||||
|
darwin:
|
||||||
|
default: false
|
||||||
|
description: is a darwin build environment?
|
||||||
|
type: boolean
|
||||||
|
steps:
|
||||||
|
- checkout
|
||||||
|
- git_fetch_all_tags
|
||||||
|
- checkout
|
||||||
|
- when:
|
||||||
|
condition: << parameters.linux >>
|
||||||
|
steps:
|
||||||
|
- run: sudo apt-get update
|
||||||
|
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
|
||||||
|
- run: git submodule sync
|
||||||
|
- run: git submodule update --init
|
||||||
|
download-params:
|
||||||
|
steps:
|
||||||
|
- restore_cache:
|
||||||
|
name: Restore parameters cache
|
||||||
|
keys:
|
||||||
|
- 'v25-2k-lotus-params'
|
||||||
|
paths:
|
||||||
|
- /var/tmp/filecoin-proof-parameters/
|
||||||
|
- run: ./lotus fetch-params 2048
|
||||||
|
- save_cache:
|
||||||
|
name: Save parameters cache
|
||||||
|
key: 'v25-2k-lotus-params'
|
||||||
|
paths:
|
||||||
|
- /var/tmp/filecoin-proof-parameters/
|
||||||
|
install_ipfs:
|
||||||
|
steps:
|
||||||
|
- run: |
|
||||||
|
apt update
|
||||||
|
apt install -y wget
|
||||||
|
wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz
|
||||||
|
wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512
|
||||||
|
if [ "$(sha512sum go-ipfs_v0.4.22_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512)" ]
|
||||||
|
then
|
||||||
|
echo "ipfs failed checksum check"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
tar -xf go-ipfs_v0.4.22_linux-amd64.tar.gz
|
||||||
|
mv go-ipfs/ipfs /usr/local/bin/ipfs
|
||||||
|
chmod +x /usr/local/bin/ipfs
|
||||||
|
git_fetch_all_tags:
|
||||||
|
steps:
|
||||||
|
- run:
|
||||||
|
name: fetch all tags
|
||||||
|
command: |
|
||||||
|
git fetch --all
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
mod-tidy-check:
|
||||||
|
executor: golang
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- go/mod-tidy-check
|
||||||
|
|
||||||
|
build-all:
|
||||||
|
executor: golang
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- run: sudo apt-get update
|
||||||
|
- run: sudo apt-get install npm
|
||||||
|
- run:
|
||||||
|
command: make buildall
|
||||||
|
- store_artifacts:
|
||||||
|
path: lotus
|
||||||
|
- store_artifacts:
|
||||||
|
path: lotus-miner
|
||||||
|
- store_artifacts:
|
||||||
|
path: lotus-worker
|
||||||
|
- run: mkdir linux && mv lotus lotus-miner lotus-worker linux/
|
||||||
|
- persist_to_workspace:
|
||||||
|
root: "."
|
||||||
|
paths:
|
||||||
|
- linux
|
||||||
|
|
||||||
|
build-debug:
|
||||||
|
executor: golang
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- run:
|
||||||
|
command: make debug
|
||||||
|
|
||||||
|
test:
|
||||||
|
description: |
|
||||||
|
Run tests with gotestsum.
|
||||||
|
parameters: &test-params
|
||||||
|
executor:
|
||||||
|
type: executor
|
||||||
|
default: golang
|
||||||
|
go-test-flags:
|
||||||
|
type: string
|
||||||
|
default: "-timeout 30m"
|
||||||
|
description: Flags passed to go test.
|
||||||
|
target:
|
||||||
|
type: string
|
||||||
|
default: "./..."
|
||||||
|
description: Import paths of packages to be tested.
|
||||||
|
proofs-log-test:
|
||||||
|
type: string
|
||||||
|
default: "0"
|
||||||
|
suite:
|
||||||
|
type: string
|
||||||
|
default: unit
|
||||||
|
description: Test suite name to report to CircleCI.
|
||||||
|
gotestsum-format:
|
||||||
|
type: string
|
||||||
|
default: standard-verbose
|
||||||
|
description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
|
||||||
|
coverage:
|
||||||
|
type: string
|
||||||
|
default: -coverprofile=coverage.txt -coverpkg=github.com/filecoin-project/lotus/...
|
||||||
|
description: Coverage flag. Set to the empty string to disable.
|
||||||
|
codecov-upload:
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
description: |
|
||||||
|
Upload coverage report to https://codecov.io/. Requires the codecov API token to be
|
||||||
|
set as an environment variable for private projects.
|
||||||
|
executor: << parameters.executor >>
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- run:
|
||||||
|
command: make deps lotus
|
||||||
|
no_output_timeout: 30m
|
||||||
|
- download-params
|
||||||
|
- go/install-gotestsum:
|
||||||
|
gobin: $HOME/.local/bin
|
||||||
|
version: 0.5.2
|
||||||
|
- run:
|
||||||
|
name: go test
|
||||||
|
environment:
|
||||||
|
TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >>
|
||||||
|
SKIP_CONFORMANCE: "1"
|
||||||
|
command: |
|
||||||
|
mkdir -p /tmp/test-reports/<< parameters.suite >>
|
||||||
|
mkdir -p /tmp/test-artifacts
|
||||||
|
gotestsum \
|
||||||
|
--format << parameters.gotestsum-format >> \
|
||||||
|
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
|
||||||
|
--jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
|
||||||
|
-- \
|
||||||
|
<< parameters.coverage >> \
|
||||||
|
<< parameters.go-test-flags >> \
|
||||||
|
<< parameters.target >>
|
||||||
|
no_output_timeout: 30m
|
||||||
|
- store_test_results:
|
||||||
|
path: /tmp/test-reports
|
||||||
|
- store_artifacts:
|
||||||
|
path: /tmp/test-artifacts/<< parameters.suite >>.json
|
||||||
|
- when:
|
||||||
|
condition: << parameters.codecov-upload >>
|
||||||
|
steps:
|
||||||
|
- go/install: {package: bash}
|
||||||
|
- go/install: {package: curl}
|
||||||
|
- run:
|
||||||
|
shell: /bin/bash -eo pipefail
|
||||||
|
command: |
|
||||||
|
bash <(curl -s https://codecov.io/bash)
|
||||||
|
|
||||||
|
test-conformance:
|
||||||
|
description: |
|
||||||
|
Run tests using a corpus of interoperable test vectors for Filecoin
|
||||||
|
implementations to test their correctness and compliance with the Filecoin
|
||||||
|
specifications.
|
||||||
|
parameters:
|
||||||
|
<<: *test-params
|
||||||
|
vectors-branch:
|
||||||
|
type: string
|
||||||
|
default: ""
|
||||||
|
description: |
|
||||||
|
Branch on github.com/filecoin-project/test-vectors to checkout and
|
||||||
|
test with. If empty (the default) the commit defined by the git
|
||||||
|
submodule is used.
|
||||||
|
executor: << parameters.executor >>
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- run:
|
||||||
|
command: make deps lotus
|
||||||
|
no_output_timeout: 30m
|
||||||
|
- download-params
|
||||||
|
- when:
|
||||||
|
condition:
|
||||||
|
not:
|
||||||
|
equal: [ "", << parameters.vectors-branch >> ]
|
||||||
|
steps:
|
||||||
|
- run:
|
||||||
|
name: checkout vectors branch
|
||||||
|
command: |
|
||||||
|
cd extern/test-vectors
|
||||||
|
git fetch
|
||||||
|
git checkout origin/<< parameters.vectors-branch >>
|
||||||
|
- go/install-gotestsum:
|
||||||
|
gobin: $HOME/.local/bin
|
||||||
|
version: 0.5.2
|
||||||
|
- run:
|
||||||
|
name: install statediff globally
|
||||||
|
command: |
|
||||||
|
## statediff is optional; we succeed even if compilation fails.
|
||||||
|
mkdir -p /tmp/statediff
|
||||||
|
git clone https://github.com/filecoin-project/statediff.git /tmp/statediff
|
||||||
|
cd /tmp/statediff
|
||||||
|
go install ./cmd/statediff || exit 0
|
||||||
|
- run:
|
||||||
|
name: go test
|
||||||
|
environment:
|
||||||
|
SKIP_CONFORMANCE: "0"
|
||||||
|
command: |
|
||||||
|
mkdir -p /tmp/test-reports
|
||||||
|
mkdir -p /tmp/test-artifacts
|
||||||
|
gotestsum \
|
||||||
|
--format pkgname-and-test-fails \
|
||||||
|
--junitfile /tmp/test-reports/junit.xml \
|
||||||
|
-- \
|
||||||
|
-v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/
|
||||||
|
go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html
|
||||||
|
no_output_timeout: 30m
|
||||||
|
- store_test_results:
|
||||||
|
path: /tmp/test-reports
|
||||||
|
- store_artifacts:
|
||||||
|
path: /tmp/test-artifacts/conformance-coverage.html
|
||||||
|
build-ntwk-calibration:
|
||||||
|
description: |
|
||||||
|
Compile lotus binaries for the calibration network
|
||||||
|
parameters:
|
||||||
|
<<: *test-params
|
||||||
|
executor: << parameters.executor >>
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- run: make calibnet
|
||||||
|
- run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet
|
||||||
|
- persist_to_workspace:
|
||||||
|
root: "."
|
||||||
|
paths:
|
||||||
|
- linux-calibrationnet
|
||||||
|
build-ntwk-butterfly:
|
||||||
|
description: |
|
||||||
|
Compile lotus binaries for the butterfly network
|
||||||
|
parameters:
|
||||||
|
<<: *test-params
|
||||||
|
executor: << parameters.executor >>
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- run: make butterflynet
|
||||||
|
- run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet
|
||||||
|
- persist_to_workspace:
|
||||||
|
root: "."
|
||||||
|
paths:
|
||||||
|
- linux-butterflynet
|
||||||
|
build-ntwk-nerpa:
|
||||||
|
description: |
|
||||||
|
Compile lotus binaries for the nerpa network
|
||||||
|
parameters:
|
||||||
|
<<: *test-params
|
||||||
|
executor: << parameters.executor >>
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- run: make nerpanet
|
||||||
|
- run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet
|
||||||
|
- persist_to_workspace:
|
||||||
|
root: "."
|
||||||
|
paths:
|
||||||
|
- linux-nerpanet
|
||||||
|
build-lotus-soup:
|
||||||
|
description: |
|
||||||
|
Compile `lotus-soup` Testground test plan
|
||||||
|
parameters:
|
||||||
|
<<: *test-params
|
||||||
|
executor: << parameters.executor >>
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- run: cd extern/filecoin-ffi && make
|
||||||
|
- run:
|
||||||
|
name: "go get lotus@master"
|
||||||
|
command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy
|
||||||
|
- run:
|
||||||
|
name: "build lotus-soup testplan"
|
||||||
|
command: pushd testplans/lotus-soup && go build -tags=testground .
|
||||||
|
trigger-testplans:
|
||||||
|
description: |
|
||||||
|
Trigger `lotus-soup` test cases on TaaS
|
||||||
|
parameters:
|
||||||
|
<<: *test-params
|
||||||
|
executor: << parameters.executor >>
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- run:
|
||||||
|
name: "download testground"
|
||||||
|
command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli
|
||||||
|
- run:
|
||||||
|
name: "prepare .env.toml"
|
||||||
|
command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
|
||||||
|
- run:
|
||||||
|
name: "prepare testground home dir and link test plans"
|
||||||
|
command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync
|
||||||
|
- run:
|
||||||
|
name: "go get lotus@master"
|
||||||
|
command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
|
||||||
|
- run:
|
||||||
|
name: "trigger deals baseline testplan on taas"
|
||||||
|
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
|
||||||
|
- run:
|
||||||
|
name: "trigger payment channel stress testplan on taas"
|
||||||
|
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
|
||||||
|
- run:
|
||||||
|
name: "trigger graphsync testplan on taas"
|
||||||
|
command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
|
||||||
|
|
||||||
|
|
||||||
|
build-macos:
|
||||||
|
description: build darwin lotus binary
|
||||||
|
macos:
|
||||||
|
xcode: "10.0.0"
|
||||||
|
working_directory: ~/go/src/github.com/filecoin-project/lotus
|
||||||
|
steps:
|
||||||
|
- prepare:
|
||||||
|
linux: false
|
||||||
|
darwin: true
|
||||||
|
- run:
|
||||||
|
name: Install go
|
||||||
|
command: |
|
||||||
|
curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \
|
||||||
|
sudo installer -pkg go1.16.4.darwin-amd64.pkg -target /
|
||||||
|
- run:
|
||||||
|
name: Install pkg-config
|
||||||
|
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
|
||||||
|
- run: go version
|
||||||
|
- run:
|
||||||
|
name: Install Rust
|
||||||
|
command: |
|
||||||
|
curl https://sh.rustup.rs -sSf | sh -s -- -y
|
||||||
|
- run:
|
||||||
|
name: Install jq
|
||||||
|
command: |
|
||||||
|
curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
|
||||||
|
chmod +x /usr/local/bin/jq
|
||||||
|
- run:
|
||||||
|
name: Install hwloc
|
||||||
|
command: |
|
||||||
|
mkdir ~/hwloc
|
||||||
|
curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
|
||||||
|
cd ~/hwloc
|
||||||
|
tar -xvzpf hwloc-2.4.1.tar.gz
|
||||||
|
cd hwloc-2.4.1
|
||||||
|
./configure && make && sudo make install
|
||||||
|
- restore_cache:
|
||||||
|
name: restore cargo cache
|
||||||
|
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
|
||||||
|
- install-deps
|
||||||
|
- run:
|
||||||
|
command: make build
|
||||||
|
no_output_timeout: 30m
|
||||||
|
- store_artifacts:
|
||||||
|
path: lotus
|
||||||
|
- store_artifacts:
|
||||||
|
path: lotus-miner
|
||||||
|
- store_artifacts:
|
||||||
|
path: lotus-worker
|
||||||
|
- run: mkdir darwin && mv lotus lotus-miner lotus-worker darwin/
|
||||||
|
- persist_to_workspace:
|
||||||
|
root: "."
|
||||||
|
paths:
|
||||||
|
- darwin
|
||||||
|
- save_cache:
|
||||||
|
name: save cargo cache
|
||||||
|
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
|
||||||
|
paths:
|
||||||
|
- "~/.rustup"
|
||||||
|
- "~/.cargo"
|
||||||
|
|
||||||
|
build-appimage:
|
||||||
|
machine:
|
||||||
|
image: ubuntu-2004:202104-01
|
||||||
|
steps:
|
||||||
|
- checkout
|
||||||
|
- attach_workspace:
|
||||||
|
at: "."
|
||||||
|
- run:
|
||||||
|
name: install appimage-builder
|
||||||
|
command: |
|
||||||
|
# docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
|
||||||
|
sudo apt update
|
||||||
|
sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
|
||||||
|
sudo curl -Lo /usr/local/bin/appimagetool https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage
|
||||||
|
sudo chmod +x /usr/local/bin/appimagetool
|
||||||
|
sudo pip3 install appimage-builder
|
||||||
|
- run:
|
||||||
|
name: install lotus dependencies
|
||||||
|
command: sudo apt install ocl-icd-opencl-dev libhwloc-dev
|
||||||
|
- run:
|
||||||
|
name: build appimage
|
||||||
|
command: |
|
||||||
|
sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml
|
||||||
|
make appimage
|
||||||
|
- run:
|
||||||
|
name: prepare workspace
|
||||||
|
command: |
|
||||||
|
mkdir appimage
|
||||||
|
mv Lotus-*.AppImage appimage
|
||||||
|
- persist_to_workspace:
|
||||||
|
root: "."
|
||||||
|
paths:
|
||||||
|
- appimage
|
||||||
|
|
||||||
|
|
||||||
|
gofmt:
|
||||||
|
executor: golang
|
||||||
|
steps:
|
||||||
|
- install-deps
|
||||||
|
- prepare
|
||||||
|
- run:
|
||||||
|
command: "! go fmt ./... 2>&1 | read"
|
||||||
|
|
||||||
|
  gen-check:
    executor: golang
    steps:
      - install-deps
      - prepare
      - run: make deps
      - run: go install golang.org/x/tools/cmd/goimports
      - run: go install github.com/hannahhoward/cbor-gen-for
      - run: make gen
      - run: git --no-pager diff
      - run: git --no-pager diff --quiet
      - run: make docsgen-cli
      - run: git --no-pager diff
      - run: git --no-pager diff --quiet

  docs-check:
    executor: golang
    steps:
      - install-deps
      - prepare
      - run: go install golang.org/x/tools/cmd/goimports
      - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
      - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
      - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
      - run: make deps
      - run: make docsgen
      - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
      - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
      - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
      - run: git --no-pager diff
      - run: diff ../pre-openrpc-full ../post-openrpc-full
      - run: diff ../pre-openrpc-miner ../post-openrpc-miner
      - run: diff ../pre-openrpc-worker ../post-openrpc-worker
      - run: git --no-pager diff --quiet

  lint: &lint
    description: |
      Run golangci-lint.
    parameters:
      executor:
        type: executor
        default: golang
      golangci-lint-version:
        type: string
        default: 1.27.0
      concurrency:
        type: string
        default: '2'
        description: |
          Concurrency used to run linters. Defaults to 2 because NumCPU is not
          aware of container CPU limits.
      args:
        type: string
        default: ''
        description: |
          Arguments to pass to golangci-lint
    executor: << parameters.executor >>
    steps:
      - install-deps
      - prepare
      - run:
          command: make deps
          no_output_timeout: 30m
      - go/install-golangci-lint:
          gobin: $HOME/.local/bin
          version: << parameters.golangci-lint-version >>
      - run:
          name: Lint
          command: |
            $HOME/.local/bin/golangci-lint run -v --timeout 2m \
              --concurrency << parameters.concurrency >> << parameters.args >>
  lint-all:
    <<: *lint

  publish:
    description: publish binary artifacts
    executor: ubuntu
    steps:
      - run:
          name: Install git jq curl
          command: apt update && apt install -y git jq curl
      - checkout
      - git_fetch_all_tags
      - checkout
      - install_ipfs
      - attach_workspace:
          at: "."
      - run:
          name: Create bundles
          command: ./scripts/build-bundle.sh
      - run:
          name: Publish release
          command: ./scripts/publish-release.sh

  publish-snapcraft:
    description: build and push snapcraft
    machine:
      image: ubuntu-2004:202104-01
    resource_class: 2xlarge
    parameters:
      channel:
        type: string
        default: "edge"
        description: snapcraft channel
    steps:
      - checkout
      - run:
          name: install snapcraft
          command: sudo snap install snapcraft --classic
      - run:
          name: create snapcraft config file
          command: |
            mkdir -p ~/.config/snapcraft
            echo "$SNAPCRAFT_LOGIN_FILE" | base64 -d > ~/.config/snapcraft/snapcraft.cfg
      - run:
          name: build snap
          command: snapcraft --use-lxd
      - run:
          name: publish snap
          command: snapcraft push *.snap --release << parameters.channel >>

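The `create snapcraft config file` step assumes `$SNAPCRAFT_LOGIN_FILE` holds base64-encoded snapcraft credentials. One plausible way the secret is produced (an assumption; the commit itself does not show it):

    # export snapcraft login credentials, then base64-encode them for the CI env var
    snapcraft export-login snapcraft.cfg
    base64 -w0 snapcraft.cfg   # paste the output into the SNAPCRAFT_LOGIN_FILE project variable
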
  build-and-push-image:
    description: build and push docker images to public AWS ECR registry
    executor: aws-cli/default
    parameters:
      profile-name:
        type: string
        default: "default"
        description: AWS profile name to be configured.

      aws-access-key-id:
        type: env_var_name
        default: AWS_ACCESS_KEY_ID
        description: >
          AWS access key id for IAM role. Set this to the name of
          the environment variable you will set to hold this
          value, i.e. AWS_ACCESS_KEY.

      aws-secret-access-key:
        type: env_var_name
        default: AWS_SECRET_ACCESS_KEY
        description: >
          AWS secret key for IAM role. Set this to the name of
          the environment variable you will set to hold this
          value, i.e. AWS_SECRET_ACCESS_KEY.

      region:
        type: env_var_name
        default: AWS_REGION
        description: >
          Name of env var storing your AWS region information,
          defaults to AWS_REGION

      account-url:
        type: env_var_name
        default: AWS_ECR_ACCOUNT_URL
        description: >
          Env var storing Amazon ECR account URL that maps to an AWS account,
          e.g. {awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com
          defaults to AWS_ECR_ACCOUNT_URL

      dockerfile:
        type: string
        default: Dockerfile
        description: Name of dockerfile to use. Defaults to Dockerfile.

      path:
        type: string
        default: .
        description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory).

      extra-build-args:
        type: string
        default: ""
        description: >
          Extra flags to pass to docker build. For examples, see
          https://docs.docker.com/engine/reference/commandline/build

      repo:
        type: string
        description: Name of an Amazon ECR repository

      tag:
        type: string
        default: "latest"
        description: A comma-separated string containing docker image tags to build and push (default = latest)

    steps:
      - run:
          name: Confirm that environment variables are set
          command: |
            if [ -z "$AWS_ACCESS_KEY_ID" ]; then
              echo "No AWS_ACCESS_KEY_ID is set. Skipping build-and-push job ..."
              circleci-agent step halt
            fi

      - aws-cli/setup:
          profile-name: <<parameters.profile-name>>
          aws-access-key-id: <<parameters.aws-access-key-id>>
          aws-secret-access-key: <<parameters.aws-secret-access-key>>
          aws-region: <<parameters.region>>

      - run:
          name: Log into Amazon ECR
          command: |
            aws ecr-public get-login-password --region $<<parameters.region>> --profile <<parameters.profile-name>> | docker login --username AWS --password-stdin $<<parameters.account-url>>

      - checkout

      - setup_remote_docker:
          version: 19.03.13
          docker_layer_caching: false

      - run:
          name: Build docker image
          command: |
            registry_id=$(echo $<<parameters.account-url>> | sed "s;\..*;;g")

            docker_tag_args=""
            IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
            for tag in "${DOCKER_TAGS[@]}"; do
              docker_tag_args="$docker_tag_args -t $<<parameters.account-url>>/<<parameters.repo>>:$tag"
            done

            docker build \
              <<#parameters.extra-build-args>><<parameters.extra-build-args>><</parameters.extra-build-args>> \
              -f <<parameters.path>>/<<parameters.dockerfile>> \
              $docker_tag_args \
              <<parameters.path>>

      - run:
          name: Push image to Amazon ECR
          command: |
            IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
            for tag in "${DOCKER_TAGS[@]}"; do
              docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag}
            done

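In the two shell blocks above, the backslash in `\<<<` only escapes CircleCI's `<<` parameter delimiter; the rendered config contains a plain bash here-string. A standalone sketch of the tag handling, with a hypothetical registry and tag list:

    # split a comma-separated tag list and build one -t flag per tag
    tags="latest,${CIRCLE_SHA1:0:8}"                  # hypothetical input
    docker_tag_args=""
    IFS="," read -ra DOCKER_TAGS <<< "$tags"
    for tag in "${DOCKER_TAGS[@]}"; do
      docker_tag_args="$docker_tag_args -t example.ecr.aws/lotus-dev:$tag"
    done
    echo "$docker_tag_args"
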
  publish-packer-mainnet:
    description: build and push AWS IAM and DigitalOcean droplet.
    executor:
      name: packer/default
      packer-version: 1.6.6
    steps:
      - checkout
      - attach_workspace:
          at: "."
      - packer/build:
          template: tools/packer/lotus.pkr.hcl
          args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG"
  publish-packer-calibrationnet:
    description: build and push AWS IAM and DigitalOcean droplet.
    executor:
      name: packer/default
      packer-version: 1.6.6
    steps:
      - checkout
      - attach_workspace:
          at: "."
      - packer/build:
          template: tools/packer/lotus.pkr.hcl
          args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG"
  publish-packer-butterflynet:
    description: build and push AWS IAM and DigitalOcean droplet.
    executor:
      name: packer/default
      packer-version: 1.6.6
    steps:
      - checkout
      - attach_workspace:
          at: "."
      - packer/build:
          template: tools/packer/lotus.pkr.hcl
          args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
  publish-packer-nerpanet:
    description: build and push AWS IAM and DigitalOcean droplet.
    executor:
      name: packer/default
      packer-version: 1.6.6
    steps:
      - checkout
      - attach_workspace:
          at: "."
      - packer/build:
          template: tools/packer/lotus.pkr.hcl
          args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG"

workflows:
  version: 2.1
  ci:
    jobs:
      - lint-all:
          concurrency: "16"   # expend all docker 2xlarge CPUs.
      - mod-tidy-check
      - gofmt
      - gen-check
      - docs-check

      [[- range $file := .ItestFiles -]]
      [[ with $name := $file | stripSuffix ]]
      - test:
          name: test-itest-[[ $name ]]
          suite: itest-[[ $name ]]
          target: "./itests/[[ $file ]]"
      [[ end ]]
      [[- end -]]

      [[range $suite, $pkgs := .UnitSuites]]
      - test:
          name: test-[[ $suite ]]
          suite: utest-[[ $suite ]]
          target: "[[ $pkgs ]]"
      [[- end]]
      - test:
          go-test-flags: "-run=TestMulticoreSDR"
          suite: multicore-sdr-check
          target: "./extern/sector-storage/ffiwrapper"
          proofs-log-test: "1"
      - test-conformance:
          suite: conformance
          codecov-upload: false
          target: "./conformance"
      - test-conformance:
          name: test-conformance-bleeding-edge
          codecov-upload: false
          suite: conformance-bleeding-edge
          target: "./conformance"
          vectors-branch: master
      - trigger-testplans:
          filters:
            branches:
              only:
                - master
      - build-debug
      - build-all:
          filters:
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - build-ntwk-calibration:
          filters:
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - build-ntwk-butterfly:
          filters:
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - build-ntwk-nerpa:
          filters:
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - build-lotus-soup
      - build-macos:
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - build-appimage:
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - publish:
          requires:
            - build-all
            - build-macos
            - build-appimage
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - build-and-push-image:
          dockerfile: Dockerfile.lotus
          path: .
          repo: lotus-dev
          tag: '${CIRCLE_SHA1:0:8}'
      - publish-packer-mainnet:
          requires:
            - build-all
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - publish-packer-calibrationnet:
          requires:
            - build-ntwk-calibration
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - publish-packer-butterflynet:
          requires:
            - build-ntwk-butterfly
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - publish-packer-nerpanet:
          requires:
            - build-ntwk-nerpa
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - publish-snapcraft:
          name: publish-snapcraft-stable
          channel: stable
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/

  nightly:
    triggers:
      - schedule:
          cron: "0 0 * * *"
          filters:
            branches:
              only:
                - master
    jobs:
      - publish-snapcraft:
          name: publish-snapcraft-nightly
          channel: edge
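Every release job in the `ci` workflow gates on the same tag filter, `/^v\d+\.\d+\.\d+(-rc\d+)?$/`. A quick way to sanity-check which tags it admits (translating `\d` to `[0-9]` for grep):

    for t in v1.10.0 v1.10.0-rc2 v1.10 1.10.0; do
      echo "$t" | grep -qE '^v[0-9]+\.[0-9]+\.[0-9]+(-rc[0-9]+)?$' \
        && echo "$t: release jobs run" || echo "$t: filtered out"
    done
    # v1.10.0 and v1.10.0-rc2 pass; v1.10 and 1.10.0 do not
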
.codecov.yml (10 changed lines)
@@ -5,5 +5,15 @@ ignore:
   - "api/test/*"
   - "gen/**/*"
   - "gen/*"
+  - "cmd/lotus-shed/*"
+  - "cmd/tvx/*"
+  - "cmd/lotus-pcr/*"
+  - "cmd/tvx/*"
+  - "cmd/lotus-chainwatch/*"
+  - "cmd/lotus-health/*"
+  - "cmd/lotus-fountain/*"
+  - "cmd/lotus-townhall/*"
+  - "cmd/lotus-stats/*"
+  - "cmd/lotus-pcr/*"
 github_checks:
   annotations: false

.github/CODEOWNERS (20 changed lines, vendored)
@@ -1,16 +1,6 @@
-## filecoin-project/lotus CODEOWNERS
-## Refer to https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners.
-##
-## These users or groups will be automatically assigned as reviewers every time
-## a PR is submitted that modifies code in the specified locations.
-##
-## The Lotus repo configuration requires that at least ONE codeowner approves
-## the PR before merging.
+# Reference
+# https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners
 
-### Global owners.
-* @magik6k @whyrusleeping @Kubuxu
-
-### Conformance testing.
-conformance/ @raulk
-extern/test-vectors @raulk
-cmd/tvx @raulk
+# Global owners
+# Ensure maintainers team is a requested reviewer for non-draft PRs
+* @filecoin-project/lotus-maintainers

.github/ISSUE_TEMPLATE/bug-report.md (new file, 33 lines, vendored)
@@ -0,0 +1,33 @@
+---
+name: Bug Report
+about: Create a report to help us improve
+title: "[BUG] "
+labels: hint/needs-triaging, kind/bug
+assignees: ''
+
+---
+
+> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+(If you are not sure what the bug is, try to figure it out via a [discussion](https://github.com/filecoin-project/lotus/discussions/new) first!
+
+**Version (run `lotus version`):**
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Run '...'
+2. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Logs**
+Provide daemon/miner/worker logs, and goroutines(if available) for troubleshooting.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Additional context**
+Add any other context about the problem here.

.github/ISSUE_TEMPLATE/bug_report.md (deleted, 27 lines, vendored)
@@ -1,27 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Run '...'
-2. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Version (run `lotus version`):**
-
-**Additional context**
-Add any other context about the problem here.

.github/ISSUE_TEMPLATE/deal-making-issues.md (new file, 49 lines, vendored)
@@ -0,0 +1,49 @@
+---
+name: Deal Making Issues
+about: Create a report for help with deal making failures.
+title: "[Deal Making Issue]"
+labels: hint/needs-triaging, area/markets
+assignees: ''
+
+---
+
+> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+
+Please provide all the information requested here to help us troubleshoot "deal making failed" issues.
+If the information requested is missing, we will probably have to just ask you to provide it anyway,
+before we can help debug.
+
+**Basic Information**
+Including information like, Are you the client or the miner? Is this a storage deal or a retrieval deal? Is it an offline deal?
+
+**Describe the problem**
+
+A brief description of the problem you encountered while trying to make a deal.
+
+**Version**
+
+The output of `lotus --version`.
+
+**Setup**
+
+You miner(if applicable) and daemon setup, i.e: What hardware do you use, how much ram and etc.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Run '...'
+2. See error
+
+**Deal status**
+
+The output of `lotus client list-deals -v` and/or `lotus-miner storage-deals|retrieval-deals|data-transfers list [-v]` commands for the deal(s) in question.
+
+**Lotus daemon and miner logs**
+
+Please go through the logs of your daemon and miner(if applicable), and include screenshots of any error/warning-like messages you find.
+
+Alternatively please upload full log files and share a link here
+
+** Code modifications **
+
+If you have modified parts of lotus, please describe which areas were modified,
+and the scope of those modifications

.github/ISSUE_TEMPLATE/feature_request.md (new file, 20 lines, vendored)
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: "[Feature Request]"
+labels: hint/needs-triaging
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.

.github/ISSUE_TEMPLATE/mining-issues.md (new file, 35 lines, vendored)
@@ -0,0 +1,35 @@
+---
+name: Mining Issues
+about: Create a report for help with mining failures.
+title: "[Mining Issue]"
+labels: hint/needs-triaging, area/mining
+assignees: ''
+
+---
+
+> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+
+Please provide all the information requested here to help us troubleshoot "mining/WinningPoSt failed" issues.
+If the information requested is missing, you may be asked you to provide it.
+
+**Describe the problem**
+A brief description of the problem you encountered while mining new blocks.
+
+**Version**
+
+The output of `lotus --version`.
+
+**Setup**
+
+You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc.
+
+**Lotus daemon and miner logs**
+
+Please go through the logs of your daemon and miner, and include screenshots of any error/warning-like messages you find, highlighting the one has "winning post" in it.
+
+Alternatively please upload full log files and share a link here
+
+** Code modifications **
+
+If you have modified parts of lotus, please describe which areas were modified,
+and the scope of those modifications

.github/ISSUE_TEMPLATE/proving-issues.md (new file, 46 lines, vendored)
@@ -0,0 +1,46 @@
+---
+name: Proving Issues
+about: Create a report for help with proving failures.
+title: "[Proving Issue]"
+labels: area/proving, hint/needs-triaging
+assignees: ''
+
+---
+
+> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+
+Please provide all the information requested here to help us troubleshoot "proving/window PoSt failed" issues.
+If the information requested is missing, we will probably have to just ask you to provide it anyway,
+before we can help debug.
+
+**Describe the problem**
+A brief description of the problem you encountered while proving the storage.
+
+**Version**
+
+The output of `lotus --version`.
+
+**Setup**
+
+You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc.
+
+**Proving status**
+
+The output of `lotus-miner proving` info.
+
+**Lotus miner logs**
+
+Please go through the logs of your miner, and include screenshots of any error-like messages you find, highlighting the one has "window post" in it.
+
+Alternatively please upload full log files and share a link here
+
+**Lotus miner diagnostic info**
+
+Please collect the following diagnostic information, and share a link here
+
+* lotus-miner diagnostic info `lotus-miner info all > allinfo.txt`
+
+** Code modifications **
+
+If you have modified parts of lotus, please describe which areas were modified,
+and the scope of those modifications

.github/ISSUE_TEMPLATE/sealing-issues.md
@@ -1,21 +1,32 @@
 ---
 name: Sealing Issues
 about: Create a report for help with sealing (commit) failures.
-title: ''
-labels: 'sealing'
+title: "[Sealing Issue]"
+labels: hint/needs-triaging, area/sealing
 assignees: ''
 
 ---
 
+> Note: For security-related bugs/issues, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
+
 Please provide all the information requested here to help us troubleshoot "commit failed" issues.
 If the information requested is missing, we will probably have to just ask you to provide it anyway,
 before we can help debug.
 
 **Describe the problem**
+A brief description of the problem you encountered while sealing a sector.
 
-A brief description of the problem you encountered while proving (sealing) a sector.
+**Version**
 
-Including what commands you ran, and a description of your setup, is very helpful.
+The output of `lotus --version`.
+
+**Setup**
+
+You miner and daemon setup, including what hardware do you use, your environment variable settings, how do you run your miner and worker, do you use GPU and etc.
+
+**Commands**
+
+Commands you ran.
+
 **Sectors status**
@@ -37,7 +48,3 @@ Please collect the following diagnostic information, and share a link here
 
 If you have modified parts of lotus, please describe which areas were modified,
 and the scope of those modifications
-
-**Version**
-
-The output of `lotus --version`.

.github/workflows/codeql-analysis.yml (new file, 69 lines, vendored)
@@ -0,0 +1,69 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ master ]
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'go' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+        # Learn more:
+        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+
+    - uses: actions/setup-go@v1
+      with:
+        go-version: '1.16.4'
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v1
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+        # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v1
+
+    # ℹ️ Command-line programs to run using the OS shell.
+    # 📚 https://git.io/JvXDl
+
+    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+    #    and modify them (or add more) to build your code if your project
+    #    uses a compiled language
+
+    #- run: |
+    #   make bootstrap
+    #   make release
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v1

.github/workflows/stale.yml (new file, 27 lines, vendored)
@@ -0,0 +1,27 @@
+name: Close and mark stale issue
+
+on:
+  schedule:
+    - cron: '0 0 * * *'
+
+jobs:
+  stale:
+
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+      pull-requests: write
+
+    steps:
+    - uses: actions/stale@v3
+      with:
+        repo-token: ${{ secrets.GITHUB_TOKEN }}
+        stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 24 hours.'
+        close-issue-message: 'This issue was closed because it is missing author input.'
+        stale-issue-label: 'kind/stale'
+        any-of-labels: 'hint/needs-author-input'
+        days-before-issue-stale: 5
+        days-before-issue-close: 1
+        enable-statistics: true
+
+

.github/workflows/testground-on-push.yml (new file, 29 lines, vendored)
@@ -0,0 +1,29 @@
+---
+name: Testground PR Checker
+
+on: [push]
+
+jobs:
+  testground:
+    runs-on: ubuntu-latest
+    name: ${{ matrix.composition_file }}
+    strategy:
+      matrix:
+        include:
+          - backend_addr: ci.testground.ipfs.team
+            backend_proto: https
+            plan_directory: testplans/lotus-soup
+            composition_file: testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml
+          - backend_addr: ci.testground.ipfs.team
+            backend_proto: https
+            plan_directory: testplans/lotus-soup
+            composition_file: testplans/lotus-soup/_compositions/paych-stress-k8s.toml
+    steps:
+    - uses: actions/checkout@v2
+    - name: testground run
+      uses: coryschwartz/testground-github-action@v1.1
+      with:
+        backend_addr: ${{ matrix.backend_addr }}
+        backend_proto: ${{ matrix.backend_proto }}
+        plan_directory: ${{ matrix.plan_directory }}
+        composition_file: ${{ matrix.composition_file }}

.gitignore (7 changed lines, vendored)
@@ -1,3 +1,6 @@
+/AppDir
+/appimage-builder-cache
+*.AppImage
 /lotus
 /lotus-miner
 /lotus-worker
@@ -5,6 +8,7 @@
 /lotus-health
 /lotus-chainwatch
 /lotus-shed
+/lotus-sim
 /lotus-pond
 /lotus-townhall
 /lotus-fountain
@@ -13,6 +17,9 @@
 /lotus-gateway
 /lotus-pcr
 /lotus-wallet
+/lotus-keygen
+/docgen-md
+/docgen-openrpc
 /bench.json
 /lotuspond/front/node_modules
 /lotuspond/front/build

.gitmodules (9 changed lines, vendored)
@@ -1,16 +1,9 @@
 [submodule "extern/filecoin-ffi"]
 	path = extern/filecoin-ffi
 	url = https://github.com/filecoin-project/filecoin-ffi.git
-	branch = master
 [submodule "extern/serialization-vectors"]
 	path = extern/serialization-vectors
-	url = https://github.com/filecoin-project/serialization-vectors
+	url = https://github.com/filecoin-project/serialization-vectors.git
 [submodule "extern/test-vectors"]
 	path = extern/test-vectors
 	url = https://github.com/filecoin-project/test-vectors.git
-[submodule "extern/fil-blst"]
-	path = extern/fil-blst
-	url = https://github.com/filecoin-project/fil-blst.git
-[submodule "extern/oni"]
-	path = extern/oni
-	url = https://github.com/filecoin-project/oni

.golangci.yml
@@ -16,6 +16,12 @@ linters:
     - deadcode
     - scopelint
 
+# We don't want to skip builtin/
+skip-dirs-use-default: false
+skip-dirs:
+  - vendor$
+  - testdata$
+  - examples$
+
 issues:
   exclude:

AppDir/usr/share/icons/icon.svg (new file, 1 line)
@@ -0,0 +1 @@
+<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0" y="0" viewBox="0 0 127 127" xml:space="preserve" enable-background="new 0 0 127 127"><style type="text/css">.st0{fill:#00d2d6}.st1{fill:#fff}</style><g><path class="st0" d="M63.5,127C28.5,127.1-0.2,98.4,0,63.2C0.2,28.3,28.6-0.2,63.9,0c34.8,0.2,63.3,28.7,63.1,64 C126.7,98.7,98.5,127.1,63.5,127z M71.4,57.6c5.5,0.8,11,1.5,16.5,2.3c0.5-1.7,0.9-3.1,1.3-4.7c-5.7-0.8-11.2-1.7-17.1-2.5 c2-7,3.7-13.7,5.8-20.2c0.7-2.2,2.3-4.2,3.9-5.9c2.1-2.2,5-1.7,6.8,0.7c0.7,1,1.4,2.1,2.3,2.9c1.1,1.1,2.8,1.6,4,0.6 c0.8-0.7,0.7-2.4,0.8-3.6c0-0.5-0.6-1.1-1-1.6c-2-2.3-4.7-3.1-7.5-3.2c-6.3-0.3-10.9,3-14.5,7.8c-3.5,4.8-5.1,10.5-6.8,16.2 c-0.5,1.6-0.9,3.3-1.4,5.1c-6.2-0.9-12.1-1.7-18.2-2.6c-0.2,1.6-0.4,3.2-0.6,4.8c6,0.9,11.8,1.8,17.8,2.7c-0.8,3.4-1.5,6.5-2.3,9.7 c-5.8-0.8-11.4-1.6-17-2.4c-0.2,1.8-0.4,3.2-0.6,4.8c5.6,0.9,11,1.7,16.5,2.5c0,0.6,0.1,1,0,1.3c-1.7,7.4-3.4,14.8-5.3,22.2 c-0.9,3.5-2.4,6.9-5.3,9.3c-2.4,2-5,1.7-6.8-0.8c-0.8-1.1-1.5-2.5-2.6-3.3c-0.8-0.6-2.5-0.9-3.1-0.5c-0.9,0.7-1.5,2.2-1.4,3.3 c0.1,1,1,2.2,1.9,2.8c3,2.3,6.5,2.6,10,1.9c5.9-1.2,10.1-4.9,12.7-10.1c2-4.1,3.6-8.5,5-12.9c1.3-4,2.2-8,3.3-12.2 c5.8,0.8,11.5,1.7,17.3,2.5c0.5-1.7,0.9-3.2,1.4-4.8c-6.1-0.9-11.9-1.7-17.7-2.6C70.1,64,70.7,60.9,71.4,57.6z"/><path class="st1" d="M71.4,57.6c-0.7,3.3-1.3,6.4-2,9.8c5.9,0.9,11.7,1.7,17.7,2.6c-0.5,1.6-0.9,3.1-1.4,4.8 c-5.8-0.8-11.5-1.7-17.3-2.5c-1.1,4.2-2,8.3-3.3,12.2c-1.4,4.4-3,8.7-5,12.9c-2.6,5.2-6.8,8.9-12.7,10.1c-3.5,0.7-7,0.4-10-1.9 c-0.9-0.7-1.8-1.8-1.9-2.8c-0.1-1.1,0.5-2.7,1.4-3.3c0.6-0.5,2.3-0.1,3.1,0.5c1.1,0.8,1.8,2.1,2.6,3.3c1.8,2.5,4.4,2.9,6.8,0.8 c2.9-2.5,4.4-5.8,5.3-9.3c1.9-7.3,3.6-14.8,5.3-22.2c0.1-0.3,0-0.7,0-1.3c-5.4-0.8-10.8-1.7-16.5-2.5c0.2-1.6,0.4-3,0.6-4.8 c5.6,0.8,11.1,1.6,17,2.4c0.8-3.2,1.5-6.4,2.3-9.7c-6-0.9-11.7-1.8-17.8-2.7c0.2-1.6,0.4-3.2,0.6-4.8c6.1,0.9,12,1.7,18.2,2.6 c0.5-1.8,0.9-3.5,1.4-5.1c1.7-5.6,3.2-11.3,6.8-16.2c3.6-4.9,8.1-8.1,14.5-7.8c2.8,0.1,5.5,0.9,7.5,3.2c0.4,0.5,1,1.1,1,1.6 c-0.1,1.2,0,2.9-0.8,3.6c-1.1,1.1-2.8,0.5-4-0.6c-0.9-0.9-1.6-1.9-2.3-2.9c-1.8-2.4-4.7-2.9-6.8-0.7c-1.6,1.7-3.2,3.7-3.9,5.9 C75.7,39.4,74,46,72,53c5.9,0.9,11.4,1.7,17.1,2.5c-0.5,1.6-0.9,3.1-1.3,4.7C82.3,59.1,76.9,58.3,71.4,57.6z"/></g></svg>

AppImageBuilder.yml (new file, 73 lines)
@@ -0,0 +1,73 @@
+version: 1
+AppDir:
+  path: ./AppDir
+  app_info:
+    id: io.filecoin.lotus
+    name: Lotus
+    icon: icon
+    version: latest
+    exec: usr/bin/lotus
+    exec_args: $@
+  apt:
+    arch: amd64
+    allow_unauthenticated: true
+    sources:
+      - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal main restricted
+      - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates main restricted
+      - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal universe
+      - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates universe
+      - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal multiverse
+      - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-updates multiverse
+      - sourceline: deb http://archive.ubuntu.com/ubuntu/ focal-backports main restricted
+        universe multiverse
+      - sourceline: deb http://security.ubuntu.com/ubuntu focal-security main restricted
+      - sourceline: deb http://security.ubuntu.com/ubuntu focal-security universe
+      - sourceline: deb http://security.ubuntu.com/ubuntu focal-security multiverse
+      - sourceline: deb https://cli-assets.heroku.com/apt ./
+      - sourceline: deb http://ppa.launchpad.net/openjdk-r/ppa/ubuntu focal main
+      - sourceline: deb http://ppa.launchpad.net/git-core/ppa/ubuntu focal main
+      - sourceline: deb http://archive.canonical.com/ubuntu focal partner
+    include:
+      - ocl-icd-libopencl1
+      - libhwloc15
+    exclude: []
+  files:
+    include:
+      - /usr/lib/x86_64-linux-gnu/libgcc_s.so.1
+      - /usr/lib/x86_64-linux-gnu/libpthread-2.31.so
+      - /usr/lib/x86_64-linux-gnu/libm-2.31.so
+      - /usr/lib/x86_64-linux-gnu/libdl-2.31.so
+      - /usr/lib/x86_64-linux-gnu/libc-2.31.so
+      - /usr/lib/x86_64-linux-gnu/libudev.so.1.6.17
+    exclude:
+      - usr/share/man
+      - usr/share/doc/*/README.*
+      - usr/share/doc/*/changelog.*
+      - usr/share/doc/*/NEWS.*
+      - usr/share/doc/*/TODO.*
+  test:
+    fedora:
+      image: appimagecrafters/tests-env:fedora-30
+      command: ./AppRun
+      use_host_x: true
+    debian:
+      image: appimagecrafters/tests-env:debian-stable
+      command: ./AppRun
+      use_host_x: true
+    arch:
+      image: appimagecrafters/tests-env:archlinux-latest
+      command: ./AppRun
+      use_host_x: true
+    centos:
+      image: appimagecrafters/tests-env:centos-7
+      command: ./AppRun
+      use_host_x: true
+    ubuntu:
+      image: appimagecrafters/tests-env:ubuntu-xenial
+      command: ./AppRun
+      use_host_x: true
+AppImage:
+  arch: x86_64
+  update-information: guess
+  sign-key: None

CHANGELOG.md (913 changed lines; file diff suppressed because it is too large)

Dockerfile.lotus (new file, 74 lines)
@@ -0,0 +1,74 @@
+FROM golang:1.16.4 AS builder-deps
+MAINTAINER Lotus Development Team
+
+RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
+
+ARG RUST_VERSION=nightly
+ENV XDG_CACHE_HOME="/tmp"
+
+ENV RUSTUP_HOME=/usr/local/rustup \
+    CARGO_HOME=/usr/local/cargo \
+    PATH=/usr/local/cargo/bin:$PATH
+
+RUN wget "https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init"; \
+    chmod +x rustup-init; \
+    ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION; \
+    rm rustup-init; \
+    chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
+    rustup --version; \
+    cargo --version; \
+    rustc --version;
+
+
+FROM builder-deps AS builder-local
+MAINTAINER Lotus Development Team
+
+COPY ./ /opt/filecoin
+WORKDIR /opt/filecoin
+RUN make clean deps
+
+
+FROM builder-local AS builder
+MAINTAINER Lotus Development Team
+
+WORKDIR /opt/filecoin
+
+ARG RUSTFLAGS=""
+ARG GOFLAGS=""
+
+RUN make deps lotus lotus-miner lotus-worker lotus-shed lotus-chainwatch lotus-stats
+
+
+FROM ubuntu:20.04 AS base
+MAINTAINER Lotus Development Team
+
+# Base resources
+COPY --from=builder /etc/ssl/certs /etc/ssl/certs
+COPY --from=builder /lib/x86_64-linux-gnu/libdl.so.2 /lib/
+COPY --from=builder /lib/x86_64-linux-gnu/librt.so.1 /lib/
+COPY --from=builder /lib/x86_64-linux-gnu/libgcc_s.so.1 /lib/
+COPY --from=builder /lib/x86_64-linux-gnu/libutil.so.1 /lib/
+COPY --from=builder /usr/lib/x86_64-linux-gnu/libltdl.so.7 /lib/
+COPY --from=builder /usr/lib/x86_64-linux-gnu/libnuma.so.1 /lib/
+COPY --from=builder /usr/lib/x86_64-linux-gnu/libhwloc.so.5 /lib/
+COPY --from=builder /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /lib/
+
+RUN useradd -r -u 532 -U fc
+
+
+FROM base AS lotus
+MAINTAINER Lotus Development Team
+
+COPY --from=builder /opt/filecoin/lotus /usr/local/bin/
+COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/
+
+ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
+ENV LOTUS_PATH /var/lib/lotus
+
+RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters && chown fc /var/lib/lotus /var/tmp/filecoin-proof-parameters
+
+USER fc
+
+ENTRYPOINT ["/usr/local/bin/lotus"]
+
+CMD ["-help"]

Makefile (147 changed lines)
@@ -5,10 +5,10 @@ all: build
 
 unexport GOFLAGS
 
-GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2)
-ifeq ($(shell expr $(GOVERSION) \< 14), 1)
-$(warning Your Golang version is go 1.$(GOVERSION))
-$(error Update Golang to version $(shell grep '^go' go.mod))
+GOVERSION:=$(shell go version | cut -d' ' -f 3 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}')
+ifeq ($(shell expr $(GOVERSION) \< 1016000), 1)
+$(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000))
+$(error Update Golang to version to at least 1.16.0)
 endif
 
 # git modules that need to be loaded
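The rewritten GOVERSION check packs a three-part Go version into a single comparable integer (major*1000000 + minor*1000 + patch), which is why the threshold is written as 1016000 for go1.16.0. A minimal shell rendering of the Makefile expression (single `$` here, where the Makefile doubles them):

    go_version() { echo "$1" | sed 's/^go//' | awk -F. '{printf "%d%03d%03d\n", $1, $2, $3}'; }
    go_version go1.16.4    # -> 1016004 (passes)
    go_version go1.15.11   # -> 1015011 (< 1016000, so make errors out)
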
@@ -41,8 +41,13 @@ MODULES+=$(FFI_PATH)
 BUILD_DEPS+=build/.filecoin-install
 CLEAN+=build/.filecoin-install
 
-$(MODULES): build/.update-modules ;
+ffi-version-check:
+	@[[ "$$(awk '/const Version/{print $$5}' extern/filecoin-ffi/version.go)" -eq 3 ]] || (echo "FFI version mismatch, update submodules"; exit 1)
+BUILD_DEPS+=ffi-version-check
+
+.PHONY: ffi-version-check
+
+$(MODULES): build/.update-modules ;
 # dummy file that marks the last time modules were updated
 build/.update-modules:
 	git submodule update --init --recursive
@@ -57,16 +62,30 @@ CLEAN+=build/.update-modules
 deps: $(BUILD_DEPS)
 .PHONY: deps
 
+build-devnets: build lotus-seed lotus-shed lotus-wallet lotus-gateway
+.PHONY: build-devnets
+
 debug: GOFLAGS+=-tags=debug
-debug: lotus lotus-miner lotus-worker lotus-seed
+debug: build-devnets
 
 2k: GOFLAGS+=-tags=2k
-2k: lotus lotus-miner lotus-worker lotus-seed
+2k: build-devnets
+
+calibnet: GOFLAGS+=-tags=calibnet
+calibnet: build-devnets
+
+nerpanet: GOFLAGS+=-tags=nerpanet
+nerpanet: build-devnets
+
+butterflynet: GOFLAGS+=-tags=butterflynet
+butterflynet: build-devnets
+
+interopnet: GOFLAGS+=-tags=interopnet
+interopnet: build-devnets
 
 lotus: $(BUILD_DEPS)
 	rm -f lotus
 	go build $(GOFLAGS) -o lotus ./cmd/lotus
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus -i ./build
 
 .PHONY: lotus
 BINS+=lotus
@@ -74,21 +93,18 @@ BINS+=lotus
 lotus-miner: $(BUILD_DEPS)
 	rm -f lotus-miner
 	go build $(GOFLAGS) -o lotus-miner ./cmd/lotus-storage-miner
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-miner -i ./build
 .PHONY: lotus-miner
 BINS+=lotus-miner
 
 lotus-worker: $(BUILD_DEPS)
 	rm -f lotus-worker
 	go build $(GOFLAGS) -o lotus-worker ./cmd/lotus-seal-worker
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-worker -i ./build
 .PHONY: lotus-worker
 BINS+=lotus-worker
 
 lotus-shed: $(BUILD_DEPS)
 	rm -f lotus-shed
 	go build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-shed -i ./build
 .PHONY: lotus-shed
 BINS+=lotus-shed
 
@@ -115,12 +131,14 @@ install-miner:
 install-worker:
 	install -C ./lotus-worker /usr/local/bin/lotus-worker
 
+install-app:
+	install -C ./$(APP) /usr/local/bin/$(APP)
+
 # TOOLS
 
 lotus-seed: $(BUILD_DEPS)
 	rm -f lotus-seed
 	go build $(GOFLAGS) -o lotus-seed ./cmd/lotus-seed
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-seed -i ./build
 
 .PHONY: lotus-seed
 BINS+=lotus-seed
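The new `install-app` target parameterizes installation over the `APP` make variable; hypothetical usage:

    make lotus-fountain
    make install-app APP=lotus-fountain   # installs ./lotus-fountain into /usr/local/bin/
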
@@ -154,13 +172,11 @@ lotus-townhall-front:
 .PHONY: lotus-townhall-front
 
 lotus-townhall-app: lotus-touch lotus-townhall-front
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-townhall -i ./cmd/lotus-townhall -i ./build
 .PHONY: lotus-townhall-app
 
 lotus-fountain:
 	rm -f lotus-fountain
 	go build -o lotus-fountain ./cmd/lotus-fountain
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build
 .PHONY: lotus-fountain
 BINS+=lotus-fountain
 
@@ -173,28 +189,24 @@ BINS+=lotus-chainwatch
 lotus-bench:
 	rm -f lotus-bench
 	go build -o lotus-bench ./cmd/lotus-bench
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-bench -i ./build
 .PHONY: lotus-bench
 BINS+=lotus-bench
 
 lotus-stats:
 	rm -f lotus-stats
-	go build -o lotus-stats ./cmd/lotus-stats
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-stats -i ./build
+	go build $(GOFLAGS) -o lotus-stats ./cmd/lotus-stats
 .PHONY: lotus-stats
 BINS+=lotus-stats
 
 lotus-pcr:
 	rm -f lotus-pcr
 	go build $(GOFLAGS) -o lotus-pcr ./cmd/lotus-pcr
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-pcr -i ./build
 .PHONY: lotus-pcr
 BINS+=lotus-pcr
 
 lotus-health:
 	rm -f lotus-health
 	go build -o lotus-health ./cmd/lotus-health
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-health -i ./build
 .PHONY: lotus-health
 BINS+=lotus-health
 
@@ -204,14 +216,33 @@ lotus-wallet:
 .PHONY: lotus-wallet
 BINS+=lotus-wallet
 
+lotus-keygen:
+	rm -f lotus-keygen
+	go build -o lotus-keygen ./cmd/lotus-keygen
+.PHONY: lotus-keygen
+BINS+=lotus-keygen
+
 testground:
 	go build -tags testground -o /dev/null ./cmd/lotus
 .PHONY: testground
 BINS+=testground
 
+
+tvx:
+	rm -f tvx
+	go build -o tvx ./cmd/tvx
+.PHONY: tvx
+BINS+=tvx
+
 install-chainwatch: lotus-chainwatch
 	install -C ./lotus-chainwatch /usr/local/bin/lotus-chainwatch
 
+lotus-sim: $(BUILD_DEPS)
+	rm -f lotus-sim
+	go build $(GOFLAGS) -o lotus-sim ./cmd/lotus-sim
+.PHONY: lotus-sim
+BINS+=lotus-sim
+
 # SYSTEMD
 
 install-daemon-service: install-daemon
|
|||||||
|
|
||||||
buildall: $(BINS)
|
buildall: $(BINS)
|
||||||
|
|
||||||
completions:
|
|
||||||
./scripts/make-completions.sh lotus
|
|
||||||
./scripts/make-completions.sh lotus-miner
|
|
||||||
.PHONY: completions
|
|
||||||
|
|
||||||
install-completions:
|
install-completions:
|
||||||
mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/
|
mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/
|
||||||
install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus
|
install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus
|
||||||
install -C ./scripts/bash-completion/lotus-miner /usr/share/bash-completion/completions/lotus-miner
|
|
||||||
install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus
|
install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus
|
||||||
install -C ./scripts/zsh-completion/lotus-miner /usr/local/share/zsh/site-functions/_lotus-miner
|
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf $(CLEAN) $(BINS)
|
rm -rf $(CLEAN) $(BINS)
|
||||||
@@ -294,17 +318,76 @@ dist-clean:
 	git submodule deinit --all -f
 .PHONY: dist-clean
 
-type-gen:
+type-gen: api-gen
 	go run ./gen/main.go
-	go generate ./...
+	go generate -x ./...
+	goimports -w api/
 
-method-gen:
+method-gen: api-gen
 	(cd ./lotuspond/front/src/chain && go run ./methodgen.go)
 
-gen: type-gen method-gen
+actors-gen:
+	go run ./chain/actors/agen
+	go fmt ./...
 
-docsgen:
-	go run ./api/docgen > documentation/en/api-methods.md
+api-gen:
+	go run ./gen/api
+	goimports -w api
+	goimports -w api
+.PHONY: api-gen
+
+appimage: lotus
+	rm -rf appimage-builder-cache || true
+	rm AppDir/io.filecoin.lotus.desktop || true
+	rm AppDir/icon.svg || true
+	rm Appdir/AppRun || true
+	mkdir -p AppDir/usr/bin
+	cp ./lotus AppDir/usr/bin/
+	appimage-builder
+
+docsgen: docsgen-md docsgen-openrpc
+
+docsgen-md-bin: api-gen actors-gen
+	go build $(GOFLAGS) -o docgen-md ./api/docgen/cmd
+docsgen-openrpc-bin: api-gen actors-gen
+	go build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd
+
+docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker
+
+docsgen-md-full: docsgen-md-bin
+	./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
+	./docgen-md "api/v0api/full.go" "FullNode" "v0api" "./api/v0api" > documentation/en/api-v0-methods.md
+docsgen-md-storage: docsgen-md-bin
+	./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
+docsgen-md-worker: docsgen-md-bin
+	./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
+
+docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker
+
+docsgen-openrpc-full: docsgen-openrpc-bin
+	./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" -gzip > build/openrpc/full.json.gz
+docsgen-openrpc-storage: docsgen-openrpc-bin
+	./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" -gzip > build/openrpc/miner.json.gz
+docsgen-openrpc-worker: docsgen-openrpc-bin
+	./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" -gzip > build/openrpc/worker.json.gz
+
+.PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin
+
+gen: actors-gen type-gen method-gen docsgen api-gen circleci
+	@echo ">>> IF YOU'VE MODIFIED THE CLI, REMEMBER TO ALSO MAKE docsgen-cli"
+.PHONY: gen
+
+snap: lotus lotus-miner lotus-worker
+	snapcraft
+	# snapcraft upload ./lotus_*.snap
+
+# separate from gen because it needs binaries
+docsgen-cli: lotus lotus-miner lotus-worker
+	python ./scripts/generate-lotus-cli.py
+.PHONY: docsgen-cli
+
 print-%:
 	@echo $*=$($*)
+
+circleci:
+	go generate -x ./.circleci
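After this hunk, documentation generation is split into per-binary targets chained behind `docsgen`, and `gen` regenerates actors, types, methods, docs, the API glue, and the CircleCI config in one pass. Typical invocations from the repo root:

    make gen           # runs actors-gen type-gen method-gen docsgen api-gen circleci
    make docsgen-cli   # kept separate because it needs the lotus binaries built first
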
README.md (125 changed lines)
@@ -10,7 +10,7 @@
 <a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
 <a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
 <a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
-<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.14.7-blue.svg" /></a>
+<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.16-blue.svg" /></a>
 <br>
 </p>
 
@@ -18,30 +18,121 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more

 ## Building & Documentation

-For instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/).
+> Note: The default `master` branch is the dev branch, please use with caution. For the latest stable version, checkout the most recent [`Latest release`](https://github.com/filecoin-project/lotus/releases).

+For complete instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). Basic build instructions can be found further down in this readme.

 ## Reporting a Vulnerability

 Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details.

-## Development
+## Related packages

-The main branches under development at the moment are:
+These repos are independent and reusable modules, but are tightly integrated into Lotus to make up a fully featured Filecoin implementation:

-* [`master`](https://github.com/filecoin-project/lotus): current testnet.
-* [`next`](https://github.com/filecoin-project/lotus/tree/next): working branch with chain-breaking changes.
-* [`ntwk-calibration`](https://github.com/filecoin-project/lotus/tree/ntwk-calibration): devnet running one of `next` commits.

-### Tracker

-All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work towards Mainnet launch can be seen at the [lotus github project board](https://github.com/orgs/filecoin-project/projects/8). The issues labeled with `incentives` are there to identify the issues needed for Space Race launch.

-### Packages

-The lotus Filecoin implementation unfolds into the following packages:

-- [This repo](https://github.com/filecoin-project/lotus)
 - [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board)
-- [spec-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)
+- [specs-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)
+## Contribute

+Lotus is a universally open project and welcomes contributions of all kinds: code, docs, and more. However, before making a contribution, we ask you to heed these recommendations:

+1. If the proposal entails a protocol change, please first submit a [Filecoin Improvement Proposal](https://github.com/filecoin-project/FIPs).
+2. If the change is complex and requires prior discussion, [open an issue](https://github.com/filecoin-project/lotus/issues) or a [discussion](https://github.com/filecoin-project/lotus/discussions) to request feedback before you start working on a pull request. This is to avoid disappointment and sunk costs, in case the change is not actually needed or accepted.
+3. Please refrain from submitting PRs to adapt existing code to subjective preferences. The changeset should contain functional or technical improvements/enhancements, bug fixes, new features, or some other clear material contribution. Simple stylistic changes are likely to be rejected in order to reduce code churn.

+When implementing a change:

+1. Adhere to the standard Go formatting guidelines, e.g. [Effective Go](https://golang.org/doc/effective_go.html). Run `go fmt`.
+2. Stick to the idioms and patterns used in the codebase. Familiar-looking code has a higher chance of being accepted than eerie code. Pay attention to commonly used variable and parameter names, avoidance of naked returns, error handling patterns, etc.
+3. Comments: follow the advice on the [Commentary](https://golang.org/doc/effective_go.html#commentary) section of Effective Go.
+4. Minimize code churn. Modify only what is strictly necessary. Well-encapsulated changesets will get a quicker response from maintainers.
+5. Lint your code with [`golangci-lint`](https://golangci-lint.run) (CI will reject your PR if unlinted).
+6. Add tests.
+7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description.
+8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message.
+## Basic Build Instructions
+**System-specific Software Dependencies**:

+Building Lotus requires some system dependencies, usually provided by your distribution.

+Ubuntu/Debian:
+```
+sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y && sudo apt upgrade -y
+```

+Fedora:
+```
+sudo dnf -y install gcc make git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm wget hwloc libhwloc-dev
+```

+For other distributions you can find the required dependencies [here.](https://docs.filecoin.io/get-started/lotus/installation/#system-specific) For instructions specific to macOS, you can find them [here.](https://docs.filecoin.io/get-started/lotus/installation/#macos)
+#### Go

+To build Lotus, you need a working installation of [Go 1.16.4 or higher](https://golang.org/dl/):

+```bash
+wget -c https://golang.org/dl/go1.16.4.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
+```

+**TIP:**
+You'll need to add `/usr/local/go/bin` to your path. For most Linux distributions you can run something like:

+```shell
+echo "export PATH=$PATH:/usr/local/go/bin" >> ~/.bashrc && source ~/.bashrc
+```

+See the [official Golang installation instructions](https://golang.org/doc/install) if you get stuck.
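As a quick sanity check that the installed toolchain is new enough, any Go program can report the version it was built with (an illustrative aside, not part of the original README):

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Prints the Go release this binary was compiled with, e.g. "go1.16.4".
	fmt.Println(runtime.Version())
}
```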
+### Build and install Lotus

+Once all the dependencies are installed, you can build and install the Lotus suite (`lotus`, `lotus-miner`, and `lotus-worker`).

+1. Clone the repository:

+   ```sh
+   git clone https://github.com/filecoin-project/lotus.git
+   cd lotus/
+   ```

+   Note: The default branch `master` is the dev branch where the latest new features, bug fixes and improvements land. However, if you want to run lotus on Filecoin mainnet and want to run a production-ready lotus, get the latest release [here](https://github.com/filecoin-project/lotus/releases).

+2. To join mainnet, checkout the [latest release](https://github.com/filecoin-project/lotus/releases).

+   If you are changing networks from a previous Lotus installation or there has been a network reset, read the [Switch networks guide](https://docs.filecoin.io/get-started/lotus/switch-networks/) before proceeding.

+   For networks other than mainnet, look up the current branch or tag/commit for the network you want to join in the [Filecoin networks dashboard](https://network.filecoin.io), then build Lotus for your specific network below.

+   ```sh
+   git checkout <tag_or_branch>
+   # For example:
+   git checkout <vX.X.X> # tag for a release
+   ```

+   Currently, the latest code on the _master_ branch corresponds to mainnet.

+3. If you are in China, see "[Lotus: tips when running in China](https://docs.filecoin.io/get-started/lotus/tips-running-in-china/)".
+4. This build instruction uses the prebuilt proofs binaries. If you want to build the proof binaries from source check the [complete instructions](https://docs.filecoin.io/get-started/lotus/installation/#build-and-install-lotus). Note, if you are building the proof binaries from source, [installing rustup](https://docs.filecoin.io/get-started/lotus/installation/#rustup) is also needed.
+5. Build and install Lotus:

+   ```sh
+   make clean all #mainnet

+   # Or to join a testnet or devnet:
+   make clean calibnet # Calibration with min 32GiB sectors
+   make clean nerpanet # Nerpa with min 512MiB sectors

+   sudo make install
+   ```

+   This will put `lotus`, `lotus-miner` and `lotus-worker` in `/usr/local/bin`.

+   `lotus` will use the `$HOME/.lotus` folder by default for storage (configuration, chain data, wallets, etc). See [advanced options](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/) for information on how to customize the Lotus folder.

+6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://docs.filecoin.io/get-started/lotus/installation/#start-the-lotus-daemon-and-sync-the-chain).
 ## License
SECURITY.md

@@ -2,11 +2,11 @@

 ## Reporting a Vulnerability

-For *critical* bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md
+For reporting security vulnerabilities/bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md. Security vulnerabilities should be reported via our [Vulnerability Reporting channels](https://github.com/filecoin-project/community/blob/master/SECURITY.md#vulnerability-reporting) and will be eligible for a [Bug Bounty](https://security.filecoin.io/bug-bounty/).

 Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report.

-Here are some examples of bugs we would consider 'critical':
+Here are some examples of bugs we would consider to be security vulnerabilities:

 * If you can spend from a `multisig` wallet you do not control the keys for.
 * If you can cause a miner to be slashed without them actually misbehaving.
@@ -16,8 +16,8 @@ Here are some examples of bugs we would consider 'critical':
 * If you can craft a message that causes a persistent fork in the network.
 * If you can cause the total amount of Filecoin in the network to no longer be 2 billion.

-This is not an exhaustive list, but should provide some idea of what we consider 'critical'.
+This is not an exhaustive list, but should provide some idea of what we consider a security vulnerability.

 ## Reporting a non-security bug

-For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).
+For non-security bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).
api/README.md (new file)

@@ -0,0 +1,14 @@
+## Lotus API

+This package contains all lotus API definitions. Interfaces defined here are
+exposed as JsonRPC 2.0 endpoints by lotus programs.

+### Versions

+| File             | Alias File        | Interface      | Exposed by         | Version | HTTP Endpoint | Status                       | Docs
+|------------------|-------------------|----------------|--------------------|---------|---------------|------------------------------|------
+| `api_common.go`  | `v0api/latest.go` | `Common`       | lotus; lotus-miner | v0      | `/rpc/v0`     | Latest, Stable               | [Methods](../documentation/en/api-v0-methods.md)
+| `api_full.go`    | `v1api/latest.go` | `FullNode`     | lotus              | v1      | `/rpc/v1`     | Latest, **Work in progress** | [Methods](../documentation/en/api-v1-unstable-methods.md)
+| `api_storage.go` | `v0api/latest.go` | `StorageMiner` | lotus-miner        | v0      | `/rpc/v0`     | Latest, Stable               | [Methods](../documentation/en/api-v0-methods-miner.md)
+| `api_worker.go`  | `v0api/latest.go` | `Worker`       | lotus-worker       | v0      | `/rpc/v0`     | Latest, Stable               | [Methods](../documentation/en/api-v0-methods-worker.md)
+| `v0api/full.go`  |                   | `FullNode`     | lotus              | v0      | `/rpc/v0`     | Stable                       | [Methods](../documentation/en/api-v0-methods.md)
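To make the endpoint column above concrete, here is a minimal sketch of calling the v0 API from Go with only the standard library. The daemon address (`127.0.0.1:1234`), the `LOTUS_API_TOKEN` environment variable, and the sample output are illustrative assumptions, not part of this commit; the `Filecoin.` method namespace and the `Authorization: Bearer` header follow the usual lotus JSON-RPC conventions.

```go
// Sketch: query Filecoin.Version on the v0 endpoint (a perm:read method).
// Assumes a local daemon at the default 127.0.0.1:1234 and a token in the
// (hypothetical) LOTUS_API_TOKEN environment variable.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	payload := `{"jsonrpc":"2.0","method":"Filecoin.Version","params":[],"id":1}`
	req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:1234/rpc/v0",
		bytes.NewBufferString(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+os.Getenv("LOTUS_API_TOKEN"))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // a JSON-RPC envelope carrying the version info
}
```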
api/common.go

@@ -4,77 +4,61 @@ import (
 	"context"
 	"fmt"

+	apitypes "github.com/filecoin-project/lotus/api/types"

 	"github.com/google/uuid"

 	"github.com/filecoin-project/go-jsonrpc/auth"
-	metrics "github.com/libp2p/go-libp2p-core/metrics"
-	"github.com/libp2p/go-libp2p-core/network"
-	"github.com/libp2p/go-libp2p-core/peer"
-	protocol "github.com/libp2p/go-libp2p-core/protocol"

-	"github.com/filecoin-project/lotus/build"
 )

-type Common interface {
+// MODIFYING THE API INTERFACE
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+// * Generate proxy structs
+// * Generate mocks
+// * Generate markdown docs
+// * Generate openrpc blobs

+type Common interface {
 	// MethodGroup: Auth

-	AuthVerify(ctx context.Context, token string) ([]auth.Permission, error)
+	AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read
-	AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error)
+	AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) //perm:admin

-	// MethodGroup: Net
+	// MethodGroup: Log

-	NetConnectedness(context.Context, peer.ID) (network.Connectedness, error)
+	LogList(context.Context) ([]string, error) //perm:write
-	NetPeers(context.Context) ([]peer.AddrInfo, error)
+	LogSetLevel(context.Context, string, string) error //perm:write
-	NetConnect(context.Context, peer.AddrInfo) error
-	NetAddrsListen(context.Context) (peer.AddrInfo, error)
-	NetDisconnect(context.Context, peer.ID) error
-	NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
-	NetPubsubScores(context.Context) ([]PubsubScore, error)
-	NetAutoNatStatus(context.Context) (NatInfo, error)
-	NetAgentVersion(ctx context.Context, p peer.ID) (string, error)

-	// NetBandwidthStats returns statistics about the nodes total bandwidth
-	// usage and current rate across all peers and protocols.
-	NetBandwidthStats(ctx context.Context) (metrics.Stats, error)

-	// NetBandwidthStatsByPeer returns statistics about the nodes bandwidth
-	// usage and current rate per peer
-	NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error)

-	// NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth
-	// usage and current rate per protocol
-	NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error)

 	// MethodGroup: Common

-	// ID returns peerID of libp2p node backing this API
-	ID(context.Context) (peer.ID, error)

 	// Version provides information about API provider
-	Version(context.Context) (Version, error)
+	Version(context.Context) (APIVersion, error) //perm:read

-	LogList(context.Context) ([]string, error)
+	// Discover returns an OpenRPC document describing an RPC API.
-	LogSetLevel(context.Context, string, string) error
+	Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read

 	// trigger graceful shutdown
-	Shutdown(context.Context) error
+	Shutdown(context.Context) error //perm:admin

 	// Session returns a random UUID of api provider session
-	Session(context.Context) (uuid.UUID, error)
+	Session(context.Context) (uuid.UUID, error) //perm:read

-	Closing(context.Context) (<-chan struct{}, error)
+	Closing(context.Context) (<-chan struct{}, error) //perm:read
 }

-// Version provides various build-time information
+// APIVersion provides various build-time information
-type Version struct {
+type APIVersion struct {
 	Version string

 	// APIVersion is a binary encoded semver version of the remote implementing
 	// this api
 	//
 	// See APIVersion in build/version.go
-	APIVersion build.Version
+	APIVersion Version

 	// TODO: git commit / os / genesis cid?

@@ -82,11 +66,6 @@ type Version struct {
 	BlockDelay uint64
 }

-func (v Version) String() string {
+func (v APIVersion) String() string {
 	return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String())
 }

-type NatInfo struct {
-	Reachability network.Reachability
-	PublicAddr   string
-}
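A note on the renamed `APIVersion` field above: its comment calls it a "binary encoded semver". The canonical definition lives in `build/version.go`, not in this file; as a hedged illustration only, the usual packing is one byte per component:

```go
// Illustration (assumption: this mirrors the one-byte-per-component packing
// used in build/version.go; consult that file for the authoritative scheme).
package main

import "fmt"

type Version uint32

// newVer packs major.minor.patch into a single comparable integer.
func newVer(major, minor, patch uint8) Version {
	return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch))
}

// String decodes the packed integer back into dotted semver form.
func (v Version) String() string {
	return fmt.Sprintf("%d.%d.%d", uint8(v>>16), uint8(v>>8), uint8(v))
}

func main() {
	fmt.Println(newVer(1, 3, 0)) // prints "1.3.0"; compare versions as integers
}
```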
api/api_full.go
@@ -2,17 +2,16 @@ package api

 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"time"

-	datatransfer "github.com/filecoin-project/go-data-transfer"
-	"github.com/filecoin-project/go-state-types/network"

 	"github.com/ipfs/go-cid"
 	"github.com/libp2p/go-libp2p-core/peer"

 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-bitfield"
+	datatransfer "github.com/filecoin-project/go-data-transfer"
 	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
 	"github.com/filecoin-project/go-fil-markets/storagemarket"
 	"github.com/filecoin-project/go-multistore"
@@ -20,20 +19,46 @@ import (
 	"github.com/filecoin-project/go-state-types/big"
 	"github.com/filecoin-project/go-state-types/crypto"
 	"github.com/filecoin-project/go-state-types/dline"

+	apitypes "github.com/filecoin-project/lotus/api/types"
+	"github.com/filecoin-project/lotus/chain/actors/builtin"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/market"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/paych"

-	"github.com/filecoin-project/lotus/chain/actors/builtin"
 	"github.com/filecoin-project/lotus/chain/actors/builtin/power"
 	"github.com/filecoin-project/lotus/chain/types"
 	marketevents "github.com/filecoin-project/lotus/markets/loggers"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )

+//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_full.go -package=mocks . FullNode

+// ChainIO abstracts operations for accessing raw IPLD objects.
+type ChainIO interface {
+	ChainReadObj(context.Context, cid.Cid) ([]byte, error)
+	ChainHasObj(context.Context, cid.Cid) (bool, error)
+}
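Since `ChainIO` is the minimal read-side surface a consumer needs, a test double is trivial to write. A sketch (not lotus code) backed by a plain map:

```go
// An in-memory stand-in for the ChainIO abstraction above, of the kind a
// unit test might use. cid.Cid is comparable, so it works as a map key.
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"
)

type memChainIO struct {
	objs map[cid.Cid][]byte
}

func (m *memChainIO) ChainReadObj(_ context.Context, c cid.Cid) ([]byte, error) {
	b, ok := m.objs[c]
	if !ok {
		return nil, fmt.Errorf("object %s not found", c)
	}
	return b, nil
}

func (m *memChainIO) ChainHasObj(_ context.Context, c cid.Cid) (bool, error) {
	_, ok := m.objs[c]
	return ok, nil
}

func main() {
	store := &memChainIO{objs: map[cid.Cid][]byte{}}
	has, _ := store.ChainHasObj(context.Background(), cid.Undef)
	fmt.Println(has) // false: nothing stored yet
}
```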
+const LookbackNoLimit = abi.ChainEpoch(-1)

+// MODIFYING THE API INTERFACE
+//
+// NOTE: This is the V1 (Unstable) API - to add methods to the V0 (Stable) API
+// you'll have to add those methods to interfaces in `api/v0api`
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+// * Generate proxy structs
+// * Generate mocks
+// * Generate markdown docs
+// * Generate openrpc blobs

 // FullNode API is a low-level interface to the Filecoin network full node
 type FullNode interface {
 	Common
+	Net

 	// MethodGroup: Chain
 	// The Chain method group contains methods for interacting with the
@@ -41,66 +66,81 @@ type FullNode interface {

 	// ChainNotify returns channel with chain head updates.
 	// First message is guaranteed to be of len == 1, and type == 'current'.
-	ChainNotify(context.Context) (<-chan []*HeadChange, error)
+	ChainNotify(context.Context) (<-chan []*HeadChange, error) //perm:read

 	// ChainHead returns the current head of the chain.
-	ChainHead(context.Context) (*types.TipSet, error)
+	ChainHead(context.Context) (*types.TipSet, error) //perm:read

 	// ChainGetRandomnessFromTickets is used to sample the chain for randomness.
-	ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
+	ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read

 	// ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
-	ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
+	ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read

 	// ChainGetBlock returns the block specified by the given CID.
-	ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
+	ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read
 	// ChainGetTipSet returns the tipset specified by the given TipSetKey.
-	ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
+	ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) //perm:read
 	// ChainGetBlockMessages returns messages stored in the specified block.
-	ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error)
+	//
+	// Note: If there are multiple blocks in a tipset, it's likely that some
+	// messages will be duplicated. It's also possible for blocks in a tipset to have
+	// different messages from the same sender at the same nonce. When that happens,
+	// only the first message (in a block with lowest ticket) will be considered
+	// for execution
+	//
+	// NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
+	//
+	// DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET
+	// Use ChainGetParentMessages, which will perform correct message deduplication
+	ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error) //perm:read

 	// ChainGetParentReceipts returns receipts for messages in parent tipset of
-	// the specified block.
-	ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error)
+	// the specified block. The receipts in the returned list are one-to-one with the
+	// messages returned by a call to ChainGetParentMessages with the same blockCid.
+	ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) //perm:read

 	// ChainGetParentMessages returns messages stored in parent tipset of the
 	// specified block.
-	ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error)
+	ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) //perm:read

+	// ChainGetMessagesInTipset returns messages stored in the specified tipset
+	ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]Message, error) //perm:read
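The deduplication rule spelled out in the comment above is worth seeing in miniature. A standalone sketch (hypothetical `block` type, message CIDs as plain strings) of what ChainGetParentMessages-style dedup does:

```go
// Hand-rolled illustration (not lotus code) of the rule above: across the
// blocks of a tipset, a message CID is executed only once, credited to the
// first block (lowest ticket) that contains it.
package main

import "fmt"

type block struct {
	name string
	msgs []string // message CIDs, in block order
}

// dedupTipsetMessages walks blocks in ticket order and keeps the first
// occurrence of every message CID.
func dedupTipsetMessages(blocks []block) []string {
	seen := map[string]bool{}
	var out []string
	for _, b := range blocks {
		for _, m := range b.msgs {
			if seen[m] {
				continue // duplicate: already scheduled for execution
			}
			seen[m] = true
			out = append(out, m)
		}
	}
	return out
}

func main() {
	blocks := []block{
		{name: "A", msgs: []string{"m1", "m2"}},
		{name: "B", msgs: []string{"m2", "m3"}}, // m2 is a duplicate
	}
	fmt.Println(dedupTipsetMessages(blocks)) // [m1 m2 m3]
}
```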
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
|
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
|
||||||
// If there are no blocks at the specified epoch, a tipset at an earlier epoch
|
// If there are no blocks at the specified epoch, a tipset at an earlier epoch
|
||||||
// will be returned.
|
// will be returned.
|
||||||
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
|
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read
|
||||||
|
|
||||||
// ChainReadObj reads ipld nodes referenced by the specified CID from chain
|
// ChainReadObj reads ipld nodes referenced by the specified CID from chain
|
||||||
// blockstore and returns raw bytes.
|
// blockstore and returns raw bytes.
|
||||||
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
|
ChainReadObj(context.Context, cid.Cid) ([]byte, error) //perm:read
|
||||||
|
|
||||||
// ChainDeleteObj deletes node referenced by the given CID
|
// ChainDeleteObj deletes node referenced by the given CID
|
||||||
ChainDeleteObj(context.Context, cid.Cid) error
|
ChainDeleteObj(context.Context, cid.Cid) error //perm:admin
|
||||||
|
|
||||||
// ChainHasObj checks if a given CID exists in the chain blockstore.
|
// ChainHasObj checks if a given CID exists in the chain blockstore.
|
||||||
ChainHasObj(context.Context, cid.Cid) (bool, error)
|
ChainHasObj(context.Context, cid.Cid) (bool, error) //perm:read
|
||||||
|
|
||||||
// ChainStatObj returns statistics about the graph referenced by 'obj'.
|
// ChainStatObj returns statistics about the graph referenced by 'obj'.
|
||||||
// If 'base' is also specified, then the returned stat will be a diff
|
// If 'base' is also specified, then the returned stat will be a diff
|
||||||
// between the two objects.
|
// between the two objects.
|
||||||
ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error)
|
ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error) //perm:read
|
||||||
|
|
||||||
// ChainSetHead forcefully sets current chain head. Use with caution.
|
// ChainSetHead forcefully sets current chain head. Use with caution.
|
||||||
ChainSetHead(context.Context, types.TipSetKey) error
|
ChainSetHead(context.Context, types.TipSetKey) error //perm:admin
|
||||||
|
|
||||||
// ChainGetGenesis returns the genesis tipset.
|
// ChainGetGenesis returns the genesis tipset.
|
||||||
ChainGetGenesis(context.Context) (*types.TipSet, error)
|
ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read
|
||||||
|
|
||||||
// ChainTipSetWeight computes weight for the specified tipset.
|
// ChainTipSetWeight computes weight for the specified tipset.
|
||||||
ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error)
|
ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) //perm:read
|
||||||
ChainGetNode(ctx context.Context, p string) (*IpldObject, error)
|
ChainGetNode(ctx context.Context, p string) (*IpldObject, error) //perm:read
|
||||||
|
|
||||||
// ChainGetMessage reads a message referenced by the specified CID from the
|
// ChainGetMessage reads a message referenced by the specified CID from the
|
||||||
// chain blockstore.
|
// chain blockstore.
|
||||||
ChainGetMessage(context.Context, cid.Cid) (*types.Message, error)
|
ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) //perm:read
|
||||||
|
|
||||||
// ChainGetPath returns a set of revert/apply operations needed to get from
|
// ChainGetPath returns a set of revert/apply operations needed to get from
|
||||||
// one tipset to another, for example:
|
// one tipset to another, for example:
|
||||||
@@ -115,14 +155,14 @@ type FullNode interface {
 	// tRR
 	//```
 	// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
-	ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error)
+	ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error) //perm:read

 	// ChainExport returns a stream of bytes with CAR dump of chain data.
 	// The exported chain data includes the header chain from the given tipset
 	// back to genesis, the entire genesis state, and the most recent 'nroots'
 	// state trees.
 	// If oldmsgskip is set, messages from before the requested roots are also not included.
-	ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error)
+	ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read
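A consumer of `ChainGetPath` (or of `ChainNotify` updates) walks that revert/apply list in order. A sketch with stand-in types, replaying the `[revert(tBA), apply(tAB), apply(tAA)]` example from the comment:

```go
// Sketch (hypothetical types, not the lotus ones) of how a caller applies
// the revert/apply operations returned by ChainGetPath.
package main

import "fmt"

type headChange struct {
	Type string // "revert", "apply", or "current"
	Val  string // stand-in for *types.TipSet
}

func main() {
	path := []headChange{
		{"revert", "tBA"}, // walk back down the old branch...
		{"apply", "tAB"},  // ...then up the new one
		{"apply", "tAA"},
	}
	// Stand-in for whatever per-tipset state the caller maintains; it starts
	// on the old branch, holding the tipset that is about to be reverted.
	state := []string{"tBA"}
	for _, hc := range path {
		switch hc.Type {
		case "revert":
			state = state[:len(state)-1] // drop work done on the reverted tipset
		case "apply", "current":
			state = append(state, hc.Val)
		}
	}
	fmt.Println(state) // [tAB tAA]
}
```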
 	// MethodGroup: Beacon
 	// The Beacon method group contains methods for interacting with the random beacon (DRAND)
@@ -130,74 +170,74 @@ type FullNode interface {
 	// BeaconGetEntry returns the beacon entry for the given filecoin epoch. If
 	// the entry has not yet been produced, the call will block until the entry
 	// becomes available
-	BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error)
+	BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read

 	// GasEstimateFeeCap estimates gas fee cap
-	GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error)
+	GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read

 	// GasEstimateGasLimit estimates gas used by the message and returns it.
 	// It fails if message fails to execute.
-	GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error)
+	GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) //perm:read

 	// GasEstimateGasPremium estimates what gas price should be used for a
 	// message to have high likelihood of inclusion in `nblocksincl` epochs.

 	GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
-		sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)
+		sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) //perm:read

 	// GasEstimateMessageGas estimates gas values for unset message gas fields
-	GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error)
+	GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error) //perm:read
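For the gas estimators above, the wire-level shape is often the easiest reference. A sketch that only builds and prints a `Filecoin.GasEstimateMessageGas` request body; the field names follow lotus's JSON serialization of `types.Message` as I understand it, and the addresses and amounts are placeholders:

```go
// Illustrative request body for Filecoin.GasEstimateMessageGas over JSON-RPC.
// Zero-valued gas fields are the ones the node fills in; the empty array for
// the TipSetKey parameter means "use the current head". Treat the exact field
// names as assumptions and check the generated API docs before relying on them.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	msg := map[string]interface{}{
		"To":         "f01234",               // placeholder recipient
		"From":       "f1sender",             // placeholder sender
		"Value":      "1000000000000000000",  // attoFIL, as a string
		"Method":     0,
		"GasLimit":   0,
		"GasFeeCap":  "0",
		"GasPremium": "0",
	}
	req := map[string]interface{}{
		"jsonrpc": "2.0",
		"method":  "Filecoin.GasEstimateMessageGas",
		// params: the message, a *MessageSendSpec (null = defaults), a TipSetKey
		"params": []interface{}{msg, nil, []interface{}{}},
		"id":     1,
	}
	b, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(b))
}
```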
 	// MethodGroup: Sync
 	// The Sync method group contains methods for interacting with and
 	// observing the lotus sync service.

 	// SyncState returns the current status of the lotus sync system.
-	SyncState(context.Context) (*SyncState, error)
+	SyncState(context.Context) (*SyncState, error) //perm:read

 	// SyncSubmitBlock can be used to submit a newly created block to the
 	// network through this node.
-	SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error
+	SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write

 	// SyncIncomingBlocks returns a channel streaming incoming, potentially not
 	// yet synced block headers.
-	SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error)
+	SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) //perm:read

 	// SyncCheckpoint marks a block as checkpointed, meaning that it won't ever fork away from it.
-	SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error
+	SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error //perm:admin

 	// SyncMarkBad marks a block as bad, meaning that it won't ever be synced.
 	// Use with extreme caution.
-	SyncMarkBad(ctx context.Context, bcid cid.Cid) error
+	SyncMarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin

 	// SyncUnmarkBad unmarks a block as bad, making it possible to be validated and synced again.
-	SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error
+	SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin

 	// SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad
-	SyncUnmarkAllBad(ctx context.Context) error
+	SyncUnmarkAllBad(ctx context.Context) error //perm:admin

 	// SyncCheckBad checks if a block was marked as bad, and if it was, returns
 	// the reason.
-	SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error)
+	SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) //perm:read

 	// SyncValidateTipset indicates whether the provided tipset is valid or not
-	SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error)
+	SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) //perm:read
 	// MethodGroup: Mpool
 	// The Mpool methods are for interacting with the message pool. The message pool
 	// manages all incoming and outgoing 'messages' going over the network.

 	// MpoolPending returns pending mempool messages.
-	MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
+	MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) //perm:read

 	// MpoolSelect returns a list of pending messages for inclusion in the next block
-	MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error)
+	MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read

 	// MpoolPush pushes a signed message to mempool.
-	MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)
+	MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write

 	// MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
-	MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error)
+	MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write

 	// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
 	// to mempool.
@@ -205,34 +245,41 @@ type FullNode interface {
 	//
 	// When maxFee is set to 0, MpoolPushMessage will guess appropriate fee
 	// based on current chain conditions
-	MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error)
+	MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error) //perm:sign

 	// MpoolBatchPush batch pushes signed messages to mempool.
-	MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error)
+	MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write

 	// MpoolBatchPushUntrusted batch pushes signed messages to mempool from untrusted sources.
-	MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error)
+	MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write

 	// MpoolBatchPushMessage batch pushes unsigned messages to mempool.
-	MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error)
+	MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign

+	// MpoolCheckMessages performs logical checks on a batch of messages
+	MpoolCheckMessages(context.Context, []*MessagePrototype) ([][]MessageCheckStatus, error) //perm:read
+	// MpoolCheckPendingMessages performs logical checks for all pending messages from a given address
+	MpoolCheckPendingMessages(context.Context, address.Address) ([][]MessageCheckStatus, error) //perm:read
+	// MpoolCheckReplaceMessages performs logical checks on pending messages with replacement
+	MpoolCheckReplaceMessages(context.Context, []*types.Message) ([][]MessageCheckStatus, error) //perm:read

 	// MpoolGetNonce gets next nonce for the specified sender.
 	// Note that this method may not be atomic. Use MpoolPushMessage instead.
-	MpoolGetNonce(context.Context, address.Address) (uint64, error)
+	MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
-	MpoolSub(context.Context) (<-chan MpoolUpdate, error)
+	MpoolSub(context.Context) (<-chan MpoolUpdate, error) //perm:read

 	// MpoolClear clears pending messages from the mpool
-	MpoolClear(context.Context, bool) error
+	MpoolClear(context.Context, bool) error //perm:write

 	// MpoolGetConfig returns (a copy of) the current mpool config
-	MpoolGetConfig(context.Context) (*types.MpoolConfig, error)
+	MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
 	// MpoolSetConfig sets the mpool config to (a copy of) the supplied config
-	MpoolSetConfig(context.Context, *types.MpoolConfig) error
+	MpoolSetConfig(context.Context, *types.MpoolConfig) error //perm:admin

 	// MethodGroup: Miner

-	MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error)
+	MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error) //perm:read
-	MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error)
+	MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error) //perm:write

 	// // UX ?
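The maxFee behaviour described above is driven by the `MessageSendSpec` argument. A sketch of the corresponding JSON-RPC request, with placeholder addresses; the `MaxFee` field name is my assumption about the spec's serialization, so verify against the generated docs:

```go
// Illustrative Filecoin.MpoolPushMessage request: the node assigns the nonce,
// estimates any unset gas fields, signs with the wallet (the method is
// //perm:sign), and pushes. A spec of {"MaxFee": "0"} lets the node guess an
// appropriate fee, per the comment above.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	req := map[string]interface{}{
		"jsonrpc": "2.0",
		"method":  "Filecoin.MpoolPushMessage",
		"params": []interface{}{
			map[string]interface{}{
				"To":    "f01234",              // placeholder recipient
				"From":  "f1sender",            // placeholder sender
				"Value": "1000000000000000000", // 1 FIL, in attoFIL
			},
			map[string]interface{}{"MaxFee": "0"}, // 0 = let the node pick
		},
		"id": 1,
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
}
```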
@@ -241,32 +288,32 @@ type FullNode interface {
 	// WalletNew creates a new address in the wallet with the given sigType.
 	// Available key types: bls, secp256k1, secp256k1-ledger
 	// Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated
-	WalletNew(context.Context, types.KeyType) (address.Address, error)
+	WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:write
 	// WalletHas indicates whether the given address is in the wallet.
-	WalletHas(context.Context, address.Address) (bool, error)
+	WalletHas(context.Context, address.Address) (bool, error) //perm:write
 	// WalletList lists all the addresses in the wallet.
-	WalletList(context.Context) ([]address.Address, error)
+	WalletList(context.Context) ([]address.Address, error) //perm:write
 	// WalletBalance returns the balance of the given address at the current head of the chain.
-	WalletBalance(context.Context, address.Address) (types.BigInt, error)
+	WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read
 	// WalletSign signs the given bytes using the given address.
-	WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error)
+	WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) //perm:sign
 	// WalletSignMessage signs the given message using the given address.
-	WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error)
+	WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) //perm:sign
 	// WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
 	// The address does not have to be in the wallet.
-	WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error)
+	WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
 	// WalletDefaultAddress returns the address marked as default in the wallet.
-	WalletDefaultAddress(context.Context) (address.Address, error)
+	WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
 	// WalletSetDefault marks the given address as the default one.
-	WalletSetDefault(context.Context, address.Address) error
+	WalletSetDefault(context.Context, address.Address) error //perm:write
 	// WalletExport returns the private key of an address in the wallet.
-	WalletExport(context.Context, address.Address) (*types.KeyInfo, error)
+	WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
 	// WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
-	WalletImport(context.Context, *types.KeyInfo) (address.Address, error)
+	WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
 	// WalletDelete deletes an address from the wallet.
-	WalletDelete(context.Context, address.Address) error
+	WalletDelete(context.Context, address.Address) error //perm:admin
 	// WalletValidateAddress validates whether a given string can be decoded as a well-formed address
-	WalletValidateAddress(context.Context, string) (address.Address, error)
+	WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read

 	// Other
@ -275,198 +322,290 @@ type FullNode interface {
|
|||||||
// retrieval markets as a client
|
// retrieval markets as a client
|
||||||
|
|
||||||
// ClientImport imports file under the specified path into filestore.
|
// ClientImport imports file under the specified path into filestore.
|
||||||
ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error)
|
ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) //perm:admin
|
||||||
// ClientRemoveImport removes file import
|
// ClientRemoveImport removes file import
|
||||||
ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error
|
ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin
|
||||||
// ClientStartDeal proposes a deal with a miner.
|
// ClientStartDeal proposes a deal with a miner.
|
||||||
ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error)
|
ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin
|
||||||
|
// ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
|
||||||
|
ClientStatelessDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:write
|
||||||
// ClientGetDealInfo returns the latest information about a given deal.
|
// ClientGetDealInfo returns the latest information about a given deal.
|
||||||
ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error)
|
ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read
|
||||||
// ClientListDeals returns information about the deals made by the local client.
|
// ClientListDeals returns information about the deals made by the local client.
|
||||||
ClientListDeals(ctx context.Context) ([]DealInfo, error)
|
ClientListDeals(ctx context.Context) ([]DealInfo, error) //perm:write
|
||||||
// ClientGetDealUpdates returns the status of updated deals
|
// ClientGetDealUpdates returns the status of updated deals
|
||||||
ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error)
|
ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) //perm:write
|
||||||
// ClientGetDealStatus returns status given a code
|
// ClientGetDealStatus returns status given a code
|
||||||
ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error)
|
ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
|
||||||
// ClientHasLocal indicates whether a certain CID is locally stored.
|
// ClientHasLocal indicates whether a certain CID is locally stored.
|
||||||
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error)
|
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
|
||||||
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
|
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
|
||||||
ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error)
|
ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error) //perm:read
|
||||||
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
|
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
|
||||||
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error)
|
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read
|
||||||
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
|
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
|
||||||
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error
|
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error //perm:admin
|
||||||
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
|
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
|
||||||
// of status updates.
|
// of status updates.
|
||||||
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error)
|
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
|
||||||
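
Since ClientRetrieveWithEvents hands back a receive-only channel, the usual pattern is to drain it until it closes and treat a non-empty Err as a failed retrieval. A hedged sketch, assuming marketevents.RetrievalEvent carries its error as a string field named Err:

	events, err := api.ClientRetrieveWithEvents(ctx, order, ref)
	if err != nil {
		return err
	}
	for ev := range events { // the channel closes once the retrieval terminates
		if ev.Err != "" {
			return fmt.Errorf("retrieval failed: %s", ev.Err)
		}
	}
	return nil
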
|
// ClientListRetrievals returns information about retrievals made by the local client
|
||||||
|
ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write
|
||||||
|
// ClientGetRetrievalUpdates returns the status of updated retrieval deals
|
||||||
|
ClientGetRetrievalUpdates(ctx context.Context) (<-chan RetrievalInfo, error) //perm:write
|
||||||
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
|
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
|
||||||
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error)
|
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
|
||||||
// ClientDealPieceCID calculates the CommP and data size of the specified CID
|
// ClientDealPieceCID calculates the CommP and data size of the specified CID
|
||||||
ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error)
|
ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) //perm:read
|
||||||
// ClientCalcCommP calculates the CommP for a specified file
|
// ClientCalcCommP calculates the CommP for a specified file
|
||||||
ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error)
|
ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) //perm:write
|
||||||
// ClientGenCar generates a CAR file for the specified file.
|
// ClientGenCar generates a CAR file for the specified file.
|
||||||
ClientGenCar(ctx context.Context, ref FileRef, outpath string) error
|
ClientGenCar(ctx context.Context, ref FileRef, outpath string) error //perm:write
|
||||||
// ClientDealSize calculates real deal data size
|
// ClientDealSize calculates real deal data size
|
||||||
ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error)
|
ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) //perm:read
|
||||||
// ClientListDataTransfers returns the status of all ongoing transfers of data
|
// ClientListDataTransfers returns the status of all ongoing transfers of data
|
||||||
ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
|
ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
|
||||||
ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
|
ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
|
||||||
// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
|
// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
|
||||||
ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
|
ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
|
||||||
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
|
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
|
||||||
ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
|
ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
|
||||||
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
|
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
|
||||||
// which are stuck due to insufficient funds
|
// which are stuck due to insufficient funds
|
||||||
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error
|
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write
|
||||||
|
|
||||||
|
// ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
|
||||||
|
ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write
|
||||||
|
|
||||||
// ClientUnimport removes references to the specified file from filestore
|
// ClientUnimport removes references to the specified file from filestore
|
||||||
//ClientUnimport(path string)
|
//ClientUnimport(path string)
|
||||||
|
|
||||||
// ClientListImports lists imported files and their root CIDs
|
// ClientListImports lists imported files and their root CIDs
|
||||||
ClientListImports(ctx context.Context) ([]Import, error)
|
ClientListImports(ctx context.Context) ([]Import, error) //perm:write
|
||||||
|
|
||||||
//ClientListAsks() []Ask
|
//ClientListAsks() []Ask
|
||||||
|
|
||||||
// MethodGroup: State
|
// MethodGroup: State
|
||||||
// The State methods are used to query, inspect, and interact with chain state.
|
// The State methods are used to query, inspect, and interact with chain state.
|
||||||
// Most methods take a TipSetKey as a parameter. The state looked up is the state at that tipset.
|
// Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
|
||||||
// A nil TipSetKey can be provided as a param; this will cause the heaviest tipset in the chain to be used.
|
// A nil TipSetKey can be provided as a param; this will cause the heaviest tipset in the chain to be used.
|
||||||
|
|
||||||
// StateCall runs the given message and returns its result without any persisted changes.
|
// StateCall runs the given message and returns its result without any persisted changes.
|
||||||
StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error)
|
//
|
||||||
|
// StateCall applies the message to the tipset's parent state. The
|
||||||
|
// message is not applied on-top-of the messages in the passed-in
|
||||||
|
// tipset.
|
||||||
|
StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error) //perm:read
|
||||||
// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
|
// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
|
||||||
// If no tipset key is provided, the appropriate tipset is looked up.
|
//
|
||||||
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
|
// If a tipset key is provided, and a replacing message is found on chain,
|
||||||
|
// the method will return an error saying that the message wasn't found
|
||||||
|
//
|
||||||
|
// If no tipset key is provided, the appropriate tipset is looked up, and if
|
||||||
|
// the message was gas-repriced, the on-chain message will be replayed - in
|
||||||
|
// that case the returned InvocResult.MsgCid will not match the Cid param
|
||||||
|
//
|
||||||
|
// If the caller wants to ensure that exactly the requested message was executed,
|
||||||
|
// they MUST check that InvocResult.MsgCid is equal to the provided Cid.
|
||||||
|
// Without this check both the original and the replacing message may appear as
|
||||||
|
// successfully executed on-chain, which may look like a double-spend.
|
||||||
|
//
|
||||||
|
// A replacing message is a message with a different CID: different gas values and/or a
|
||||||
|
// different signature, but with all other parameters matching (source/destination,
|
||||||
|
// nonce, params, etc.)
|
||||||
|
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error) //perm:read
|
||||||
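
A minimal sketch of the MsgCid check the comment above insists on, assuming an `api` variable holding this interface (types.EmptyTSK takes the no-tipset-key path):

	res, err := api.StateReplay(ctx, types.EmptyTSK, msgCid)
	if err != nil {
		return err
	}
	if !res.MsgCid.Equals(msgCid) {
		// a gas-repriced replacement was replayed, not the exact message requested
	}
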
// StateGetActor returns the indicated actor's nonce and balance.
|
// StateGetActor returns the indicated actor's nonce and balance.
|
||||||
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
|
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read
|
||||||
// StateReadState returns the indicated actor's state.
|
// StateReadState returns the indicated actor's state.
|
||||||
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
|
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
|
||||||
// StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
|
// StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
|
||||||
StateListMessages(ctx context.Context, match *MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error)
|
StateListMessages(ctx context.Context, match *MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read
|
||||||
|
// StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number.
|
||||||
|
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read
|
||||||
|
|
||||||
// StateNetworkName returns the name of the network the node is synced to
|
// StateNetworkName returns the name of the network the node is synced to
|
||||||
StateNetworkName(context.Context) (dtypes.NetworkName, error)
|
StateNetworkName(context.Context) (dtypes.NetworkName, error) //perm:read
|
||||||
// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
|
// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
|
||||||
StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
|
StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
|
||||||
// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
|
// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
|
||||||
StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
|
StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
|
||||||
// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
|
// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
|
||||||
// and returns the deadline-related calculations.
|
// and returns the deadline-related calculations.
|
||||||
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
|
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) //perm:read
|
||||||
// StateMinerPower returns the power of the indicated miner
|
// StateMinerPower returns the power of the indicated miner
|
||||||
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
|
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) //perm:read
|
||||||
// StateMinerInfo returns info about the indicated miner
|
// StateMinerInfo returns info about the indicated miner
|
||||||
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
|
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) //perm:read
|
||||||
// StateMinerDeadlines returns all the proving deadlines for the given miner
|
// StateMinerDeadlines returns all the proving deadlines for the given miner
|
||||||
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error)
|
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error) //perm:read
|
||||||
// StateMinerPartitions returns all partitions in the specified deadline
|
// StateMinerPartitions returns all partitions in the specified deadline
|
||||||
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error)
|
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error) //perm:read
|
||||||
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
|
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
|
||||||
StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
|
StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
|
||||||
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
|
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
|
||||||
StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error)
|
StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error) //perm:read
|
||||||
// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
|
// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
|
||||||
StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
|
StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
|
||||||
// StateMinerPreCommitDepositForPower returns the precommit deposit for the specified miner's sector
|
// StateMinerPreCommitDepositForPower returns the precommit deposit for the specified miner's sector
|
||||||
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
|
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
|
||||||
// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
|
// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
|
||||||
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
|
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
|
||||||
// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
|
// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
|
||||||
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
|
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
|
||||||
// StateMinerSectorAllocated checks if a sector is allocated
|
// StateMinerSectorAllocated checks if a sector is allocated
|
||||||
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error)
|
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) //perm:read
|
||||||
// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
|
// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
|
||||||
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
|
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) //perm:read
|
||||||
// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns nil if the sector info is not found
|
// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns nil if the sector info is not found
|
||||||
// NOTE: returned info.Expiration may not be accurate in some cases; use StateSectorExpiration to get the accurate
|
// NOTE: returned info.Expiration may not be accurate in some cases; use StateSectorExpiration to get the accurate
|
||||||
// expiration epoch
|
// expiration epoch
|
||||||
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
|
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read
|
||||||
// StateSectorExpiration returns the epoch at which the given sector will expire
|
// StateSectorExpiration returns the epoch at which the given sector will expire
|
||||||
StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error)
|
StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read
|
||||||
// StateSectorPartition finds the deadline/partition containing the specified sector
|
// StateSectorPartition finds the deadline/partition containing the specified sector
|
||||||
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error)
|
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read
|
||||||
// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
|
// StateSearchMsg looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
|
||||||
StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
|
//
|
||||||
// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
|
// NOTE: If a replacing message is found on chain, this method will return
|
||||||
// message arrives on chain, and gets to the indicated confidence depth.
|
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
|
||||||
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error)
|
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
|
||||||
// StateWaitMsgLimited looks back up to limit epochs in the chain for a message.
|
// result of the execution of the replacing message.
|
||||||
|
//
|
||||||
|
// If the caller wants to ensure that exactly the requested message was executed,
|
||||||
|
// they must check that MsgLookup.Message is equal to the provided 'cid', or set the
|
||||||
|
// `allowReplaced` parameter to false. Without this check, and with `allowReplaced`
|
||||||
|
// set to true, both the original and the replacing message may appear as
|
||||||
|
// successfully executed on-chain, which may look like a double-spend.
|
||||||
|
//
|
||||||
|
// A replacing message is a message with a different CID: different gas values and/or a
|
||||||
|
// different signature, but with all other parameters matching (source/destination,
|
||||||
|
// nonce, params, etc.)
|
||||||
|
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) //perm:read
|
||||||
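
Illustrative call (a sketch, assuming it compiles within this package, so LookbackNoLimit refers to the package's unbounded-lookback sentinel); with allowReplaced set to false the node itself refuses to return a replacing message:

	lookup, err := api.StateSearchMsg(ctx, types.EmptyTSK, msgCid, LookbackNoLimit, false)
	if err != nil {
		return err // e.g. a replacing message was found on chain
	}
	if lookup == nil {
		// message not (yet) executed within the lookback window
	}
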
|
// StateWaitMsg looks back up to limit epochs in the chain for a message.
|
||||||
// If not found, it blocks until the message arrives on chain, and gets to the
|
// If not found, it blocks until the message arrives on chain, and gets to the
|
||||||
// indicated confidence depth.
|
// indicated confidence depth.
|
||||||
StateWaitMsgLimited(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*MsgLookup, error)
|
//
|
||||||
|
// NOTE: If a replacing message is found on chain, this method will return
|
||||||
|
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
|
||||||
|
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
|
||||||
|
// result of the execution of the replacing message.
|
||||||
|
//
|
||||||
|
// If the caller wants to ensure that exactly the requested message was executed,
|
||||||
|
// they must check that MsgLookup.Message is equal to the provided 'cid', or set the
|
||||||
|
// `allowReplaced` parameter to false. Without this check, and with `allowReplaced`
|
||||||
|
// set to true, both the original and the replacing message may appear as
|
||||||
|
// successfully executed on-chain, which may look like a double-spend.
|
||||||
|
//
|
||||||
|
// A replacing message is a message with a different CID: different gas values and/or a
|
||||||
|
// different signature, but with all other parameters matching (source/destination,
|
||||||
|
// nonce, params, etc.)
|
||||||
|
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) //perm:read
|
||||||
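
The wait-side counterpart as a sketch; the confidence value here is arbitrary, and the MsgLookup.Message check mirrors the caveat spelled out above:

	ml, err := api.StateWaitMsg(ctx, msgCid, 5, LookbackNoLimit, true)
	if err != nil {
		return err
	}
	if !ml.Message.Equals(msgCid) {
		// the executed message was a replacement; ml.Receipt belongs to it
	}
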
// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
|
// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
|
||||||
StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error)
|
StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
|
||||||
// StateListActors returns the addresses of every actor in the state
|
// StateListActors returns the addresses of every actor in the state
|
||||||
StateListActors(context.Context, types.TipSetKey) ([]address.Address, error)
|
StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
|
||||||
// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
|
// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
|
||||||
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error)
|
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error) //perm:read
|
||||||
// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
|
// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
|
||||||
StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error)
|
StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error) //perm:read
|
||||||
// StateMarketDeals returns information about every deal in the Storage Market
|
// StateMarketDeals returns information about every deal in the Storage Market
|
||||||
StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error)
|
StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error) //perm:read
|
||||||
// StateMarketStorageDeal returns information about the indicated deal
|
// StateMarketStorageDeal returns information about the indicated deal
|
||||||
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error)
|
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) //perm:read
|
||||||
// StateLookupID retrieves the ID address of the given address
|
// StateLookupID retrieves the ID address of the given address
|
||||||
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
|
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
|
||||||
// StateAccountKey returns the public key address of the given ID address
|
// StateAccountKey returns the public key address of the given ID address
|
||||||
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error)
|
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
|
||||||
// StateChangedActors returns all the actors whose states change between the two given state CIDs
|
// StateChangedActors returns all the actors whose states change between the two given state CIDs
|
||||||
// TODO: Should this take tipset keys instead?
|
// TODO: Should this take tipset keys instead?
|
||||||
StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error)
|
StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read
|
||||||
// StateGetReceipt returns the message receipt for the given message
|
|
||||||
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
|
|
||||||
// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
|
// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
|
||||||
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
|
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error) //perm:read
|
||||||
// StateCompute is a flexible command that applies the given messages on the given tipset.
|
// StateCompute is a flexible command that applies the given messages on the given tipset.
|
||||||
// The messages are run as though the VM were at the provided height.
|
// The messages are run as though the VM were at the provided height.
|
||||||
StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error)
|
//
|
||||||
|
// When called, StateCompute will:
|
||||||
|
// - Load the provided tipset, or use the current chain head if not provided
|
||||||
|
// - Compute the tipset state of the provided tipset on top of the parent state
|
||||||
|
// - (note that this step runs before vmheight is applied to the execution)
|
||||||
|
// - Execute state upgrades, if any were scheduled at the epoch or in null
|
||||||
|
// blocks preceding the tipset
|
||||||
|
// - Call the cron actor on null blocks preceding the tipset
|
||||||
|
// - For each block in the tipset
|
||||||
|
// - Apply messages in blocks in the specified tipset
|
||||||
|
// - Award block reward by calling the reward actor
|
||||||
|
// - Call the cron actor for the current epoch
|
||||||
|
// - If the specified vmheight is higher than the current epoch, apply any
|
||||||
|
// needed state upgrades to the state
|
||||||
|
// - Apply the specified messages to the state
|
||||||
|
//
|
||||||
|
// The vmheight parameter sets the VM execution epoch, and can be used to simulate
|
||||||
|
// message execution in different network versions. If the specified vmheight
|
||||||
|
// epoch is higher than the epoch of the specified tipset, any state upgrades
|
||||||
|
// until the vmheight will be executed on the state before applying messages
|
||||||
|
// specified by the user.
|
||||||
|
//
|
||||||
|
// Note that the initial tipset state computation is not affected by the
|
||||||
|
// vmheight parameter - only the messages in the `apply` set are run at the vmheight epoch.
|
||||||
|
//
|
||||||
|
// If the caller wants to simply compute the state, vmheight should be set to
|
||||||
|
// the epoch of the specified tipset.
|
||||||
|
//
|
||||||
|
// Messages in the `apply` parameter must have the correct nonces and gas
|
||||||
|
// values set.
|
||||||
|
StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error) //perm:read
|
||||||
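
For the plain compute-the-state case described above, vmheight is just the tipset's own height and the apply set is empty. A hedged sketch:

	ts, err := api.ChainHead(ctx)
	if err != nil {
		return err
	}
	out, err := api.StateCompute(ctx, ts.Height(), nil, ts.Key())
	if err != nil {
		return err
	}
	_ = out.Root // computed state root; out.Trace carries per-message execution results
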
// StateVerifierStatus returns the data cap for the given address.
|
// StateVerifierStatus returns the data cap for the given address.
|
||||||
// Returns nil if there is no entry in the data cap table for the
|
// Returns nil if there is no entry in the data cap table for the
|
||||||
// address.
|
// address.
|
||||||
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
|
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
|
||||||
// StateVerifiedClientStatus returns the data cap for the given address.
|
// StateVerifiedClientStatus returns the data cap for the given address.
|
||||||
// Returns nil if there is no entry in the data cap table for the
|
// Returns nil if there is no entry in the data cap table for the
|
||||||
// address.
|
// address.
|
||||||
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
|
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
|
||||||
// StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
|
// StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
|
||||||
StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error)
|
StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read
|
||||||
// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
|
// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
|
||||||
// can issue. It takes the deal size and verified status as parameters.
|
// can issue. It takes the deal size and verified status as parameters.
|
||||||
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error)
|
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error) //perm:read
|
||||||
|
|
||||||
// StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset.
|
// StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset.
|
||||||
// This is not used anywhere in the protocol itself, and is only for external consumption.
|
// This is not used anywhere in the protocol itself, and is only for external consumption.
|
||||||
StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error)
|
StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) //perm:read
|
||||||
// StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset.
|
// StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset.
|
||||||
// This is the value reported by the runtime interface to actors code.
|
// This is the value reported by the runtime interface to actors code.
|
||||||
StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (CirculatingSupply, error)
|
StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (CirculatingSupply, error) //perm:read
|
||||||
// StateNetworkVersion returns the network version at the given tipset
|
// StateNetworkVersion returns the network version at the given tipset
|
||||||
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
|
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read
|
||||||
|
|
||||||
// MethodGroup: Msig
|
// MethodGroup: Msig
|
||||||
// The Msig methods are used to interact with multisig wallets on the
|
// The Msig methods are used to interact with multisig wallets on the
|
||||||
// filecoin network
|
// filecoin network
|
||||||
|
|
||||||
// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
|
// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
|
||||||
MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
|
MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
|
||||||
// MsigGetVestingSchedule returns the vesting details of a given multisig.
|
// MsigGetVestingSchedule returns the vesting details of a given multisig.
|
||||||
MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (MsigVesting, error)
|
MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (MsigVesting, error) //perm:read
|
||||||
// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
|
// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
|
||||||
// It takes the following params: <multisig address>, <start epoch>, <end epoch>
|
// It takes the following params: <multisig address>, <start epoch>, <end epoch>
|
||||||
MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error)
|
MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read
|
||||||
|
|
||||||
|
// MsigGetPending returns pending transactions for the given multisig
|
||||||
|
// wallet. Once pending transactions are fully approved, they will no longer
|
||||||
|
// appear here.
|
||||||
|
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error) //perm:read
|
||||||
|
|
||||||
// MsigCreate creates a multisig wallet
|
// MsigCreate creates a multisig wallet
|
||||||
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
|
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
|
||||||
// <initial balance>, <sender address of the create msg>, <gas price>
|
// <initial balance>, <sender address of the create msg>, <gas price>
|
||||||
MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error)
|
MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (*MessagePrototype, error) //perm:sign
|
||||||
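
Note the signature change running through this Msig block: the methods now return a *MessagePrototype for the caller to sign and push, instead of pushing a message themselves. A sketch under the assumption that MessagePrototype exposes the unsigned message as a Message field:

	proto, err := api.MsigCreate(ctx, 2, signers, 0, types.NewInt(0), creator, types.NewInt(0))
	if err != nil {
		return err
	}
	sm, err := api.MpoolPushMessage(ctx, &proto.Message, nil)
	if err != nil {
		return err
	}
	// wait on sm.Cid() with StateWaitMsg before relying on the new wallet's existence
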
|
|
||||||
// MsigPropose proposes a multisig message
|
// MsigPropose proposes a multisig message
|
||||||
// It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
|
// It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
|
||||||
// <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
|
// <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
|
||||||
MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
|
MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
// MsigApprove approves a previously-proposed multisig message by transaction ID
|
// MsigApprove approves a previously-proposed multisig message by transaction ID
|
||||||
// It takes the following params: <multisig address>, <proposed transaction ID>, <signer address>
|
// It takes the following params: <multisig address>, <proposed transaction ID>, <signer address>
|
||||||
MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error)
|
MsigApprove(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
// MsigApproveTxnHash approves a previously-proposed multisig message, specified
|
// MsigApproveTxnHash approves a previously-proposed multisig message, specified
|
||||||
// using both transaction ID and a hash of the parameters used in the
|
// using both transaction ID and a hash of the parameters used in the
|
||||||
@@ -474,72 +613,91 @@ type FullNode interface {
|
|||||||
// exactly the transaction you think you are.
|
// exactly the transaction you think you are.
|
||||||
// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
|
// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
|
||||||
// <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
|
// <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
|
||||||
MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
|
MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
// MsigCancel cancels a previously-proposed multisig message
|
// MsigCancel cancels a previously-proposed multisig message
|
||||||
// It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
|
// It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
|
||||||
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
|
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
|
||||||
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
|
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
// MsigAddPropose proposes adding a signer in the multisig
|
// MsigAddPropose proposes adding a signer in the multisig
|
||||||
// It takes the following params: <multisig address>, <sender address of the propose msg>,
|
// It takes the following params: <multisig address>, <sender address of the propose msg>,
|
||||||
// <new signer>, <whether the number of required signers should be increased>
|
// <new signer>, <whether the number of required signers should be increased>
|
||||||
MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error)
|
MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
// MsigAddApprove approves a previously proposed AddSigner message
|
// MsigAddApprove approves a previously proposed AddSigner message
|
||||||
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
|
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
|
||||||
// <proposer address>, <new signer>, <whether the number of required signers should be increased>
|
// <proposer address>, <new signer>, <whether the number of required signers should be increased>
|
||||||
MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error)
|
MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
// MsigAddCancel cancels a previously proposed AddSigner message
|
// MsigAddCancel cancels a previously proposed AddSigner message
|
||||||
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
|
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
|
||||||
// <new signer>, <whether the number of required signers should be increased>
|
// <new signer>, <whether the number of required signers should be increased>
|
||||||
MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error)
|
MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
// MsigSwapPropose proposes swapping 2 signers in the multisig
|
// MsigSwapPropose proposes swapping 2 signers in the multisig
|
||||||
// It takes the following params: <multisig address>, <sender address of the propose msg>,
|
// It takes the following params: <multisig address>, <sender address of the propose msg>,
|
||||||
// <old signer>, <new signer>
|
// <old signer>, <new signer>
|
||||||
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error)
|
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
// MsigSwapApprove approves a previously proposed SwapSigner
|
// MsigSwapApprove approves a previously proposed SwapSigner
|
||||||
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
|
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
|
||||||
// <proposer address>, <old signer>, <new signer>
|
// <proposer address>, <old signer>, <new signer>
|
||||||
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error)
|
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
// MsigSwapCancel cancels a previously proposed SwapSigner message
|
// MsigSwapCancel cancels a previously proposed SwapSigner message
|
||||||
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
|
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
|
||||||
// <old signer>, <new signer>
|
// <old signer>, <new signer>
|
||||||
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error)
|
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
// MsigRemoveSigner proposes the removal of a signer from the multisig.
|
// MsigRemoveSigner proposes the removal of a signer from the multisig.
|
||||||
// It accepts the multisig to make the change on, the proposer address to
|
// It accepts the multisig to make the change on, the proposer address to
|
||||||
// send the message from, the address to be removed, and a boolean
|
// send the message from, the address to be removed, and a boolean
|
||||||
// indicating whether or not the signing threshold should be lowered by one
|
// indicating whether or not the signing threshold should be lowered by one
|
||||||
// along with the address removal.
|
// along with the address removal.
|
||||||
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error)
|
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*MessagePrototype, error) //perm:sign
|
||||||
|
|
||||||
MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error)
|
// MarketAddBalance adds funds to the market actor
|
||||||
// MarketFreeBalance
|
MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
|
||||||
|
// MarketGetReserved gets the amount of funds that are currently reserved for the address
|
||||||
|
MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign
|
||||||
|
// MarketReserveFunds reserves funds for a deal
|
||||||
|
MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
|
||||||
|
// MarketReleaseFunds releases funds reserved by MarketReserveFunds
|
||||||
|
MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign
|
||||||
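
The reserve/release pair brackets deal funding: reserve before proposing so the same funds cannot be committed twice, then release whatever was not spent. A rough sketch (amounts are placeholders):

	mcid, err := api.MarketReserveFunds(ctx, wallet, addr, types.NewInt(1000))
	if err != nil {
		return err
	}
	// ... wait for mcid to land, propose the deal ...
	if err := api.MarketReleaseFunds(ctx, addr, types.NewInt(1000)); err != nil {
		return err
	}
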
|
// MarketWithdraw withdraws unlocked funds from the market actor
|
||||||
|
MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
|
||||||
|
|
||||||
// MethodGroup: Paych
|
// MethodGroup: Paych
|
||||||
// The Paych methods are for interacting with and managing payment channels
|
// The Paych methods are for interacting with and managing payment channels
|
||||||
|
|
||||||
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error)
|
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error) //perm:sign
|
||||||
PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error)
|
PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign
|
||||||
PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error)
|
PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error) //perm:sign
|
||||||
PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error)
|
PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error) //perm:sign
|
||||||
PaychList(context.Context) ([]address.Address, error)
|
PaychList(context.Context) ([]address.Address, error) //perm:read
|
||||||
PaychStatus(context.Context, address.Address) (*PaychStatus, error)
|
PaychStatus(context.Context, address.Address) (*PaychStatus, error) //perm:read
|
||||||
PaychSettle(context.Context, address.Address) (cid.Cid, error)
|
PaychSettle(context.Context, address.Address) (cid.Cid, error) //perm:sign
|
||||||
PaychCollect(context.Context, address.Address) (cid.Cid, error)
|
PaychCollect(context.Context, address.Address) (cid.Cid, error) //perm:sign
|
||||||
PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error)
|
PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign
|
||||||
PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error)
|
PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error) //perm:sign
|
||||||
PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error
|
PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error //perm:read
|
||||||
PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error)
|
PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) //perm:read
|
||||||
PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error)
|
PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error) //perm:sign
|
||||||
PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error)
|
PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) //perm:write
|
||||||
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error)
|
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write
|
||||||
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error)
|
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign
|
||||||
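
A typical client-side flow across the Paych methods above, as a hedged sketch (ChannelInfo field names assumed):

	ci, err := api.PaychGet(ctx, from, to, types.NewInt(amt)) // creates or tops up the channel
	if err != nil {
		return err
	}
	ch, err := api.PaychGetWaitReady(ctx, ci.WaitSentinel) // blocks until the on-chain message lands
	if err != nil {
		return err
	}
	lane, err := api.PaychAllocateLane(ctx, ch)
	if err != nil {
		return err
	}
	vres, err := api.PaychVoucherCreate(ctx, ch, types.NewInt(amt), lane)
	if err != nil {
		return err
	}
	// hand vres.Voucher to the recipient, who redeems it via PaychVoucherAdd
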
|
|
||||||
|
// MethodGroup: Node
|
||||||
|
// These methods are general node management and status commands
|
||||||
|
|
||||||
|
NodeStatus(ctx context.Context, inclChainStatus bool) (NodeStatus, error) //perm:read
|
||||||
|
|
||||||
// CreateBackup creates node backup under the specified file name. The
|
// CreateBackup creates node backup under the specified file name. The
|
||||||
// method requires that the lotus daemon is running with the
|
// method requires that the lotus daemon is running with the
|
||||||
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
|
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
|
||||||
// the path specified when calling CreateBackup is within the base path
|
// the path specified when calling CreateBackup is within the base path
|
||||||
CreateBackup(ctx context.Context, fpath string) error
|
CreateBackup(ctx context.Context, fpath string) error //perm:admin
|
||||||
}
|
}
|
||||||
|
|
||||||
type FileRef struct {
|
type FileRef struct {
|
||||||
@@ -574,6 +732,7 @@ type DealInfo struct {
|
|||||||
ProposalCid cid.Cid
|
ProposalCid cid.Cid
|
||||||
State storagemarket.StorageDealStatus
|
State storagemarket.StorageDealStatus
|
||||||
Message string // more information about deal state, particularly errors
|
Message string // more information about deal state, particularly errors
|
||||||
|
DealStages *storagemarket.DealStages
|
||||||
Provider address.Address
|
Provider address.Address
|
||||||
|
|
||||||
DataRef *storagemarket.DataRef
|
DataRef *storagemarket.DataRef
|
||||||
@@ -587,6 +746,9 @@ type DealInfo struct {
|
|||||||
|
|
||||||
CreationTime time.Time
|
CreationTime time.Time
|
||||||
Verified bool
|
Verified bool
|
||||||
|
|
||||||
|
TransferChannelID *datatransfer.ChannelID
|
||||||
|
DataTransfer *DataTransferChannel
|
||||||
}
|
}
|
||||||
|
|
||||||
type MsgLookup struct {
|
type MsgLookup struct {
|
||||||
@@ -624,6 +786,7 @@ type Message struct {
|
|||||||
|
|
||||||
type ActorState struct {
|
type ActorState struct {
|
||||||
Balance types.BigInt
|
Balance types.BigInt
|
||||||
|
Code cid.Cid
|
||||||
State interface{}
|
State interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -725,7 +888,7 @@ func (o *QueryOffer) Order(client address.Address) RetrievalOrder {
|
|||||||
Client: client,
|
Client: client,
|
||||||
|
|
||||||
Miner: o.Miner,
|
Miner: o.Miner,
|
||||||
MinerPeer: o.MinerPeer,
|
MinerPeer: &o.MinerPeer,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -744,6 +907,8 @@ type RetrievalOrder struct {
|
|||||||
Root cid.Cid
|
Root cid.Cid
|
||||||
Piece *cid.Cid
|
Piece *cid.Cid
|
||||||
Size uint64
|
Size uint64
|
||||||
|
|
||||||
|
LocalStore *multistore.StoreID // if specified, get data from local store
|
||||||
// TODO: support offset
|
// TODO: support offset
|
||||||
Total types.BigInt
|
Total types.BigInt
|
||||||
UnsealPrice types.BigInt
|
UnsealPrice types.BigInt
|
||||||
@@ -751,7 +916,7 @@ type RetrievalOrder struct {
|
|||||||
PaymentIntervalIncrease uint64
|
PaymentIntervalIncrease uint64
|
||||||
Client address.Address
|
Client address.Address
|
||||||
Miner address.Address
|
Miner address.Address
|
||||||
MinerPeer retrievalmarket.RetrievalPeer
|
MinerPeer *retrievalmarket.RetrievalPeer
|
||||||
}
|
}
|
||||||
|
|
||||||
type InvocResult struct {
|
type InvocResult struct {
|
||||||
@@ -781,14 +946,31 @@ type StartDealParams struct {
|
|||||||
VerifiedDeal bool
|
VerifiedDeal bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *StartDealParams) UnmarshalJSON(raw []byte) (err error) {
|
||||||
|
type sdpAlias StartDealParams
|
||||||
|
|
||||||
|
sdp := sdpAlias{
|
||||||
|
FastRetrieval: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(raw, &sdp); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
*s = StartDealParams(sdp)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
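
The alias type above is the standard default-value trick: sdpAlias shares StartDealParams' fields but not its methods, so json.Unmarshal does not recurse back into UnmarshalJSON, and FastRetrieval starts as true and is only overwritten when the input actually supplies it. For example:

	var p StartDealParams
	if err := json.Unmarshal([]byte(`{}`), &p); err != nil {
		return err
	}
	// p.FastRetrieval == true even though the input never mentioned it
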
|
|
||||||
type IpldObject struct {
|
type IpldObject struct {
|
||||||
Cid cid.Cid
|
Cid cid.Cid
|
||||||
Obj interface{}
|
Obj interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
type ActiveSync struct {
|
type ActiveSync struct {
|
||||||
Base *types.TipSet
|
WorkerID uint64
|
||||||
Target *types.TipSet
|
Base *types.TipSet
|
||||||
|
Target *types.TipSet
|
||||||
|
|
||||||
Stage SyncStateStage
|
Stage SyncStateStage
|
||||||
Height abi.ChainEpoch
|
Height abi.ChainEpoch
|
||||||
@@ -818,6 +1000,8 @@ const (
|
|||||||
|
|
||||||
func (v SyncStateStage) String() string {
|
func (v SyncStateStage) String() string {
|
||||||
switch v {
|
switch v {
|
||||||
|
case StageIdle:
|
||||||
|
return "idle"
|
||||||
case StageHeaders:
|
case StageHeaders:
|
||||||
return "header sync"
|
return "header sync"
|
||||||
case StagePersistHeaders:
|
case StagePersistHeaders:
|
||||||
@@ -858,11 +1042,12 @@ type DealCollateralBounds struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type CirculatingSupply struct {
|
type CirculatingSupply struct {
|
||||||
FilVested abi.TokenAmount
|
FilVested abi.TokenAmount
|
||||||
FilMined abi.TokenAmount
|
FilMined abi.TokenAmount
|
||||||
FilBurnt abi.TokenAmount
|
FilBurnt abi.TokenAmount
|
||||||
FilLocked abi.TokenAmount
|
FilLocked abi.TokenAmount
|
||||||
FilCirculating abi.TokenAmount
|
FilCirculating abi.TokenAmount
|
||||||
|
FilReserveDisbursed abi.TokenAmount
|
||||||
}
|
}
|
||||||
|
|
||||||
type MiningBaseInfo struct {
|
type MiningBaseInfo struct {
|
||||||
@@ -916,7 +1101,8 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Deadline struct {
|
type Deadline struct {
|
||||||
PostSubmissions bitfield.BitField
|
PostSubmissions bitfield.BitField
|
||||||
|
DisputableProofCount uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
type Partition struct {
|
type Partition struct {
|
||||||
@@ -948,3 +1134,13 @@ type MessageMatch struct {
|
|||||||
To address.Address
|
To address.Address
|
||||||
From address.Address
|
From address.Address
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type MsigTransaction struct {
|
||||||
|
ID int64
|
||||||
|
To address.Address
|
||||||
|
Value abi.TokenAmount
|
||||||
|
Method abi.MethodNum
|
||||||
|
Params []byte
|
||||||
|
|
||||||
|
Approved []address.Address
|
||||||
|
}
|
||||||
|
|||||||
@@ -8,13 +8,27 @@ import (
|
|||||||
"github.com/filecoin-project/go-address"
|
"github.com/filecoin-project/go-address"
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/go-state-types/dline"
|
"github.com/filecoin-project/go-state-types/dline"
|
||||||
"github.com/filecoin-project/go-state-types/network"
|
|
||||||
|
|
||||||
|
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
type GatewayAPI interface {
|
// MODIFYING THE API INTERFACE
|
||||||
|
//
|
||||||
|
// NOTE: This is the V1 (Unstable) API - to add methods to the V0 (Stable) API
|
||||||
|
// you'll have to add those methods to interfaces in `api/v0api`
|
||||||
|
//
|
||||||
|
// When adding / changing methods in this file:
|
||||||
|
// * Do the change here
|
||||||
|
// * Adjust implementation in `node/impl/`
|
||||||
|
// * Run `make gen` - this will:
|
||||||
|
// * Generate proxy structs
|
||||||
|
// * Generate mocks
|
||||||
|
// * Generate markdown docs
|
||||||
|
// * Generate openrpc blobs
|
||||||
|
|
||||||
|
type Gateway interface {
|
||||||
ChainHasObj(context.Context, cid.Cid) (bool, error)
|
ChainHasObj(context.Context, cid.Cid) (bool, error)
|
||||||
ChainHead(ctx context.Context) (*types.TipSet, error)
|
ChainHead(ctx context.Context) (*types.TipSet, error)
|
||||||
ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
|
ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
|
||||||
@@ -27,10 +41,11 @@ type GatewayAPI interface {
|
|||||||
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
|
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
|
||||||
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
|
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
|
||||||
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
|
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
|
||||||
|
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
|
||||||
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
|
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
|
||||||
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
|
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
|
||||||
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
|
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
|
||||||
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
|
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) //perm:read
|
||||||
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
|
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
|
||||||
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
|
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
|
||||||
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)
|
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)
|
||||||
@ -38,7 +53,11 @@ type GatewayAPI interface {
|
|||||||
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
|
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
|
||||||
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
|
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
|
||||||
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
|
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
|
||||||
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
|
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error)
|
||||||
|
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
|
||||||
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
|
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
|
||||||
StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*MsgLookup, error)
|
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
|
||||||
|
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
|
||||||
|
WalletBalance(context.Context, address.Address) (types.BigInt, error)
|
||||||
|
Version(context.Context) (APIVersion, error)
|
||||||
}
|
}
|
||||||
|
|||||||
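Because Gateway is a strict subset of the full-node API, read-only tooling can be written against it and pointed at either a public gateway or a full node. A small sketch using only methods guaranteed by the interface above (function name assumed):

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// printHead reads the chain head and the network version at that tipset.
func printHead(ctx context.Context, gw api.Gateway) error {
	head, err := gw.ChainHead(ctx)
	if err != nil {
		return err
	}
	fmt.Println("chain height:", head.Height())

	// StateNetworkVersion now returns apitypes.NetworkVersion (see the diff above).
	nv, err := gw.StateNetworkVersion(ctx, head.Key())
	if err != nil {
		return err
	}
	fmt.Println("network version:", nv)
	return nil
}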
api/api_net.go (new file, 66 lines)
@@ -0,0 +1,66 @@
+package api
+
+import (
+	"context"
+
+	metrics "github.com/libp2p/go-libp2p-core/metrics"
+	"github.com/libp2p/go-libp2p-core/network"
+	"github.com/libp2p/go-libp2p-core/peer"
+	"github.com/libp2p/go-libp2p-core/protocol"
+)
+
+// MODIFYING THE API INTERFACE
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+//  * Generate proxy structs
+//  * Generate mocks
+//  * Generate markdown docs
+//  * Generate openrpc blobs
+
+type Net interface {
+	// MethodGroup: Net
+
+	NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read
+	NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read
+	NetConnect(context.Context, peer.AddrInfo) error //perm:write
+	NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read
+	NetDisconnect(context.Context, peer.ID) error //perm:write
+	NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read
+	NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read
+	NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read
+	NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read
+	NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read
+
+	// NetBandwidthStats returns statistics about the nodes total bandwidth
+	// usage and current rate across all peers and protocols.
+	NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read
+
+	// NetBandwidthStatsByPeer returns statistics about the nodes bandwidth
+	// usage and current rate per peer
+	NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read
+
+	// NetBandwidthStatsByProtocol returns statistics about the nodes bandwidth
+	// usage and current rate per protocol
+	NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read
+
+	// ConnectionGater API
+	NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin
+	NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
+	NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read
+
+	// ID returns peerID of libp2p node backing this API
+	ID(context.Context) (peer.ID, error) //perm:read
+}
+
+type CommonNet interface {
+	Common
+	Net
+}
+
+type NatInfo struct {
+	Reachability network.Reachability
+	PublicAddr   string
+}
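Splitting the Net group into its own interface makes peer-management scripts easy to type-check against exactly what they use. A sketch built only from methods declared in the new file (helper name assumed):

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// dumpNetInfo prints the local peer ID, peer count, and bandwidth totals.
func dumpNetInfo(ctx context.Context, n api.Net) error {
	self, err := n.ID(ctx)
	if err != nil {
		return err
	}
	peers, err := n.NetPeers(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("%s has %d peers\n", self, len(peers))

	stats, err := n.NetBandwidthStats(ctx)
	if err != nil {
		return err
	}
	// metrics.Stats carries totals and current rates across all peers and protocols.
	fmt.Printf("bandwidth: in %d B, out %d B\n", stats.TotalIn, stats.TotalOut)
	return nil
}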
@@ -5,118 +5,205 @@ import (
 	"context"
 	"time"
 
-	datatransfer "github.com/filecoin-project/go-data-transfer"
+	"github.com/filecoin-project/lotus/chain/actors/builtin"
 
 	"github.com/google/uuid"
 	"github.com/ipfs/go-cid"
 	"github.com/libp2p/go-libp2p-core/peer"
 
 	"github.com/filecoin-project/go-address"
+	datatransfer "github.com/filecoin-project/go-data-transfer"
 	"github.com/filecoin-project/go-fil-markets/piecestore"
 	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
 	"github.com/filecoin-project/go-fil-markets/storagemarket"
 	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
+	"github.com/filecoin-project/specs-storage/storage"
 
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
 	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
 	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+	"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
 )
 
+// MODIFYING THE API INTERFACE
+//
+// When adding / changing methods in this file:
+// * Do the change here
+// * Adjust implementation in `node/impl/`
+// * Run `make gen` - this will:
+//  * Generate proxy structs
+//  * Generate mocks
+//  * Generate markdown docs
+//  * Generate openrpc blobs
+
 // StorageMiner is a low-level interface to the Filecoin network storage miner node
 type StorageMiner interface {
 	Common
+	Net
+
-	ActorAddress(context.Context) (address.Address, error)
+	ActorAddress(context.Context) (address.Address, error) //perm:read
 
-	ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error)
+	ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) //perm:read
+	ActorAddressConfig(ctx context.Context) (AddressConfig, error) //perm:read
 
-	MiningBase(context.Context) (*types.TipSet, error)
+	MiningBase(context.Context) (*types.TipSet, error) //perm:read
 
 	// Temp api for testing
-	PledgeSector(context.Context) error
+	PledgeSector(context.Context) (abi.SectorID, error) //perm:write
 
 	// Get the status of a given sector by ID
-	SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error)
+	SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read
 
+	// Add piece to an open sector. If no sectors with enough space are open,
+	// either a new sector will be created, or this call will block until more
+	// sectors can be created.
+	SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storage.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin
+
+	SectorsUnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin
+
 	// List all staged sectors
-	SectorsList(context.Context) ([]abi.SectorNumber, error)
+	SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read
 
-	SectorsRefs(context.Context) (map[string][]SealedRef, error)
+	// Get summary info of sectors
+	SectorsSummary(ctx context.Context) (map[SectorState]int, error) //perm:read
+
+	// List sectors in particular states
+	SectorsListInStates(context.Context, []SectorState) ([]abi.SectorNumber, error) //perm:read
+
+	SectorsRefs(context.Context) (map[string][]SealedRef, error) //perm:read
 
 	// SectorStartSealing can be called on sectors in Empty or WaitDeals states
 	// to trigger sealing early
-	SectorStartSealing(context.Context, abi.SectorNumber) error
+	SectorStartSealing(context.Context, abi.SectorNumber) error //perm:write
 	// SectorSetSealDelay sets the time that a newly-created sector
 	// waits for more deals before it starts sealing
-	SectorSetSealDelay(context.Context, time.Duration) error
+	SectorSetSealDelay(context.Context, time.Duration) error //perm:write
 	// SectorGetSealDelay gets the time that a newly-created sector
 	// waits for more deals before it starts sealing
-	SectorGetSealDelay(context.Context) (time.Duration, error)
+	SectorGetSealDelay(context.Context) (time.Duration, error) //perm:read
 	// SectorSetExpectedSealDuration sets the expected time for a sector to seal
-	SectorSetExpectedSealDuration(context.Context, time.Duration) error
+	SectorSetExpectedSealDuration(context.Context, time.Duration) error //perm:write
 	// SectorGetExpectedSealDuration gets the expected time for a sector to seal
-	SectorGetExpectedSealDuration(context.Context) (time.Duration, error)
+	SectorGetExpectedSealDuration(context.Context) (time.Duration, error) //perm:read
-	SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error
+	SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error //perm:admin
-	SectorRemove(context.Context, abi.SectorNumber) error
-	SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error
+	// SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
+	// be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
+	SectorRemove(context.Context, abi.SectorNumber) error //perm:admin
-	StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
-	StorageLocal(ctx context.Context) (map[stores.ID]string, error)
-	StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error)
+	// SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
+	// automatically removes it from storage
+	SectorTerminate(context.Context, abi.SectorNumber) error //perm:admin
+	// SectorTerminateFlush immediately sends a terminate message with sectors batched for termination.
+	// Returns null if message wasn't sent
+	SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
+	// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
+	SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
+	SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin
+	// SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
+	// Returns null if message wasn't sent
+	SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin
+	// SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message
+	SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
+	// SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit.
+	// Returns null if message wasn't sent
+	SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin
+	// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
+	SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
+
 	// WorkerConnect tells the node to connect to workers RPC
-	WorkerConnect(context.Context, string) error
+	WorkerConnect(context.Context, string) error //perm:admin retry:true
-	WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error)
+	WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) //perm:admin
-	WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error)
+	WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin
-	storiface.WorkerReturn
+	//storiface.WorkerReturn
+	ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
+	ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true
+	ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true
+	ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true
+	ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true
+	ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+	ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+	ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+	ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
+	ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true
+	ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
 
 	// SealingSchedDiag dumps internal sealing scheduler state
-	SealingSchedDiag(context.Context) (interface{}, error)
+	SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin
+	SealingAbort(ctx context.Context, call storiface.CallID) error //perm:admin
 
-	stores.SectorIndex
+	//stores.SectorIndex
+	StorageAttach(context.Context, stores.StorageInfo, fsutil.FsStat) error //perm:admin
+	StorageInfo(context.Context, stores.ID) (stores.StorageInfo, error) //perm:admin
+	StorageReportHealth(context.Context, stores.ID, stores.HealthReport) error //perm:admin
+	StorageDeclareSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin
+	StorageDropSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin
+	StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) //perm:admin
+	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) //perm:admin
+	StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
+	StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
+	StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin
 
-	MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error
+	StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin
-	MarketListDeals(ctx context.Context) ([]MarketDeal, error)
+	StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) //perm:admin
-	MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error)
-	MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error)
-	MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error)
-	MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error
-	MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error)
-	MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error
-	MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error)
-	MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
-	MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
-	// MinerRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
-	MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
-	// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
-	MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error
 
-	DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
+	MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
-	DealsList(ctx context.Context) ([]MarketDeal, error)
+	MarketListDeals(ctx context.Context) ([]MarketDeal, error) //perm:read
-	DealsConsiderOnlineStorageDeals(context.Context) (bool, error)
+	MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error) //perm:read
-	DealsSetConsiderOnlineStorageDeals(context.Context, bool) error
+	MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read
-	DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error)
+	MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read
-	DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error
+	MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin
-	DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error)
+	MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) //perm:read
-	DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error
+	MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error //perm:admin
-	DealsConsiderOfflineStorageDeals(context.Context) (bool, error)
+	MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) //perm:read
-	DealsSetConsiderOfflineStorageDeals(context.Context, bool) error
+	MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
-	DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error)
+	MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
-	DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error
+	// MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
+	MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
+	// MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
+	MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
+	MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write
+	MarketPublishPendingDeals(ctx context.Context) error //perm:admin
+
-	StorageAddLocal(ctx context.Context, path string) error
+	DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin
+	DealsList(ctx context.Context) ([]MarketDeal, error) //perm:admin
+	DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin
+	DealsSetConsiderOnlineStorageDeals(context.Context, bool) error //perm:admin
+	DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) //perm:admin
+	DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error //perm:admin
+	DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) //perm:admin
+	DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error //perm:admin
+	DealsConsiderOfflineStorageDeals(context.Context) (bool, error) //perm:admin
+	DealsSetConsiderOfflineStorageDeals(context.Context, bool) error //perm:admin
+	DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) //perm:admin
+	DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error //perm:admin
+	DealsConsiderVerifiedStorageDeals(context.Context) (bool, error) //perm:admin
+	DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error //perm:admin
+	DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error) //perm:admin
+	DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error //perm:admin
+
-	PiecesListPieces(ctx context.Context) ([]cid.Cid, error)
+	StorageAddLocal(ctx context.Context, path string) error //perm:admin
-	PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error)
+
-	PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error)
+	PiecesListPieces(ctx context.Context) ([]cid.Cid, error) //perm:read
-	PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error)
+	PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) //perm:read
+	PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read
+	PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) //perm:read
 
 	// CreateBackup creates node backup onder the specified file name. The
 	// method requires that the lotus-miner is running with the
 	// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
 	// the path specified when calling CreateBackup is within the base path
-	CreateBackup(ctx context.Context, fpath string) error
+	CreateBackup(ctx context.Context, fpath string) error //perm:admin
+
+	CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin
+
+	ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) //perm:read
 }
+
+var _ storiface.WorkerReturn = *new(StorageMiner)
+var _ stores.SectorIndex = *new(StorageMiner)
+
 type SealRes struct {
 	Err   string
 	GoErr error `json:"-"`
@@ -194,3 +281,54 @@ func (st *SealSeed) Equals(ost *SealSeed) bool {
 }
 
 type SectorState string
+
+type AddrUse int
+
+const (
+	PreCommitAddr AddrUse = iota
+	CommitAddr
+	DealPublishAddr
+	PoStAddr
+
+	TerminateSectorsAddr
+)
+
+type AddressConfig struct {
+	PreCommitControl   []address.Address
+	CommitControl      []address.Address
+	TerminateControl   []address.Address
+	DealPublishControl []address.Address
+
+	DisableOwnerFallback  bool
+	DisableWorkerFallback bool
+}
+
+// PendingDealInfo has info about pending deals and when they are due to be
+// published
+type PendingDealInfo struct {
+	Deals              []market.ClientDealProposal
+	PublishPeriodStart time.Time
+	PublishPeriod      time.Duration
+}
+
+type SectorOffset struct {
+	Sector abi.SectorNumber
+	Offset abi.PaddedPieceSize
+}
+
+// DealInfo is a tuple of deal identity and its schedule
+type PieceDealInfo struct {
+	PublishCid   *cid.Cid
+	DealID       abi.DealID
+	DealProposal *market.DealProposal
+	DealSchedule DealSchedule
+	KeepUnsealed bool
+}
+
+// DealSchedule communicates the time interval of a storage deal. The deal must
+// appear in a sealed (proven) sector no later than StartEpoch, otherwise it
+// is invalid.
+type DealSchedule struct {
+	StartEpoch abi.ChainEpoch
+	EndEpoch   abi.ChainEpoch
+}
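The new PieceDealInfo/DealSchedule pair is what SectorAddPieceToAny (declared above) now expects. A sketch of constructing one, under the assumption that the publish CID, deal ID, and proposal all come from the markets subsystem; the function is illustrative, not part of this change:

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
	"github.com/filecoin-project/specs-storage/storage"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
)

// addPiece hands a deal's piece data to the miner for sealing.
func addPiece(ctx context.Context, m api.StorageMiner, publishCid cid.Cid,
	dealID abi.DealID, prop *market.DealProposal,
	size abi.UnpaddedPieceSize, data storage.Data) error {

	deal := api.PieceDealInfo{
		PublishCid:   &publishCid,
		DealID:       dealID,
		DealProposal: prop,
		DealSchedule: api.DealSchedule{
			StartEpoch: prop.StartEpoch, // must be sealed and proven by this epoch
			EndEpoch:   prop.EndEpoch,
		},
		KeepUnsealed: true, // keep an unsealed copy around for retrievals
	}

	so, err := m.SectorAddPieceToAny(ctx, size, data, deal)
	if err != nil {
		return err
	}
	fmt.Printf("piece landed in sector %d at offset %d\n", so.Sector, so.Offset)
	return nil
}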
@@ -37,6 +37,18 @@ func TestDoesntDependOnFFI(t *testing.T) {
 	}
 }
 
+func TestDoesntDependOnBuild(t *testing.T) {
+	deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output()
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, pkg := range strings.Fields(string(deps)) {
+		if pkg == "github.com/filecoin-project/build" {
+			t.Fatal("api depends on filecoin-ffi")
+		}
+	}
+}
+
 func TestReturnTypes(t *testing.T) {
 	errType := reflect.TypeOf(new(error)).Elem()
 	bareIface := reflect.TypeOf(new(interface{})).Elem()
@@ -99,5 +111,11 @@ func TestReturnTypes(t *testing.T) {
 	t.Run("common", tst(new(Common)))
 	t.Run("full", tst(new(FullNode)))
 	t.Run("miner", tst(new(StorageMiner)))
-	t.Run("worker", tst(new(WorkerAPI)))
+	t.Run("worker", tst(new(Worker)))
+}
+
+func TestPermTags(t *testing.T) {
+	_ = PermissionedFullAPI(&FullNodeStruct{})
+	_ = PermissionedStorMinerAPI(&StorageMinerStruct{})
+	_ = PermissionedWorkerAPI(&WorkerStruct{})
 }
@@ -34,14 +34,14 @@ type MsgMeta struct {
 	Extra []byte
 }
 
-type WalletAPI interface {
+type Wallet interface {
-	WalletNew(context.Context, types.KeyType) (address.Address, error)
+	WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:admin
-	WalletHas(context.Context, address.Address) (bool, error)
+	WalletHas(context.Context, address.Address) (bool, error) //perm:admin
-	WalletList(context.Context) ([]address.Address, error)
+	WalletList(context.Context) ([]address.Address, error) //perm:admin
 
-	WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error)
+	WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error) //perm:admin
 
-	WalletExport(context.Context, address.Address) (*types.KeyInfo, error)
+	WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
-	WalletImport(context.Context, *types.KeyInfo) (address.Address, error)
+	WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
-	WalletDelete(context.Context, address.Address) error
+	WalletDelete(context.Context, address.Address) error //perm:admin
 }
|||||||
@ -4,29 +4,69 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
|
||||||
"github.com/filecoin-project/go-state-types/abi"
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||||
|
"github.com/filecoin-project/specs-storage/storage"
|
||||||
"github.com/filecoin-project/lotus/build"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type WorkerAPI interface {
|
// MODIFYING THE API INTERFACE
|
||||||
Version(context.Context) (build.Version, error)
|
//
|
||||||
// TODO: Info() (name, ...) ?
|
// When adding / changing methods in this file:
|
||||||
|
// * Do the change here
|
||||||
|
// * Adjust implementation in `node/impl/`
|
||||||
|
// * Run `make gen` - this will:
|
||||||
|
// * Generate proxy structs
|
||||||
|
// * Generate mocks
|
||||||
|
// * Generate markdown docs
|
||||||
|
// * Generate openrpc blobs
|
||||||
|
|
||||||
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) // TaskType -> Weight
|
type Worker interface {
|
||||||
Paths(context.Context) ([]stores.StoragePath, error)
|
Version(context.Context) (Version, error) //perm:admin
|
||||||
Info(context.Context) (storiface.WorkerInfo, error)
|
|
||||||
|
|
||||||
storiface.WorkerCalls
|
// TaskType -> Weight
|
||||||
|
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) //perm:admin
|
||||||
|
Paths(context.Context) ([]stores.StoragePath, error) //perm:admin
|
||||||
|
Info(context.Context) (storiface.WorkerInfo, error) //perm:admin
|
||||||
|
|
||||||
|
// storiface.WorkerCalls
|
||||||
|
AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) //perm:admin
|
||||||
|
SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
|
||||||
|
SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) //perm:admin
|
||||||
|
SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin
|
||||||
|
SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin
|
||||||
|
FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin
|
||||||
|
ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin
|
||||||
|
MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin
|
||||||
|
UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin
|
||||||
|
Fetch(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin
|
||||||
|
|
||||||
|
TaskDisable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
|
||||||
|
TaskEnable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
|
||||||
|
|
||||||
// Storage / Other
|
// Storage / Other
|
||||||
Remove(ctx context.Context, sector abi.SectorID) error
|
Remove(ctx context.Context, sector abi.SectorID) error //perm:admin
|
||||||
|
|
||||||
StorageAddLocal(ctx context.Context, path string) error
|
StorageAddLocal(ctx context.Context, path string) error //perm:admin
|
||||||
|
|
||||||
Session(context.Context) (uuid.UUID, error)
|
// SetEnabled marks the worker as enabled/disabled. Not that this setting
|
||||||
|
// may take a few seconds to propagate to task scheduler
|
||||||
|
SetEnabled(ctx context.Context, enabled bool) error //perm:admin
|
||||||
|
|
||||||
|
Enabled(ctx context.Context) (bool, error) //perm:admin
|
||||||
|
|
||||||
|
// WaitQuiet blocks until there are no tasks running
|
||||||
|
WaitQuiet(ctx context.Context) error //perm:admin
|
||||||
|
|
||||||
|
// returns a random UUID of worker session, generated randomly when worker
|
||||||
|
// process starts
|
||||||
|
ProcessSession(context.Context) (uuid.UUID, error) //perm:admin
|
||||||
|
|
||||||
|
// Like ProcessSession, but returns an error when worker is disabled
|
||||||
|
Session(context.Context) (uuid.UUID, error) //perm:admin
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var _ storiface.WorkerCalls = *new(Worker)
|
||||||
|
|||||||
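The ProcessSession/Session split above enables a simple liveness probe: the UUID changes when the worker process restarts, and Session errors when the worker is disabled. A hedged sketch (polling interval and function name are arbitrary choices):

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/filecoin-project/lotus/api"
)

// watchWorker returns when the worker restarts or becomes unavailable.
func watchWorker(ctx context.Context, w api.Worker) error {
	base, err := w.ProcessSession(ctx)
	if err != nil {
		return err
	}
	for {
		time.Sleep(10 * time.Second)

		cur, err := w.Session(ctx)
		if err != nil {
			return fmt.Errorf("worker disabled or unreachable: %w", err)
		}
		if cur != base {
			return fmt.Errorf("worker restarted: session %s -> %s", base, cur)
		}
	}
}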
@ -1,68 +0,0 @@
|
|||||||
package apibstore
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
blocks "github.com/ipfs/go-block-format"
|
|
||||||
"github.com/ipfs/go-cid"
|
|
||||||
"golang.org/x/xerrors"
|
|
||||||
|
|
||||||
"github.com/filecoin-project/lotus/lib/blockstore"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ChainIO interface {
|
|
||||||
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
|
|
||||||
ChainHasObj(context.Context, cid.Cid) (bool, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type apiBStore struct {
|
|
||||||
api ChainIO
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAPIBlockstore(cio ChainIO) blockstore.Blockstore {
|
|
||||||
return &apiBStore{
|
|
||||||
api: cio,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *apiBStore) DeleteBlock(cid.Cid) error {
|
|
||||||
return xerrors.New("not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *apiBStore) Has(c cid.Cid) (bool, error) {
|
|
||||||
return a.api.ChainHasObj(context.TODO(), c)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *apiBStore) Get(c cid.Cid) (blocks.Block, error) {
|
|
||||||
bb, err := a.api.ChainReadObj(context.TODO(), c)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return blocks.NewBlockWithCid(bb, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *apiBStore) GetSize(c cid.Cid) (int, error) {
|
|
||||||
bb, err := a.api.ChainReadObj(context.TODO(), c)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return len(bb), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *apiBStore) Put(blocks.Block) error {
|
|
||||||
return xerrors.New("not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *apiBStore) PutMany([]blocks.Block) error {
|
|
||||||
return xerrors.New("not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *apiBStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
|
|
||||||
return nil, xerrors.New("not supported")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *apiBStore) HashOnRead(enabled bool) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ blockstore.Blockstore = &apiBStore{}
|
|
||||||
@ -1,44 +0,0 @@
|
|||||||
package apistruct
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
|
||||||
"github.com/filecoin-project/lotus/api"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// When changing these, update docs/API.md too
|
|
||||||
|
|
||||||
PermRead auth.Permission = "read" // default
|
|
||||||
PermWrite auth.Permission = "write"
|
|
||||||
PermSign auth.Permission = "sign" // Use wallet keys for signing
|
|
||||||
PermAdmin auth.Permission = "admin" // Manage permissions
|
|
||||||
)
|
|
||||||
|
|
||||||
var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin}
|
|
||||||
var DefaultPerms = []auth.Permission{PermRead}
|
|
||||||
|
|
||||||
func PermissionedStorMinerAPI(a api.StorageMiner) api.StorageMiner {
|
|
||||||
var out StorageMinerStruct
|
|
||||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
|
|
||||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
|
|
||||||
return &out
|
|
||||||
}
|
|
||||||
|
|
||||||
func PermissionedFullAPI(a api.FullNode) api.FullNode {
|
|
||||||
var out FullNodeStruct
|
|
||||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
|
|
||||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
|
|
||||||
return &out
|
|
||||||
}
|
|
||||||
|
|
||||||
func PermissionedWorkerAPI(a api.WorkerAPI) api.WorkerAPI {
|
|
||||||
var out WorkerStruct
|
|
||||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
|
|
||||||
return &out
|
|
||||||
}
|
|
||||||
|
|
||||||
func PermissionedWalletAPI(a api.WalletAPI) api.WalletAPI {
|
|
||||||
var out WalletStruct
|
|
||||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
|
|
||||||
return &out
|
|
||||||
}
|
|
||||||
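These helpers do not disappear: equivalents driven by the //perm: tags now live in the api package (the TestPermTags test added earlier in this diff exercises them). The mechanism is unchanged - auth.PermissionedProxy fills a struct of function pointers with stubs that reject callers lacking the required permission. A sketch of the pattern, assuming the generated api.WorkerStruct keeps an Internal field and that AllPermissions/DefaultPerms moved into the api package:

package example

import (
	"github.com/filecoin-project/go-jsonrpc/auth"

	"github.com/filecoin-project/lotus/api"
)

// permissionedWorker wraps a Worker so each RPC method first checks the
// caller's permissions against the method's //perm: tag.
func permissionedWorker(a api.Worker) api.Worker {
	var out api.WorkerStruct
	auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.Internal)
	return &out
}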
(File diff suppressed because it is too large)
@ -1,9 +0,0 @@
|
|||||||
package apistruct
|
|
||||||
|
|
||||||
import "testing"
|
|
||||||
|
|
||||||
func TestPermTags(t *testing.T) {
|
|
||||||
_ = PermissionedFullAPI(&FullNodeStruct{})
|
|
||||||
_ = PermissionedStorMinerAPI(&StorageMinerStruct{})
|
|
||||||
_ = PermissionedWorkerAPI(&WorkerStruct{})
|
|
||||||
}
|
|
||||||
398
api/cbor_gen.go
398
api/cbor_gen.go
@ -5,14 +5,19 @@ package api
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"sort"
|
||||||
|
|
||||||
abi "github.com/filecoin-project/go-state-types/abi"
|
abi "github.com/filecoin-project/go-state-types/abi"
|
||||||
|
market "github.com/filecoin-project/specs-actors/actors/builtin/market"
|
||||||
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
||||||
|
cid "github.com/ipfs/go-cid"
|
||||||
cbg "github.com/whyrusleeping/cbor-gen"
|
cbg "github.com/whyrusleeping/cbor-gen"
|
||||||
xerrors "golang.org/x/xerrors"
|
xerrors "golang.org/x/xerrors"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ = xerrors.Errorf
|
var _ = xerrors.Errorf
|
||||||
|
var _ = cid.Undef
|
||||||
|
var _ = sort.Sort
|
||||||
|
|
||||||
func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
|
func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
|
||||||
if t == nil {
|
if t == nil {
|
||||||
@ -171,7 +176,8 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
// Field doesn't exist on this type, so ignore it
|
||||||
|
cbg.ScanForLinks(r, func(cid.Cid) {})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -319,7 +325,8 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
// Field doesn't exist on this type, so ignore it
|
||||||
|
cbg.ScanForLinks(r, func(cid.Cid) {})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -427,7 +434,8 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
// Field doesn't exist on this type, so ignore it
|
||||||
|
cbg.ScanForLinks(r, func(cid.Cid) {})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -575,7 +583,8 @@ func (t *SealTicket) UnmarshalCBOR(r io.Reader) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
// Field doesn't exist on this type, so ignore it
|
||||||
|
cbg.ScanForLinks(r, func(cid.Cid) {})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -723,7 +732,386 @@ func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
// Field doesn't exist on this type, so ignore it
|
||||||
|
cbg.ScanForLinks(r, func(cid.Cid) {})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (t *PieceDealInfo) MarshalCBOR(w io.Writer) error {
|
||||||
|
if t == nil {
|
||||||
|
_, err := w.Write(cbg.CborNull)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := w.Write([]byte{165}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
scratch := make([]byte, 9)
|
||||||
|
|
||||||
|
// t.PublishCid (cid.Cid) (struct)
|
||||||
|
if len("PublishCid") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"PublishCid\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("PublishCid")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.PublishCid == nil {
|
||||||
|
if _, err := w.Write(cbg.CborNull); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil {
|
||||||
|
return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.DealID (abi.DealID) (uint64)
|
||||||
|
if len("DealID") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"DealID\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("DealID")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.DealProposal (market.DealProposal) (struct)
|
||||||
|
if len("DealProposal") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"DealProposal\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealProposal"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("DealProposal")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := t.DealProposal.MarshalCBOR(w); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.DealSchedule (api.DealSchedule) (struct)
|
||||||
|
if len("DealSchedule") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"DealSchedule\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealSchedule"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("DealSchedule")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := t.DealSchedule.MarshalCBOR(w); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// t.KeepUnsealed (bool) (bool)
|
||||||
|
if len("KeepUnsealed") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"KeepUnsealed\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("KeepUnsealed"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("KeepUnsealed")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteBool(w, t.KeepUnsealed); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *PieceDealInfo) UnmarshalCBOR(r io.Reader) error {
|
||||||
|
*t = PieceDealInfo{}
|
||||||
|
|
||||||
|
br := cbg.GetPeeker(r)
|
||||||
|
scratch := make([]byte, 8)
|
||||||
|
|
||||||
|
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if maj != cbg.MajMap {
|
||||||
|
return fmt.Errorf("cbor input should be of type map")
|
||||||
|
}
|
||||||
|
|
||||||
|
if extra > cbg.MaxLength {
|
||||||
|
return fmt.Errorf("PieceDealInfo: map struct too large (%d)", extra)
|
||||||
|
}
|
||||||
|
|
||||||
|
var name string
|
||||||
|
n := extra
|
||||||
|
|
||||||
|
for i := uint64(0); i < n; i++ {
|
||||||
|
|
||||||
|
{
|
||||||
|
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
name = string(sval)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch name {
|
||||||
|
// t.PublishCid (cid.Cid) (struct)
|
||||||
|
case "PublishCid":
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
b, err := br.ReadByte()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if b != cbg.CborNull[0] {
|
||||||
|
if err := br.UnreadByte(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := cbg.ReadCid(br)
|
||||||
|
if err != nil {
|
||||||
|
return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.PublishCid = &c
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
// t.DealID (abi.DealID) (uint64)
|
||||||
|
case "DealID":
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if maj != cbg.MajUnsignedInt {
|
||||||
|
return fmt.Errorf("wrong type for uint64 field")
|
||||||
|
}
|
||||||
|
t.DealID = abi.DealID(extra)
|
||||||
|
|
||||||
|
}
|
||||||
|
// t.DealProposal (market.DealProposal) (struct)
|
||||||
|
case "DealProposal":
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
b, err := br.ReadByte()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if b != cbg.CborNull[0] {
|
||||||
|
if err := br.UnreadByte(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.DealProposal = new(market.DealProposal)
|
||||||
|
if err := t.DealProposal.UnmarshalCBOR(br); err != nil {
|
||||||
|
return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
// t.DealSchedule (api.DealSchedule) (struct)
|
||||||
|
case "DealSchedule":
|
||||||
|
|
||||||
|
{
|
||||||
|
|
||||||
|
if err := t.DealSchedule.UnmarshalCBOR(br); err != nil {
|
||||||
|
return xerrors.Errorf("unmarshaling t.DealSchedule: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
// t.KeepUnsealed (bool) (bool)
|
||||||
|
case "KeepUnsealed":
|
||||||
|
|
||||||
|
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if maj != cbg.MajOther {
|
||||||
|
return fmt.Errorf("booleans must be major type 7")
|
||||||
|
}
|
||||||
|
switch extra {
|
||||||
|
case 20:
|
||||||
|
t.KeepUnsealed = false
|
||||||
|
case 21:
|
||||||
|
t.KeepUnsealed = true
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
// Field doesn't exist on this type, so ignore it
|
||||||
|
cbg.ScanForLinks(r, func(cid.Cid) {})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (t *DealSchedule) MarshalCBOR(w io.Writer) error {
|
||||||
|
if t == nil {
|
||||||
|
_, err := w.Write(cbg.CborNull)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := w.Write([]byte{162}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
scratch := make([]byte, 9)
|
||||||
|
|
||||||
|
// t.StartEpoch (abi.ChainEpoch) (int64)
|
||||||
|
if len("StartEpoch") > cbg.MaxLength {
|
||||||
|
return xerrors.Errorf("Value in field \"StartEpoch\" was too long")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StartEpoch"))); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := io.WriteString(w, string("StartEpoch")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.StartEpoch >= 0 {
|
||||||
|
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.StartEpoch)); err != nil {
|
||||||
|
			return err
		}
	} else {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.StartEpoch-1)); err != nil {
			return err
		}
	}

	// t.EndEpoch (abi.ChainEpoch) (int64)
	if len("EndEpoch") > cbg.MaxLength {
		return xerrors.Errorf("Value in field \"EndEpoch\" was too long")
	}

	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("EndEpoch"))); err != nil {
		return err
	}
	if _, err := io.WriteString(w, string("EndEpoch")); err != nil {
		return err
	}

	if t.EndEpoch >= 0 {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EndEpoch)); err != nil {
			return err
		}
	} else {
		if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EndEpoch-1)); err != nil {
			return err
		}
	}
	return nil
}

func (t *DealSchedule) UnmarshalCBOR(r io.Reader) error {
	*t = DealSchedule{}

	br := cbg.GetPeeker(r)
	scratch := make([]byte, 8)

	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
	if err != nil {
		return err
	}
	if maj != cbg.MajMap {
		return fmt.Errorf("cbor input should be of type map")
	}

	if extra > cbg.MaxLength {
		return fmt.Errorf("DealSchedule: map struct too large (%d)", extra)
	}

	var name string
	n := extra

	for i := uint64(0); i < n; i++ {

		{
			sval, err := cbg.ReadStringBuf(br, scratch)
			if err != nil {
				return err
			}

			name = string(sval)
		}

		switch name {
		// t.StartEpoch (abi.ChainEpoch) (int64)
		case "StartEpoch":
			{
				maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
				var extraI int64
				if err != nil {
					return err
				}
				switch maj {
				case cbg.MajUnsignedInt:
					extraI = int64(extra)
					if extraI < 0 {
						return fmt.Errorf("int64 positive overflow")
					}
				case cbg.MajNegativeInt:
					extraI = int64(extra)
					if extraI < 0 {
						return fmt.Errorf("int64 negative overflow")
					}
					extraI = -1 - extraI
				default:
					return fmt.Errorf("wrong type for int64 field: %d", maj)
				}

				t.StartEpoch = abi.ChainEpoch(extraI)
			}
		// t.EndEpoch (abi.ChainEpoch) (int64)
		case "EndEpoch":
			{
				maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
				var extraI int64
				if err != nil {
					return err
				}
				switch maj {
				case cbg.MajUnsignedInt:
					extraI = int64(extra)
					if extraI < 0 {
						return fmt.Errorf("int64 positive overflow")
					}
				case cbg.MajNegativeInt:
					extraI = int64(extra)
					if extraI < 0 {
						return fmt.Errorf("int64 negative overflow")
					}
					extraI = -1 - extraI
				default:
					return fmt.Errorf("wrong type for int64 field: %d", maj)
				}

				t.EndEpoch = abi.ChainEpoch(extraI)
			}

		default:
			// Field doesn't exist on this type, so ignore it
			cbg.ScanForLinks(r, func(cid.Cid) {})
		}
	}
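Note: the generated pair above round-trips through plain io.Writer/io.Reader values. A minimal sketch of exercising it (assuming the DealSchedule type from the storage-sealing package whose generated code is shown in this diff):

package main

import (
	"bytes"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
)

func main() {
	in := sealing.DealSchedule{StartEpoch: abi.ChainEpoch(10101), EndEpoch: abi.ChainEpoch(20202)}

	// MarshalCBOR writes the two-entry map encoding produced above.
	var buf bytes.Buffer
	if err := in.MarshalCBOR(&buf); err != nil {
		panic(err)
	}

	// UnmarshalCBOR reads it back, including the negative-epoch handling.
	var out sealing.DealSchedule
	if err := out.UnmarshalCBOR(&buf); err != nil {
		panic(err)
	}
	fmt.Println(out.StartEpoch, out.EndEpoch) // 10101 20202
}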
35 api/checkstatuscode_string.go Normal file
@ -0,0 +1,35 @@
// Code generated by "stringer -type=CheckStatusCode -trimprefix=CheckStatus"; DO NOT EDIT.

package api

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[CheckStatusMessageSerialize-1]
	_ = x[CheckStatusMessageSize-2]
	_ = x[CheckStatusMessageValidity-3]
	_ = x[CheckStatusMessageMinGas-4]
	_ = x[CheckStatusMessageMinBaseFee-5]
	_ = x[CheckStatusMessageBaseFee-6]
	_ = x[CheckStatusMessageBaseFeeLowerBound-7]
	_ = x[CheckStatusMessageBaseFeeUpperBound-8]
	_ = x[CheckStatusMessageGetStateNonce-9]
	_ = x[CheckStatusMessageNonce-10]
	_ = x[CheckStatusMessageGetStateBalance-11]
	_ = x[CheckStatusMessageBalance-12]
}

const _CheckStatusCode_name = "MessageSerializeMessageSizeMessageValidityMessageMinGasMessageMinBaseFeeMessageBaseFeeMessageBaseFeeLowerBoundMessageBaseFeeUpperBoundMessageGetStateNonceMessageNonceMessageGetStateBalanceMessageBalance"

var _CheckStatusCode_index = [...]uint8{0, 16, 27, 42, 55, 72, 86, 110, 134, 154, 166, 188, 202}

func (i CheckStatusCode) String() string {
	i -= 1
	if i < 0 || i >= CheckStatusCode(len(_CheckStatusCode_index)-1) {
		return "CheckStatusCode(" + strconv.FormatInt(int64(i+1), 10) + ")"
	}
	return _CheckStatusCode_name[_CheckStatusCode_index[i]:_CheckStatusCode_index[i+1]]
}
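Note: the stringer pattern above packs all constant names into one backing string and slices it by byte offsets. A small self-contained sketch of the same technique (the Color type here is hypothetical, purely for illustration):

package main

import "fmt"

// Color mirrors the stringer layout: one backing string plus an
// index table of byte offsets into it.
type Color int

const (
	Red Color = iota + 1
	Green
	Blue
)

const _Color_name = "RedGreenBlue"

var _Color_index = [...]uint8{0, 3, 8, 12}

func (i Color) String() string {
	i -= 1 // constants start at 1, the index table at 0
	if i < 0 || i >= Color(len(_Color_index)-1) {
		return fmt.Sprintf("Color(%d)", int(i+1))
	}
	return _Color_name[_Color_index[i]:_Color_index[i+1]]
}

func main() {
	fmt.Println(Green) // Green (bytes 3..8 of _Color_name)
}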
@ -10,72 +10,84 @@ import (
 	"github.com/filecoin-project/go-jsonrpc"
 
 	"github.com/filecoin-project/lotus/api"
-	"github.com/filecoin-project/lotus/api/apistruct"
+	"github.com/filecoin-project/lotus/api/v0api"
+	"github.com/filecoin-project/lotus/api/v1api"
 	"github.com/filecoin-project/lotus/lib/rpcenc"
 )
 
-// NewCommonRPC creates a new http jsonrpc client.
-func NewCommonRPC(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
-	var res apistruct.CommonStruct
+// NewCommonRPCV0 creates a new http jsonrpc client.
+func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
+	var res v0api.CommonNetStruct
 	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
-		[]interface{}{
-			&res.Internal,
-		},
-		requestHeader,
-	)
+		api.GetInternalStructs(&res), requestHeader)
 
 	return &res, closer, err
 }
 
-// NewFullNodeRPC creates a new http jsonrpc client.
-func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
-	var res apistruct.FullNodeStruct
+// NewFullNodeRPCV0 creates a new http jsonrpc client.
+func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.FullNode, jsonrpc.ClientCloser, error) {
+	var res v0api.FullNodeStruct
 
 	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
-		[]interface{}{
-			&res.CommonStruct.Internal,
-			&res.Internal,
-		}, requestHeader)
+		api.GetInternalStructs(&res), requestHeader)
 
 	return &res, closer, err
 }
 
-// NewStorageMinerRPC creates a new http jsonrpc client for miner
-func NewStorageMinerRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
-	var res apistruct.StorageMinerStruct
+// NewFullNodeRPCV1 creates a new http jsonrpc client.
+func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
+	var res v1api.FullNodeStruct
 	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
-		[]interface{}{
-			&res.CommonStruct.Internal,
-			&res.Internal,
-		},
-		requestHeader,
-		opts...,
-	)
+		api.GetInternalStructs(&res), requestHeader)
 
 	return &res, closer, err
 }
 
-func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
-	u, err := url.Parse(addr)
+func getPushUrl(addr string) (string, error) {
+	pushUrl, err := url.Parse(addr)
 	if err != nil {
-		return nil, nil, err
+		return "", err
 	}
-	switch u.Scheme {
+	switch pushUrl.Scheme {
 	case "ws":
-		u.Scheme = "http"
+		pushUrl.Scheme = "http"
 	case "wss":
-		u.Scheme = "https"
+		pushUrl.Scheme = "https"
 	}
 	///rpc/v0 -> /rpc/streams/v0/push
 
-	u.Path = path.Join(u.Path, "../streams/v0/push")
-
-	var res apistruct.WorkerStruct
+	pushUrl.Path = path.Join(pushUrl.Path, "../streams/v0/push")
+	return pushUrl.String(), nil
+}
+
+// NewStorageMinerRPCV0 creates a new http jsonrpc client for miner
+func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) {
+	pushUrl, err := getPushUrl(addr)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var res v0api.StorageMinerStruct
 	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
-		[]interface{}{
-			&res.Internal,
-		},
+		api.GetInternalStructs(&res), requestHeader,
+		append([]jsonrpc.Option{
+			rpcenc.ReaderParamEncoder(pushUrl),
+		}, opts...)...)
+
+	return &res, closer, err
+}
+
+func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.Worker, jsonrpc.ClientCloser, error) {
+	pushUrl, err := getPushUrl(addr)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var res api.WorkerStruct
+	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
+		api.GetInternalStructs(&res),
 		requestHeader,
-		rpcenc.ReaderParamEncoder(u.String()),
+		rpcenc.ReaderParamEncoder(pushUrl),
 		jsonrpc.WithNoReconnect(),
 		jsonrpc.WithTimeout(30*time.Second),
 	)
@ -83,13 +95,11 @@ func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (
 	return &res, closer, err
 }
 
-// NewGatewayRPC creates a new http jsonrpc client for a gateway node.
-func NewGatewayRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.GatewayAPI, jsonrpc.ClientCloser, error) {
-	var res apistruct.GatewayStruct
+// NewGatewayRPCV1 creates a new http jsonrpc client for a gateway node.
+func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.Gateway, jsonrpc.ClientCloser, error) {
+	var res api.GatewayStruct
 	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
-		[]interface{}{
-			&res.Internal,
-		},
+		api.GetInternalStructs(&res),
 		requestHeader,
 		opts...,
 	)
@ -97,12 +107,22 @@ func NewGatewayRPC(ctx context.Context, addr string, requestHeader http.Header,
 	return &res, closer, err
 }
 
-func NewWalletRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WalletAPI, jsonrpc.ClientCloser, error) {
-	var res apistruct.WalletStruct
+// NewGatewayRPCV0 creates a new http jsonrpc client for a gateway node.
+func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.Gateway, jsonrpc.ClientCloser, error) {
+	var res v0api.GatewayStruct
 	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
-		[]interface{}{
-			&res.Internal,
-		},
+		api.GetInternalStructs(&res),
+		requestHeader,
+		opts...,
+	)
+
+	return &res, closer, err
+}
+
+func NewWalletRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Wallet, jsonrpc.ClientCloser, error) {
+	var res api.WalletStruct
+	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
+		api.GetInternalStructs(&res),
 		requestHeader,
 	)
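Note: a minimal sketch of dialing the renamed v1 constructor (the endpoint URL and bearer token below are illustrative placeholders, not values from this change):

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Hypothetical local endpoint and token; substitute your node's values.
	headers := http.Header{"Authorization": []string{"Bearer <token>"}}
	node, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1", headers)
	if err != nil {
		panic(err)
	}
	defer closer()

	// Any FullNode method is now a plain call over the merged client.
	head, err := node.ChainHead(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("head epoch:", head.Height())
}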
74 api/docgen-openrpc/cmd/docgen_openrpc.go Normal file
@ -0,0 +1,74 @@
package main

import (
	"compress/gzip"
	"encoding/json"
	"io"
	"log"
	"os"

	"github.com/filecoin-project/lotus/api/docgen"

	docgen_openrpc "github.com/filecoin-project/lotus/api/docgen-openrpc"
)

/*
main defines a small program that writes an OpenRPC document describing
a Lotus API to stdout.

If the first argument is "miner", the document will describe the StorageMiner API.
If not (no, or any other args), the document will describe the Full API.

Use:

	go run ./api/openrpc/cmd ["api/api_full.go"|"api/api_storage.go"|"api/api_worker.go"] ["FullNode"|"StorageMiner"|"Worker"]

With gzip compression: a '-gzip' flag is made available as an optional third argument. Note that position matters.

	go run ./api/openrpc/cmd ["api/api_full.go"|"api/api_storage.go"|"api/api_worker.go"] ["FullNode"|"StorageMiner"|"Worker"] -gzip

*/
func main() {
	Comments, GroupDocs := docgen.ParseApiASTInfo(os.Args[1], os.Args[2], os.Args[3], os.Args[4])

	doc := docgen_openrpc.NewLotusOpenRPCDocument(Comments, GroupDocs)

	i, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3])
	doc.RegisterReceiverName("Filecoin", i)

	out, err := doc.Discover()
	if err != nil {
		log.Fatalln(err)
	}

	var jsonOut []byte
	var writer io.WriteCloser

	// Use os.Args to handle a somewhat hacky flag for the gzip option.
	// Could use flags package to handle this more cleanly, but that requires changes elsewhere
	// the scope of which just isn't warranted by this one use case which will usually be run
	// programmatically anyways.
	if len(os.Args) > 5 && os.Args[5] == "-gzip" {
		jsonOut, err = json.Marshal(out)
		if err != nil {
			log.Fatalln(err)
		}
		writer = gzip.NewWriter(os.Stdout)
	} else {
		jsonOut, err = json.MarshalIndent(out, "", "    ")
		if err != nil {
			log.Fatalln(err)
		}
		writer = os.Stdout
	}

	_, err = writer.Write(jsonOut)
	if err != nil {
		log.Fatalln(err)
	}
	err = writer.Close()
	if err != nil {
		log.Fatalln(err)
	}
}
161 api/docgen-openrpc/openrpc.go Normal file
@ -0,0 +1,161 @@
package docgenopenrpc

import (
	"encoding/json"
	"go/ast"
	"net"
	"reflect"

	"github.com/alecthomas/jsonschema"
	go_openrpc_reflect "github.com/etclabscore/go-openrpc-reflect"
	"github.com/filecoin-project/lotus/api/docgen"
	"github.com/filecoin-project/lotus/build"
	"github.com/ipfs/go-cid"
	meta_schema "github.com/open-rpc/meta-schema"
)

// schemaDictEntry represents a type association passed to the jsonschema reflector.
type schemaDictEntry struct {
	example interface{}
	rawJson string
}

const integerD = `{
	"title": "number",
	"type": "number",
	"description": "Number is a number"
}`

const cidCidD = `{"title": "Content Identifier", "type": "string", "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash."}`

func OpenRPCSchemaTypeMapper(ty reflect.Type) *jsonschema.Type {
	unmarshalJSONToJSONSchemaType := func(input string) *jsonschema.Type {
		var js jsonschema.Type
		err := json.Unmarshal([]byte(input), &js)
		if err != nil {
			panic(err)
		}
		return &js
	}

	if ty.Kind() == reflect.Ptr {
		ty = ty.Elem()
	}

	if ty == reflect.TypeOf((*interface{})(nil)).Elem() {
		return &jsonschema.Type{Type: "object", AdditionalProperties: []byte("true")}
	}

	// Second, handle other types.
	// Use a slice instead of a map because it preserves order, as a logic safeguard/fallback.
	dict := []schemaDictEntry{
		{cid.Cid{}, cidCidD},
	}

	for _, d := range dict {
		if reflect.TypeOf(d.example) == ty {
			tt := unmarshalJSONToJSONSchemaType(d.rawJson)

			return tt
		}
	}

	// Handle primitive types in case there are generic cases
	// specific to our services.
	switch ty.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		// Return all integer types as the hex representation integer schema.
		ret := unmarshalJSONToJSONSchemaType(integerD)
		return ret
	case reflect.Uintptr:
		return &jsonschema.Type{Type: "number", Title: "uintptr-title"}
	case reflect.Struct:
	case reflect.Map:
	case reflect.Slice, reflect.Array:
	case reflect.Float32, reflect.Float64:
	case reflect.Bool:
	case reflect.String:
	case reflect.Ptr, reflect.Interface:
	default:
	}

	return nil
}

// NewLotusOpenRPCDocument defines application-specific documentation and configuration for its OpenRPC document.
func NewLotusOpenRPCDocument(Comments, GroupDocs map[string]string) *go_openrpc_reflect.Document {
	d := &go_openrpc_reflect.Document{}

	// Register "Meta" document fields.
	// These include getters for
	// - Servers object
	// - Info object
	// - ExternalDocs object
	//
	// These objects represent server-specific data that cannot be
	// reflected.
	d.WithMeta(&go_openrpc_reflect.MetaT{
		GetServersFn: func() func(listeners []net.Listener) (*meta_schema.Servers, error) {
			return func(listeners []net.Listener) (*meta_schema.Servers, error) {
				return nil, nil
			}
		},
		GetInfoFn: func() (info *meta_schema.InfoObject) {
			info = &meta_schema.InfoObject{}
			title := "Lotus RPC API"
			info.Title = (*meta_schema.InfoObjectProperties)(&title)

			version := build.BuildVersion
			info.Version = (*meta_schema.InfoObjectVersion)(&version)
			return info
		},
		GetExternalDocsFn: func() (exdocs *meta_schema.ExternalDocumentationObject) {
			return nil // FIXME
		},
	})

	// Use a provided Ethereum default configuration as a base.
	appReflector := &go_openrpc_reflect.EthereumReflectorT{}

	// Install overrides for the json schema->type map fn used by the jsonschema reflect package.
	appReflector.FnSchemaTypeMap = func() func(ty reflect.Type) *jsonschema.Type {
		return OpenRPCSchemaTypeMapper
	}

	appReflector.FnIsMethodEligible = func(m reflect.Method) bool {
		for i := 0; i < m.Func.Type().NumOut(); i++ {
			if m.Func.Type().Out(i).Kind() == reflect.Chan {
				return false
			}
		}
		return go_openrpc_reflect.EthereumReflector.IsMethodEligible(m)
	}
	appReflector.FnGetMethodName = func(moduleName string, r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) {
		if m.Name == "ID" {
			return moduleName + "_ID", nil
		}
		if moduleName == "rpc" && m.Name == "Discover" {
			return "rpc.discover", nil
		}

		return moduleName + "." + m.Name, nil
	}

	appReflector.FnGetMethodSummary = func(r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) {
		if v, ok := Comments[m.Name]; ok {
			return v, nil
		}
		return "", nil // noComment
	}

	appReflector.FnSchemaExamples = func(ty reflect.Type) (examples *meta_schema.Examples, err error) {
		v := docgen.ExampleValue("unknown", ty, ty) // This isn't ideal, but seems to work well enough.
		return &meta_schema.Examples{
			meta_schema.AlwaysTrue(v),
		}, nil
	}

	// Finally, register the configured reflector to the document.
	d.WithReflector(appReflector)
	return d
}
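Note: a small sketch of what OpenRPCSchemaTypeMapper returns for the cases handled above (field names follow the alecthomas/jsonschema package from the imports; output values trace the dictionaries defined in the file):

package main

import (
	"fmt"
	"reflect"

	docgenopenrpc "github.com/filecoin-project/lotus/api/docgen-openrpc"
	"github.com/ipfs/go-cid"
)

func main() {
	// cid.Cid hits the custom dictionary entry and maps to a string schema.
	cidSchema := docgenopenrpc.OpenRPCSchemaTypeMapper(reflect.TypeOf(cid.Cid{}))
	fmt.Println(cidSchema.Title, cidSchema.Type) // Content Identifier string

	// All integer kinds collapse to the shared "number" schema.
	intSchema := docgenopenrpc.OpenRPCSchemaTypeMapper(reflect.TypeOf(int64(0)))
	fmt.Println(intSchema.Title, intSchema.Type) // number number

	// Unhandled kinds fall through and return nil, deferring to the default reflector.
	fmt.Println(docgenopenrpc.OpenRPCSchemaTypeMapper(reflect.TypeOf("")) == nil) // true
}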
121 api/docgen/cmd/docgen.go Normal file
@ -0,0 +1,121 @@
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"reflect"
	"sort"
	"strings"

	"github.com/filecoin-project/lotus/api/docgen"
)

func main() {
	comments, groupComments := docgen.ParseApiASTInfo(os.Args[1], os.Args[2], os.Args[3], os.Args[4])

	groups := make(map[string]*docgen.MethodGroup)

	_, t, permStruct := docgen.GetAPIType(os.Args[2], os.Args[3])

	for i := 0; i < t.NumMethod(); i++ {
		m := t.Method(i)

		groupName := docgen.MethodGroupFromName(m.Name)

		g, ok := groups[groupName]
		if !ok {
			g = new(docgen.MethodGroup)
			g.Header = groupComments[groupName]
			g.GroupName = groupName
			groups[groupName] = g
		}

		var args []interface{}
		ft := m.Func.Type()
		for j := 2; j < ft.NumIn(); j++ {
			inp := ft.In(j)
			args = append(args, docgen.ExampleValue(m.Name, inp, nil))
		}

		v, err := json.MarshalIndent(args, "", "  ")
		if err != nil {
			panic(err)
		}

		outv := docgen.ExampleValue(m.Name, ft.Out(0), nil)

		ov, err := json.MarshalIndent(outv, "", "  ")
		if err != nil {
			panic(err)
		}

		g.Methods = append(g.Methods, &docgen.Method{
			Name:            m.Name,
			Comment:         comments[m.Name],
			InputExample:    string(v),
			ResponseExample: string(ov),
		})
	}

	var groupslice []*docgen.MethodGroup
	for _, g := range groups {
		groupslice = append(groupslice, g)
	}

	sort.Slice(groupslice, func(i, j int) bool {
		return groupslice[i].GroupName < groupslice[j].GroupName
	})

	fmt.Printf("# Groups\n")

	for _, g := range groupslice {
		fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
		for _, method := range g.Methods {
			fmt.Printf("  * [%s](#%s)\n", method.Name, method.Name)
		}
	}

	for _, g := range groupslice {
		g := g
		fmt.Printf("## %s\n", g.GroupName)
		fmt.Printf("%s\n\n", g.Header)

		sort.Slice(g.Methods, func(i, j int) bool {
			return g.Methods[i].Name < g.Methods[j].Name
		})

		for _, m := range g.Methods {
			fmt.Printf("### %s\n", m.Name)
			fmt.Printf("%s\n\n", m.Comment)

			var meth reflect.StructField
			var ok bool
			for _, ps := range permStruct {
				meth, ok = ps.FieldByName(m.Name)
				if ok {
					break
				}
			}
			if !ok {
				panic("no perms for method: " + m.Name)
			}

			perms := meth.Tag.Get("perm")

			fmt.Printf("Perms: %s\n\n", perms)

			if strings.Count(m.InputExample, "\n") > 0 {
				fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
			} else {
				fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
			}

			if strings.Count(m.ResponseExample, "\n") > 0 {
				fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
			} else {
				fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
			}
		}
	}
}
@ -1,17 +1,19 @@
-package main
+package docgen
 
 import (
-	"encoding/json"
 	"fmt"
 	"go/ast"
 	"go/parser"
 	"go/token"
+	"path/filepath"
 	"reflect"
-	"sort"
 	"strings"
 	"time"
 	"unicode"
 
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-bitfield"
+	"github.com/google/uuid"
 	"github.com/ipfs/go-cid"
 	"github.com/ipfs/go-filestore"
 	metrics "github.com/libp2p/go-libp2p-core/metrics"
@ -21,9 +23,8 @@ import (
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/multiformats/go-multiaddr"
 
-	"github.com/filecoin-project/go-address"
-	"github.com/filecoin-project/go-bitfield"
 	datatransfer "github.com/filecoin-project/go-data-transfer"
+	filestore2 "github.com/filecoin-project/go-fil-markets/filestore"
 	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
 	"github.com/filecoin-project/go-jsonrpc/auth"
 	"github.com/filecoin-project/go-multistore"
@ -33,9 +34,14 @@ import (
 	"github.com/filecoin-project/go-state-types/exitcode"
 
 	"github.com/filecoin-project/lotus/api"
-	"github.com/filecoin-project/lotus/api/apistruct"
+	apitypes "github.com/filecoin-project/lotus/api/types"
+	"github.com/filecoin-project/lotus/api/v0api"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
+	"github.com/filecoin-project/lotus/extern/sector-storage/stores"
+	"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
+	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )
 
@ -82,8 +88,10 @@ func init() {
 	addExample(pid)
 	addExample(&pid)
+
+	multistoreIDExample := multistore.StoreID(50)
+
 	addExample(bitfield.NewFromSet([]uint64{5}))
-	addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
+	addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
 	addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
 	addExample(abi.ChainEpoch(10101))
 	addExample(crypto.SigTypeBLS)
@ -106,28 +114,31 @@ func init() {
 	addExample(network.Connected)
 	addExample(dtypes.NetworkName("lotus"))
 	addExample(api.SyncStateStage(1))
-	addExample(build.FullAPIVersion)
+	addExample(api.FullAPIVersion1)
 	addExample(api.PCHInbound)
 	addExample(time.Minute)
 	addExample(datatransfer.TransferID(3))
 	addExample(datatransfer.Ongoing)
-	addExample(multistore.StoreID(50))
+	addExample(multistoreIDExample)
+	addExample(&multistoreIDExample)
 	addExample(retrievalmarket.ClientEventDealAccepted)
 	addExample(retrievalmarket.DealStatusNew)
 	addExample(network.ReachabilityPublic)
 	addExample(build.NewestNetworkVersion)
+	addExample(map[string]int{"name": 42})
+	addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()})
 	addExample(&types.ExecutionTrace{
-		Msg:    exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
-		MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
+		Msg:    ExampleValue("init", reflect.TypeOf(&types.Message{}), nil).(*types.Message),
+		MsgRct: ExampleValue("init", reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
 	})
 	addExample(map[string]types.Actor{
-		"t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor),
+		"t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),
 	})
 	addExample(map[string]api.MarketDeal{
-		"t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
+		"t026363": ExampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
 	})
 	addExample(map[string]api.MarketBalance{
-		"t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
+		"t026363": ExampleValue("init", reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
 	})
 	addExample(map[string]*pubsub.TopicScoreSnapshot{
 		"/blocks": {
@ -162,9 +173,139 @@ func init() {
 	// because reflect.TypeOf(maddr) returns the concrete type...
 	ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr
 
+	// miner specific
+	addExample(filestore2.Path(".lotusminer/fstmp123"))
+	si := multistore.StoreID(12)
+	addExample(&si)
+	addExample(retrievalmarket.DealID(5))
+	addExample(abi.ActorID(1000))
+	addExample(map[string][]api.SealedRef{
+		"98000": {
+			api.SealedRef{
+				SectorID: 100,
+				Offset:   10 << 20,
+				Size:     1 << 20,
+			},
+		},
+	})
+	addExample(api.SectorState(sealing.Proving))
+	addExample(stores.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"))
+	addExample(storiface.FTUnsealed)
+	addExample(storiface.PathSealing)
+	addExample(map[stores.ID][]stores.Decl{
+		"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": {
+			{
+				SectorID:       abi.SectorID{Miner: 1000, Number: 100},
+				SectorFileType: storiface.FTSealed,
+			},
+		},
+	})
+	addExample(map[stores.ID]string{
+		"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path",
+	})
+	addExample(map[uuid.UUID][]storiface.WorkerJob{
+		uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): {
+			{
+				ID: storiface.CallID{
+					Sector: abi.SectorID{Miner: 1000, Number: 100},
+					ID:     uuid.MustParse("76081ba0-61bd-45a5-bc08-af05f1c26e5d"),
+				},
+				Sector:   abi.SectorID{Miner: 1000, Number: 100},
+				Task:     sealtasks.TTPreCommit2,
+				RunWait:  0,
+				Start:    time.Unix(1605172927, 0).UTC(),
+				Hostname: "host",
+			},
+		},
+	})
+	addExample(map[uuid.UUID]storiface.WorkerStats{
+		uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): {
+			Info: storiface.WorkerInfo{
+				Hostname: "host",
+				Resources: storiface.WorkerResources{
+					MemPhysical: 256 << 30,
+					MemSwap:     120 << 30,
+					MemReserved: 2 << 30,
+					CPUs:        64,
+					GPUs:        []string{"aGPU 1337"},
+				},
+			},
+			Enabled:    true,
+			MemUsedMin: 0,
+			MemUsedMax: 0,
+			GpuUsed:    false,
+			CpuUse:     0,
+		},
+	})
+	addExample(storiface.ErrorCode(0))
+	addExample(map[abi.SectorNumber]string{
+		123: "can't acquire read lock",
+	})
+	addExample(map[api.SectorState]int{
+		api.SectorState(sealing.Proving): 120,
+	})
+	addExample([]abi.SectorNumber{123, 124})
+
+	// worker specific
+	addExample(storiface.AcquireMove)
+	addExample(storiface.UnpaddedByteIndex(abi.PaddedPieceSize(1 << 20).Unpadded()))
+	addExample(map[sealtasks.TaskType]struct{}{
+		sealtasks.TTPreCommit2: {},
+	})
+	addExample(sealtasks.TTCommit2)
+	addExample(apitypes.OpenRPCDocument{
+		"openrpc": "1.2.6",
+		"info": map[string]interface{}{
+			"title":   "Lotus RPC API",
+			"version": "1.2.1/generated=2020-11-22T08:22:42-06:00",
+		},
+		"methods": []interface{}{}},
+	)
+
+	addExample(api.CheckStatusCode(0))
+	addExample(map[string]interface{}{"abc": 123})
 }
 
-func exampleValue(t, parent reflect.Type) interface{} {
+func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
+	switch pkg {
+	case "api": // latest
+		switch name {
+		case "FullNode":
+			i = &api.FullNodeStruct{}
+			t = reflect.TypeOf(new(struct{ api.FullNode })).Elem()
+			permStruct = append(permStruct, reflect.TypeOf(api.FullNodeStruct{}.Internal))
+			permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal))
+			permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal))
+		case "StorageMiner":
+			i = &api.StorageMinerStruct{}
+			t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem()
+			permStruct = append(permStruct, reflect.TypeOf(api.StorageMinerStruct{}.Internal))
+			permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal))
+			permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal))
+		case "Worker":
+			i = &api.WorkerStruct{}
+			t = reflect.TypeOf(new(struct{ api.Worker })).Elem()
+			permStruct = append(permStruct, reflect.TypeOf(api.WorkerStruct{}.Internal))
+		default:
+			panic("unknown type")
+		}
+	case "v0api":
+		switch name {
+		case "FullNode":
+			i = v0api.FullNodeStruct{}
+			t = reflect.TypeOf(new(struct{ v0api.FullNode })).Elem()
+			permStruct = append(permStruct, reflect.TypeOf(v0api.FullNodeStruct{}.Internal))
+			permStruct = append(permStruct, reflect.TypeOf(v0api.CommonStruct{}.Internal))
+			permStruct = append(permStruct, reflect.TypeOf(v0api.NetStruct{}.Internal))
+		default:
+			panic("unknown type")
		}
	}
	return
}
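Note: GetAPIType is the lookup both doc generators share. A minimal sketch of calling it for the latest FullNode surface (the counts in the comments are inferred from the switch above):

package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/api/docgen"
)

func main() {
	// Ask for the latest ("api" package) FullNode API surface.
	i, t, permStructs := docgen.GetAPIType("FullNode", "api")

	fmt.Printf("receiver: %T\n", i)                 // *api.FullNodeStruct
	fmt.Println("methods:", t.NumMethod())          // number of FullNode interface methods
	fmt.Println("perm structs:", len(permStructs))  // 3: FullNode, Common, Net internals
}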
+
+func ExampleValue(method string, t, parent reflect.Type) interface{} {
 	v, ok := ExampleValues[t]
 	if ok {
 		return v
@ -173,25 +314,25 @@ func exampleValue(t, parent reflect.Type) interface{} {
 	switch t.Kind() {
 	case reflect.Slice:
 		out := reflect.New(t).Elem()
-		reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t)))
+		reflect.Append(out, reflect.ValueOf(ExampleValue(method, t.Elem(), t)))
 		return out.Interface()
 	case reflect.Chan:
-		return exampleValue(t.Elem(), nil)
+		return ExampleValue(method, t.Elem(), nil)
 	case reflect.Struct:
-		es := exampleStruct(t, parent)
+		es := exampleStruct(method, t, parent)
 		v := reflect.ValueOf(es).Elem().Interface()
 		ExampleValues[t] = v
 		return v
 	case reflect.Array:
 		out := reflect.New(t).Elem()
 		for i := 0; i < t.Len(); i++ {
-			out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t)))
+			out.Index(i).Set(reflect.ValueOf(ExampleValue(method, t.Elem(), t)))
 		}
 		return out.Interface()
 
 	case reflect.Ptr:
 		if t.Elem().Kind() == reflect.Struct {
-			es := exampleStruct(t.Elem(), t)
+			es := exampleStruct(method, t.Elem(), t)
 			//ExampleValues[t] = es
 			return es
 		}
@ -199,10 +340,10 @@ func exampleValue(t, parent reflect.Type) interface{} {
 		return struct{}{}
 	}
 
-	panic(fmt.Sprintf("No example value for type: %s", t))
+	panic(fmt.Sprintf("No example value for type: %s (method '%s')", t, method))
 }
 
-func exampleStruct(t, parent reflect.Type) interface{} {
+func exampleStruct(method string, t, parent reflect.Type) interface{} {
 	ns := reflect.New(t)
 	for i := 0; i < t.NumField(); i++ {
 		f := t.Field(i)
@ -210,7 +351,7 @@ func exampleStruct(t, parent reflect.Type) interface{} {
 			continue
 		}
 		if strings.Title(f.Name) == f.Name {
-			ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t)))
+			ns.Elem().Field(i).Set(reflect.ValueOf(ExampleValue(method, f.Type, t)))
 		}
 	}
 
@ -218,6 +359,7 @@ func exampleStruct(t, parent reflect.Type) interface{} {
 }
 
 type Visitor struct {
+	Root    string
 	Methods map[string]ast.Node
 }
 
@ -227,7 +369,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
 		return v
 	}
 
-	if st.Name.Name != "FullNode" {
+	if st.Name.Name != v.Root {
 		return nil
 	}
 
@ -241,32 +383,43 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
 	return v
 }
 
-const noComment = "There are not yet any comments for this method."
+const NoComment = "There are not yet any comments for this method."
 
-func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint
+func ParseApiASTInfo(apiFile, iface, pkg, dir string) (comments map[string]string, groupDocs map[string]string) { //nolint:golint
 	fset := token.NewFileSet()
-	pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments)
+	apiDir, err := filepath.Abs(dir)
+	if err != nil {
+		fmt.Println("./api filepath absolute error: ", err)
+		return
+	}
+	apiFile, err = filepath.Abs(apiFile)
+	if err != nil {
+		fmt.Println("filepath absolute error: ", err, "file:", apiFile)
+		return
+	}
+	pkgs, err := parser.ParseDir(fset, apiDir, nil, parser.AllErrors|parser.ParseComments)
 	if err != nil {
 		fmt.Println("parse error: ", err)
+		return
 	}
 
-	ap := pkgs["api"]
+	ap := pkgs[pkg]
 
-	f := ap.Files["api/api_full.go"]
+	f := ap.Files[apiFile]
 
 	cmap := ast.NewCommentMap(fset, f, f.Comments)
 
-	v := &Visitor{make(map[string]ast.Node)}
-	ast.Walk(v, pkgs["api"])
+	v := &Visitor{iface, make(map[string]ast.Node)}
+	ast.Walk(v, ap)
 
-	groupDocs := make(map[string]string)
-	out := make(map[string]string)
+	comments = make(map[string]string)
+	groupDocs = make(map[string]string)
 	for mn, node := range v.Methods {
-		cs := cmap.Filter(node).Comments()
-		if len(cs) == 0 {
-			out[mn] = noComment
+		filteredComments := cmap.Filter(node).Comments()
+		if len(filteredComments) == 0 {
+			comments[mn] = NoComment
 		} else {
-			for _, c := range cs {
+			for _, c := range filteredComments {
 				if strings.HasPrefix(c.Text(), "MethodGroup:") {
 					parts := strings.Split(c.Text(), "\n")
 					groupName := strings.TrimSpace(parts[0][12:])
@ -277,15 +430,19 @@ func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint
 				}
 			}
 
-			last := cs[len(cs)-1].Text()
+			l := len(filteredComments) - 1
+			if len(filteredComments) > 1 {
+				l = len(filteredComments) - 2
+			}
+			last := filteredComments[l].Text()
 			if !strings.HasPrefix(last, "MethodGroup:") {
-				out[mn] = last
+				comments[mn] = last
 			} else {
-				out[mn] = noComment
+				comments[mn] = NoComment
 			}
 		}
 	}
-	return out, groupDocs
+	return comments, groupDocs
 }
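Note: ParseApiASTInfo builds on go/ast comment maps. A self-contained sketch of the same technique against an in-memory source string (the demo interface below is made up for illustration):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

func main() {
	src := `package demo

type API interface {
	// ChainHead returns the current head of the chain.
	ChainHead() error
}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Associate comment groups with the nodes they document,
	// exactly as ParseApiASTInfo does for the real API files.
	cmap := ast.NewCommentMap(fset, f, f.Comments)

	ast.Inspect(f, func(n ast.Node) bool {
		if m, ok := n.(*ast.Field); ok && len(m.Names) > 0 {
			for _, cg := range cmap.Filter(m).Comments() {
				fmt.Printf("%s: %s", m.Names[0].Name, cg.Text())
			}
		}
		return true
	})
	// Output: ChainHead: ChainHead returns the current head of the chain.
}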
 
 type MethodGroup struct {
@ -301,7 +458,7 @@ type Method struct {
 	ResponseExample string
 }
 
-func methodGroupFromName(mn string) string {
+func MethodGroupFromName(mn string) string {
 	i := strings.IndexFunc(mn[1:], func(r rune) bool {
 		return unicode.IsUpper(r)
 	})
@ -310,112 +467,3 @@ func methodGroupFromName(mn string) string {
 	}
 	return mn[:i+1]
 }
-
-func main() {
-
-	comments, groupComments := parseApiASTInfo()
-
-	groups := make(map[string]*MethodGroup)
-
-	var api struct{ api.FullNode }
-	t := reflect.TypeOf(api)
-	for i := 0; i < t.NumMethod(); i++ {
-		m := t.Method(i)
-
-		groupName := methodGroupFromName(m.Name)
-
-		g, ok := groups[groupName]
-		if !ok {
-			g = new(MethodGroup)
-			g.Header = groupComments[groupName]
-			g.GroupName = groupName
-			groups[groupName] = g
-		}
-
-		var args []interface{}
-		ft := m.Func.Type()
-		for j := 2; j < ft.NumIn(); j++ {
-			inp := ft.In(j)
-			args = append(args, exampleValue(inp, nil))
-		}
-
-		v, err := json.MarshalIndent(args, "", "  ")
-		if err != nil {
-			panic(err)
-		}
-
-		outv := exampleValue(ft.Out(0), nil)
-
-		ov, err := json.MarshalIndent(outv, "", "  ")
-		if err != nil {
-			panic(err)
-		}
-
-		g.Methods = append(g.Methods, &Method{
-			Name:            m.Name,
-			Comment:         comments[m.Name],
-			InputExample:    string(v),
-			ResponseExample: string(ov),
-		})
-	}
-
-	var groupslice []*MethodGroup
-	for _, g := range groups {
-		groupslice = append(groupslice, g)
-	}
-
-	sort.Slice(groupslice, func(i, j int) bool {
-		return groupslice[i].GroupName < groupslice[j].GroupName
-	})
-
-	fmt.Printf("# Groups\n")
-
-	for _, g := range groupslice {
-		fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
-		for _, method := range g.Methods {
-			fmt.Printf("  * [%s](#%s)\n", method.Name, method.Name)
-		}
-	}
-
-	permStruct := reflect.TypeOf(apistruct.FullNodeStruct{}.Internal)
-	commonPermStruct := reflect.TypeOf(apistruct.CommonStruct{}.Internal)
-
-	for _, g := range groupslice {
-		g := g
-		fmt.Printf("## %s\n", g.GroupName)
-		fmt.Printf("%s\n\n", g.Header)
-
-		sort.Slice(g.Methods, func(i, j int) bool {
-			return g.Methods[i].Name < g.Methods[j].Name
-		})
-
-		for _, m := range g.Methods {
-			fmt.Printf("### %s\n", m.Name)
-			fmt.Printf("%s\n\n", m.Comment)
-
-			meth, ok := permStruct.FieldByName(m.Name)
-			if !ok {
-				meth, ok = commonPermStruct.FieldByName(m.Name)
-				if !ok {
-					panic("no perms for method: " + m.Name)
-				}
-			}
-
-			perms := meth.Tag.Get("perm")
-
-			fmt.Printf("Perms: %s\n\n", perms)
-
-			if strings.Count(m.InputExample, "\n") > 0 {
-				fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
-			} else {
-				fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
-			}
-
-			if strings.Count(m.ResponseExample, "\n") > 0 {
-				fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
-			} else {
-				fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
-			}
-		}
-	}
-}
3094 api/mocks/mock_full.go Normal file
File diff suppressed because it is too large
48 api/permissioned.go Normal file
@ -0,0 +1,48 @@
package api

import (
	"github.com/filecoin-project/go-jsonrpc/auth"
)

const (
	// When changing these, update docs/API.md too

	PermRead  auth.Permission = "read" // default
	PermWrite auth.Permission = "write"
	PermSign  auth.Permission = "sign"  // Use wallet keys for signing
	PermAdmin auth.Permission = "admin" // Manage permissions
)

var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin}
var DefaultPerms = []auth.Permission{PermRead}

func permissionedProxies(in, out interface{}) {
	outs := GetInternalStructs(out)
	for _, o := range outs {
		auth.PermissionedProxy(AllPermissions, DefaultPerms, in, o)
	}
}

func PermissionedStorMinerAPI(a StorageMiner) StorageMiner {
	var out StorageMinerStruct
	permissionedProxies(a, &out)
	return &out
}

func PermissionedFullAPI(a FullNode) FullNode {
	var out FullNodeStruct
	permissionedProxies(a, &out)
	return &out
}

func PermissionedWorkerAPI(a Worker) Worker {
	var out WorkerStruct
	permissionedProxies(a, &out)
	return &out
}

func PermissionedWalletAPI(a Wallet) Wallet {
	var out WalletStruct
	permissionedProxies(a, &out)
	return &out
}
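Note: a sketch of how these wrappers are meant to compose with the go-jsonrpc auth package (assuming auth.WithPerm for attaching permissions to a context; the real token/permission plumbing lives in the server wiring elsewhere in lotus):

package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-jsonrpc/auth"
	"github.com/filecoin-project/lotus/api"
)

// guard wraps any FullNode implementation so that every RPC method
// first checks the permissions carried on the caller's context.
func guard(full api.FullNode) api.FullNode {
	return api.PermissionedFullAPI(full)
}

func main() {
	// A context carrying only "read": write/sign/admin methods on the
	// guarded API will refuse it, while read methods pass through.
	ctx := auth.WithPerm(context.Background(), []auth.Permission{api.PermRead})
	fmt.Println(ctx != nil)
}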
3763 api/proxy_gen.go Normal file
File diff suppressed because it is too large
30 api/proxy_util.go Normal file
@ -0,0 +1,30 @@
package api

import "reflect"

var _internalField = "Internal"

// GetInternalStructs extracts all pointers to 'Internal' sub-structs from the provided pointer to a proxy struct
func GetInternalStructs(in interface{}) []interface{} {
	return getInternalStructs(reflect.ValueOf(in).Elem())
}

func getInternalStructs(rv reflect.Value) []interface{} {
	var out []interface{}

	internal := rv.FieldByName(_internalField)
	ii := internal.Addr().Interface()
	out = append(out, ii)

	for i := 0; i < rv.NumField(); i++ {
		if rv.Type().Field(i).Name == _internalField {
			continue
		}

		sub := getInternalStructs(rv.Field(i))

		out = append(out, sub...)
	}

	return out
}
62 api/proxy_util_test.go Normal file
@ -0,0 +1,62 @@
package api

import (
	"testing"

	"github.com/stretchr/testify/require"
)

type StrA struct {
	StrB

	Internal struct {
		A int
	}
}

type StrB struct {
	Internal struct {
		B int
	}
}

type StrC struct {
	Internal struct {
		Internal struct {
			C int
		}
	}
}

func TestGetInternalStructs(t *testing.T) {
	var proxy StrA

	sts := GetInternalStructs(&proxy)
	require.Len(t, sts, 2)

	sa := sts[0].(*struct{ A int })
	sa.A = 3
	sb := sts[1].(*struct{ B int })
	sb.B = 4

	require.Equal(t, 3, proxy.Internal.A)
	require.Equal(t, 4, proxy.StrB.Internal.B)
}

func TestNestedInternalStructs(t *testing.T) {
	var proxy StrC

	// check that only the top-level internal struct gets picked up

	sts := GetInternalStructs(&proxy)
	require.Len(t, sts, 1)

	sa := sts[0].(*struct {
		Internal struct {
			C int
		}
	})
	sa.Internal.C = 5

	require.Equal(t, 5, proxy.Internal.Internal.C)
}
@ -1,56 +0,0 @@
package test

import (
	"context"
	"fmt"
	"sync/atomic"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/miner"
)

type BlockMiner struct {
	ctx       context.Context
	t         *testing.T
	miner     TestStorageNode
	blocktime time.Duration
	mine      int64
	nulls     int64
	done      chan struct{}
}

func NewBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *BlockMiner {
	return &BlockMiner{
		ctx:       ctx,
		t:         t,
		miner:     miner,
		blocktime: blocktime,
		mine:      int64(1),
		done:      make(chan struct{}),
	}
}

func (bm *BlockMiner) MineBlocks() {
	time.Sleep(time.Second)
	go func() {
		defer close(bm.done)
		for atomic.LoadInt64(&bm.mine) == 1 {
			time.Sleep(bm.blocktime)
			nulls := atomic.SwapInt64(&bm.nulls, 0)
			if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
				InjectNulls: abi.ChainEpoch(nulls),
				Done:        func(bool, abi.ChainEpoch, error) {},
			}); err != nil {
				bm.t.Error(err)
			}
		}
	}()
}

func (bm *BlockMiner) Stop() {
	atomic.AddInt64(&bm.mine, -1)
	fmt.Println("shutting down mining")
	<-bm.done
}
@ -1,127 +0,0 @@
package test

import (
	"context"
	"fmt"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/impl"
)

func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
	for _, height := range []abi.ChainEpoch{
		1,    // before
		162,  // while sealing
		520,  // after upgrade deal
		5000, // after
	} {
		height := height // make linters happy by copying
		t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
			testCCUpgrade(t, b, blocktime, height)
		})
	}
}

func testCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, upgradeHeight abi.ChainEpoch) {
	ctx := context.Background()
	n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	time.Sleep(time.Second)

	mine := int64(1)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) == 1 {
			time.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, MineNext); err != nil {
				t.Error(err)
			}
		}
	}()

	maddr, err := miner.ActorAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}

	CC := abi.SectorNumber(GenesisPreseals + 1)
	Upgraded := CC + 1

	pledgeSectors(t, ctx, miner, 1, 0, nil)

	sl, err := miner.SectorsList(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(sl) != 1 {
		t.Fatal("expected 1 sector")
	}

	if sl[0] != CC {
		t.Fatal("bad")
	}

	{
		si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
		require.NoError(t, err)
		require.Less(t, 50000, int(si.Expiration))
	}

	if err := miner.SectorMarkForUpgrade(ctx, sl[0]); err != nil {
		t.Fatal(err)
	}

	MakeDeal(t, ctx, 6, client, miner, false, false)

	// Validate upgrade

	{
		exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK)
		require.NoError(t, err)
		require.NotNil(t, exp)
		require.Greater(t, 50000, int(exp.OnTime))
	}
	{
		exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK)
		require.NoError(t, err)
		require.Less(t, 50000, int(exp.OnTime))
	}

	dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	// Sector should expire.
	for {
		// Wait for the sector to expire.
		status, err := miner.SectorsStatus(ctx, CC, true)
		require.NoError(t, err)
		if status.OnTime == 0 && status.Early == 0 {
			break
		}
		t.Log("waiting for sector to expire")
		// wait one deadline per loop.
		time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime)
	}

	fmt.Println("shutting down mining")
	atomic.AddInt64(&mine, -1)
	<-done
}
@@ -1,458 +0,0 @@
package test

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/ipfs/go-cid"
	files "github.com/ipfs/go-ipfs-files"
	"github.com/ipld/go-car"

	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	dag "github.com/ipfs/go-merkledag"
	dstest "github.com/ipfs/go-merkledag/test"
	unixfile "github.com/ipfs/go-unixfs/file"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/impl"
	ipld "github.com/ipfs/go-ipld-format"
)

func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool) {
	ctx := context.Background()
	n, sn := b(t, OneFull, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	time.Sleep(time.Second)

	mine := int64(1)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) == 1 {
			time.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, MineNext); err != nil {
				t.Error(err)
			}
		}
	}()

	MakeDeal(t, ctx, 6, client, miner, carExport, fastRet)

	atomic.AddInt64(&mine, -1)
	fmt.Println("shutting down mining")
	<-done
}

func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
	ctx := context.Background()
	n, sn := b(t, OneFull, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	time.Sleep(time.Second)

	mine := int64(1)
	done := make(chan struct{})

	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) == 1 {
			time.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, MineNext); err != nil {
				t.Error(err)
			}
		}
	}()

	MakeDeal(t, ctx, 6, client, miner, false, false)
	MakeDeal(t, ctx, 7, client, miner, false, false)

	atomic.AddInt64(&mine, -1)
	fmt.Println("shutting down mining")
	<-done
}

func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool) {
	res, data, err := CreateClientFile(ctx, client, rseed)
	if err != nil {
		t.Fatal(err)
	}

	fcid := res.Root
	fmt.Println("FILE CID: ", fcid)

	deal := startDeal(t, ctx, miner, client, fcid, fastRet)

	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
	time.Sleep(time.Second)
	waitDealSealed(t, ctx, miner, client, deal, false)

	// Retrieval
	info, err := client.ClientGetDealInfo(ctx, *deal)
	require.NoError(t, err)

	testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
}

func CreateClientFile(ctx context.Context, client api.FullNode, rseed int) (*api.ImportRes, []byte, error) {
	data := make([]byte, 1600)
	rand.New(rand.NewSource(int64(rseed))).Read(data)

	dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
	if err != nil {
		return nil, nil, err
	}

	path := filepath.Join(dir, "sourcefile.dat")
	err = ioutil.WriteFile(path, data, 0644)
	if err != nil {
		return nil, nil, err
	}

	res, err := client.ClientImport(ctx, api.FileRef{Path: path})
	if err != nil {
		return nil, nil, err
	}
	return res, data, nil
}

func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
	ctx := context.Background()
	n, sn := b(t, OneFull, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	time.Sleep(time.Second)

	mine := int64(1)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) == 1 {
			time.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, MineNext); err != nil {
				t.Error(err)
			}
		}
	}()

	data := make([]byte, 1600)
	rand.New(rand.NewSource(int64(8))).Read(data)

	r := bytes.NewReader(data)
	fcid, err := client.ClientImportLocal(ctx, r)
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println("FILE CID: ", fcid)

	deal := startDeal(t, ctx, miner, client, fcid, true)

	waitDealPublished(t, ctx, miner, deal)
	fmt.Println("deal published, retrieving")
	// Retrieval
	info, err := client.ClientGetDealInfo(ctx, *deal)
	require.NoError(t, err)

	testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data)
	atomic.AddInt64(&mine, -1)
	fmt.Println("shutting down mining")
	<-done
}

func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
	ctx := context.Background()
	n, sn := b(t, OneFull, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	time.Sleep(time.Second)

	mine := int64(1)
	done := make(chan struct{})

	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) == 1 {
			time.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, MineNext); err != nil {
				t.Error(err)
			}
		}
	}()

	{
		data1 := make([]byte, 800)
		rand.New(rand.NewSource(int64(3))).Read(data1)
		r := bytes.NewReader(data1)

		fcid1, err := client.ClientImportLocal(ctx, r)
		if err != nil {
			t.Fatal(err)
		}

		data2 := make([]byte, 800)
		rand.New(rand.NewSource(int64(9))).Read(data2)
		r2 := bytes.NewReader(data2)

		fcid2, err := client.ClientImportLocal(ctx, r2)
		if err != nil {
			t.Fatal(err)
		}

		deal1 := startDeal(t, ctx, miner, client, fcid1, true)

		// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
		time.Sleep(time.Second)
		waitDealSealed(t, ctx, miner, client, deal1, true)

		deal2 := startDeal(t, ctx, miner, client, fcid2, true)

		time.Sleep(time.Second)
		waitDealSealed(t, ctx, miner, client, deal2, false)

		// Retrieval
		info, err := client.ClientGetDealInfo(ctx, *deal2)
		require.NoError(t, err)

		rf, _ := miner.SectorsRefs(ctx)
		fmt.Printf("refs: %+v\n", rf)

		testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2)
	}

	atomic.AddInt64(&mine, -1)
	fmt.Println("shutting down mining")
	<-done
}

func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool) *cid.Cid {
	maddr, err := miner.ActorAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}

	addr, err := client.WalletDefaultAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}
	deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{
		Data: &storagemarket.DataRef{
			TransferType: storagemarket.TTGraphsync,
			Root:         fcid,
		},
		Wallet:            addr,
		Miner:             maddr,
		EpochPrice:        types.NewInt(1000000),
		MinBlocksDuration: uint64(build.MinDealDuration),
		FastRetrieval:     fastRet,
	})
	if err != nil {
		t.Fatalf("%+v", err)
	}
	return deal
}

func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal bool) {
loop:
	for {
		di, err := client.ClientGetDealInfo(ctx, *deal)
		if err != nil {
			t.Fatal(err)
		}
		switch di.State {
		case storagemarket.StorageDealSealing:
			if noseal {
				return
			}
			startSealingWaiting(t, ctx, miner)
		case storagemarket.StorageDealProposalRejected:
			t.Fatal("deal rejected")
		case storagemarket.StorageDealFailing:
			t.Fatal("deal failed")
		case storagemarket.StorageDealError:
			t.Fatal("deal errored", di.Message)
		case storagemarket.StorageDealActive:
			fmt.Println("COMPLETE", di)
			break loop
		}
		fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
		time.Sleep(time.Second / 2)
	}
}

func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) {
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	updates, err := miner.MarketGetDealUpdates(subCtx)
	if err != nil {
		t.Fatal(err)
	}
	for {
		select {
		case <-ctx.Done():
			t.Fatal("context timeout")
		case di := <-updates:
			if deal.Equals(di.ProposalCid) {
				switch di.State {
				case storagemarket.StorageDealProposalRejected:
					t.Fatal("deal rejected")
				case storagemarket.StorageDealFailing:
					t.Fatal("deal failed")
				case storagemarket.StorageDealError:
					t.Fatal("deal errored", di.Message)
				case storagemarket.StorageDealFinalizing, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
					fmt.Println("COMPLETE", di)
					return
				}
				fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
			}
		}
	}
}

func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNode) {
	snums, err := miner.SectorsList(ctx)
	require.NoError(t, err)

	for _, snum := range snums {
		si, err := miner.SectorsStatus(ctx, snum, false)
		require.NoError(t, err)

		t.Logf("Sector state: %s", si.State)
		if si.State == api.SectorState(sealing.WaitDeals) {
			require.NoError(t, miner.SectorStartSealing(ctx, snum))
		}
	}
}

func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
	offers, err := client.ClientFindData(ctx, fcid, piece)
	if err != nil {
		t.Fatal(err)
	}

	if len(offers) < 1 {
		t.Fatal("no offers")
	}

	rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(rpath) //nolint:errcheck

	caddr, err := client.WalletDefaultAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}

	ref := &api.FileRef{
		Path:  filepath.Join(rpath, "ret"),
		IsCAR: carExport,
	}
	updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
	if err != nil {
		t.Fatal(err)
	}
	for update := range updates {
		if update.Err != "" {
			t.Fatalf("retrieval failed: %s", update.Err)
		}
	}

	rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
	if err != nil {
		t.Fatal(err)
	}

	if carExport {
		rdata = extractCarData(t, ctx, rdata, rpath)
	}

	if !bytes.Equal(rdata, data) {
		t.Fatal("wrong data retrieved")
	}
}

func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte {
	bserv := dstest.Bserv()
	ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
	if err != nil {
		t.Fatal(err)
	}
	b, err := bserv.GetBlock(ctx, ch.Roots[0])
	if err != nil {
		t.Fatal(err)
	}
	nd, err := ipld.Decode(b)
	if err != nil {
		t.Fatal(err)
	}
	dserv := dag.NewDAGService(bserv)
	fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
	if err != nil {
		t.Fatal(err)
	}
	outPath := filepath.Join(rpath, "retLoadedCAR")
	if err := files.WriteTo(fil, outPath); err != nil {
		t.Fatal(err)
	}
	rdata, err = ioutil.ReadFile(outPath)
	if err != nil {
		t.Fatal(err)
	}
	return rdata
}
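Every test in the file above repeats the same background-mining pattern: an atomic flag plus a goroutine that calls MineOne until the flag is cleared. A sketch of that pattern factored into a reusable helper (the helper name is hypothetical; the tests inline this logic):

	// startMining mines in the background until the returned stop
	// function is called; stop also waits for the mining loop to exit.
	func startMining(ctx context.Context, t *testing.T, sn TestStorageNode, blocktime time.Duration) (stop func()) {
		mine := int64(1)
		done := make(chan struct{})
		go func() {
			defer close(done)
			for atomic.LoadInt64(&mine) == 1 {
				time.Sleep(blocktime)
				if err := sn.MineOne(ctx, MineNext); err != nil {
					t.Error(err)
				}
			}
		}()
		return func() {
			atomic.StoreInt64(&mine, 0)
			<-done
		}
	}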
@@ -1,201 +0,0 @@
package test

import (
	"bytes"
	"context"
	"fmt"
	"math/rand"
	"sync/atomic"
	"testing"
	"time"

	logging "github.com/ipfs/go-log/v2"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node/impl"
)

//nolint:deadcode,varcheck
var log = logging.Logger("apitest")

func (ts *testSuite) testMining(t *testing.T) {
	ctx := context.Background()
	apis, sn := ts.makeNodes(t, OneFull, OneMiner)
	api := apis[0]

	newHeads, err := api.ChainNotify(ctx)
	require.NoError(t, err)
	initHead := (<-newHeads)[0]
	baseHeight := initHead.Val.Height()

	h1, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Equal(t, int64(h1.Height()), int64(baseHeight))

	MineUntilBlock(ctx, t, apis[0], sn[0], nil)
	require.NoError(t, err)

	<-newHeads

	h2, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Greater(t, int64(h2.Height()), int64(h1.Height()))
}

func (ts *testSuite) testMiningReal(t *testing.T) {
	build.InsecurePoStValidation = false
	defer func() {
		build.InsecurePoStValidation = true
	}()

	ctx := context.Background()
	apis, sn := ts.makeNodes(t, OneFull, OneMiner)
	api := apis[0]

	newHeads, err := api.ChainNotify(ctx)
	require.NoError(t, err)
	at := (<-newHeads)[0].Val.Height()

	h1, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Equal(t, int64(at), int64(h1.Height()))

	MineUntilBlock(ctx, t, apis[0], sn[0], nil)
	require.NoError(t, err)

	<-newHeads

	h2, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Greater(t, int64(h2.Height()), int64(h1.Height()))

	MineUntilBlock(ctx, t, apis[0], sn[0], nil)
	require.NoError(t, err)

	<-newHeads

	h3, err := api.ChainHead(ctx)
	require.NoError(t, err)
	require.Greater(t, int64(h3.Height()), int64(h2.Height()))
}

func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) {
	// test making a deal with a fresh miner, and see if it starts to mine

	ctx := context.Background()
	n, sn := b(t, OneFull, []StorageMiner{
		{Full: 0, Preseal: PresealGenesis},
		{Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
	})
	client := n[0].FullNode.(*impl.FullNodeAPI)
	provider := sn[1]
	genesisMiner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := provider.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}

	if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}

	time.Sleep(time.Second)

	data := make([]byte, 600)
	rand.New(rand.NewSource(5)).Read(data)

	r := bytes.NewReader(data)
	fcid, err := client.ClientImportLocal(ctx, r)
	if err != nil {
		t.Fatal(err)
	}

	fmt.Println("FILE CID: ", fcid)

	var mine int32 = 1
	done := make(chan struct{})
	minedTwo := make(chan struct{})

	m2addr, err := sn[1].ActorAddress(context.TODO())
	if err != nil {
		t.Fatal(err)
	}

	go func() {
		defer close(done)

		complChan := minedTwo
		for atomic.LoadInt32(&mine) != 0 {
			wait := make(chan int)
			mdone := func(mined bool, _ abi.ChainEpoch, err error) {
				n := 0
				if mined {
					n = 1
				}
				wait <- n
			}

			if err := sn[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
				t.Error(err)
			}

			if err := sn[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
				t.Error(err)
			}

			expect := <-wait
			expect += <-wait

			time.Sleep(blocktime)
			if expect == 0 {
				// null block
				continue
			}

			var nodeOneMined bool
			for _, node := range sn {
				mb, err := node.MiningBase(ctx)
				if err != nil {
					t.Error(err)
					return
				}

				for _, b := range mb.Blocks() {
					if b.Miner == m2addr {
						nodeOneMined = true
						break
					}
				}
			}

			if nodeOneMined && complChan != nil {
				close(complChan)
				complChan = nil
			}
		}
	}()

	deal := startDeal(t, ctx, provider, client, fcid, false)

	// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
	time.Sleep(time.Second)

	waitDealSealed(t, ctx, provider, client, deal, false)

	<-minedTwo

	atomic.StoreInt32(&mine, 0)
	fmt.Println("shutting down mining")
	<-done
}
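TestDealMining synchronizes its two miners through the MineReq.Done callback, counting how many of them actually won a block in each round; a round where neither wins is a null round and is retried. The callback contract in isolation (a sketch using the same types as above):

	wait := make(chan int, 2)
	mdone := func(mined bool, _ abi.ChainEpoch, err error) {
		n := 0
		if mined {
			n = 1 // this miner won a block in this round
		}
		wait <- n
	}
	// Ask both miners to attempt one round, then sum the wins.
	_ = sn[0].MineOne(ctx, miner.MineReq{Done: mdone})
	_ = sn[1].MineOne(ctx, miner.MineReq{Done: mdone})
	wins := <-wait
	wins += <-wait // wins == 0 means a null round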
114	api/test/tape.go
@@ -1,114 +0,0 @@
package test

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/stmgr"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
	"github.com/filecoin-project/lotus/node"
	"github.com/filecoin-project/lotus/node/impl"
	"github.com/stretchr/testify/require"
)

func TestTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration) {
	t.Run("before", func(t *testing.T) { testTapeFix(t, b, blocktime, false) })
	t.Run("after", func(t *testing.T) { testTapeFix(t, b, blocktime, true) })
}

func testTapeFix(t *testing.T, b APIBuilder, blocktime time.Duration, after bool) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	upgradeSchedule := stmgr.UpgradeSchedule{{
		Network:   build.ActorUpgradeNetworkVersion,
		Height:    1,
		Migration: stmgr.UpgradeActorsV2,
	}}
	if after {
		upgradeSchedule = append(upgradeSchedule, stmgr.Upgrade{
			Network: network.Version5,
			Height:  2,
		})
	}

	n, sn := b(t, []FullNodeOpts{{Opts: func(_ []TestNode) node.Option {
		return node.Override(new(stmgr.UpgradeSchedule), upgradeSchedule)
	}}}, OneMiner)

	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	build.Clock.Sleep(time.Second)

	done := make(chan struct{})
	go func() {
		defer close(done)
		for ctx.Err() == nil {
			build.Clock.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, MineNext); err != nil {
				if ctx.Err() != nil {
					// context was canceled, ignore the error.
					return
				}
				t.Error(err)
			}
		}
	}()
	defer func() {
		cancel()
		<-done
	}()

	err = miner.PledgeSector(ctx)
	require.NoError(t, err)

	// Wait till done.
	var sectorNo abi.SectorNumber
	for {
		s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
		require.NoError(t, err)
		fmt.Printf("Sectors: %d\n", len(s))
		if len(s) == 1 {
			sectorNo = s[0]
			break
		}

		build.Clock.Sleep(100 * time.Millisecond)
	}

	fmt.Printf("All sectors in fsm\n")

	// If before, we expect the precommit to fail
	successState := api.SectorState(sealing.CommitFailed)
	failureState := api.SectorState(sealing.Proving)
	if after {
		// otherwise, it should succeed.
		successState, failureState = failureState, successState
	}

	for {
		st, err := miner.SectorsStatus(ctx, sectorNo, false)
		require.NoError(t, err)
		if st.State == successState {
			break
		}
		require.NotEqual(t, failureState, st.State)
		build.Clock.Sleep(100 * time.Millisecond)
		fmt.Println("WaitSeal")
	}
}
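The tape fix is exercised by injecting a custom stmgr.UpgradeSchedule through the node's dependency-injection options; the "after" run simply appends a second upgrade at height 2. The same injection pattern in isolation (a sketch using the FullNodeOpts shape this suite defines):

	opts := FullNodeOpts{Opts: func(_ []TestNode) node.Option {
		return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
			Network:   network.Version5,
			Height:    2,
			Migration: stmgr.UpgradeActorsV2,
		}})
	}}
	// opts can then be handed to an APIBuilder in place of OneFull.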
243	api/test/test.go
@@ -1,243 +0,0 @@
package test

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"

	logging "github.com/ipfs/go-log/v2"
	"github.com/multiformats/go-multiaddr"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node"
)

func init() {
	logging.SetAllLoggers(logging.LevelInfo)
	err := os.Setenv("BELLMAN_NO_GPU", "1")
	if err != nil {
		panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
	}
	build.InsecurePoStValidation = true
}

type TestNode struct {
	api.FullNode
	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr
}

type TestStorageNode struct {
	api.StorageMiner
	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr

	MineOne func(context.Context, miner.MineReq) error
}

var PresealGenesis = -1

const GenesisPreseals = 2

// Options for setting up a mock storage miner
type StorageMiner struct {
	Full    int
	Preseal int
}

type OptionGenerator func([]TestNode) node.Option

// Options for setting up a mock full node
type FullNodeOpts struct {
	Lite bool            // run node in "lite" mode
	Opts OptionGenerator // generate dependency injection options
}

// APIBuilder is a function which is invoked in test suite to provide
// test nodes and networks
//
// fullOpts array defines options for each full node
// storage array defines storage nodes, numbers in the array specify full node
// index the storage node 'belongs' to
type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestNode, []TestStorageNode)
type testSuite struct {
	makeNodes APIBuilder
}

// TestApis is the entry point to API test suite
func TestApis(t *testing.T, b APIBuilder) {
	ts := testSuite{
		makeNodes: b,
	}

	t.Run("version", ts.testVersion)
	t.Run("id", ts.testID)
	t.Run("testConnectTwo", ts.testConnectTwo)
	t.Run("testMining", ts.testMining)
	t.Run("testMiningReal", ts.testMiningReal)
	t.Run("testSearchMsg", ts.testSearchMsg)
}

func DefaultFullOpts(nFull int) []FullNodeOpts {
	full := make([]FullNodeOpts, nFull)
	for i := range full {
		full[i] = FullNodeOpts{
			Opts: func(nodes []TestNode) node.Option {
				return node.Options()
			},
		}
	}
	return full
}

var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
var OneFull = DefaultFullOpts(1)
var TwoFull = DefaultFullOpts(2)

var FullNodeWithUpgradeAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
	return FullNodeOpts{
		Opts: func(nodes []TestNode) node.Option {
			return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
				// Skip directly to tape height so precommits work.
				Network:   network.Version5,
				Height:    upgradeHeight,
				Migration: stmgr.UpgradeActorsV2,
			}})
		},
	}
}

var MineNext = miner.MineReq{
	InjectNulls: 0,
	Done:        func(bool, abi.ChainEpoch, error) {},
}

func (ts *testSuite) testVersion(t *testing.T) {
	build.RunningNodeType = build.NodeFull

	ctx := context.Background()
	apis, _ := ts.makeNodes(t, OneFull, OneMiner)
	api := apis[0]

	v, err := api.Version(ctx)
	if err != nil {
		t.Fatal(err)
	}
	require.Equal(t, v.Version, build.BuildVersion)
}

func (ts *testSuite) testSearchMsg(t *testing.T) {
	apis, miners := ts.makeNodes(t, OneFull, OneMiner)

	api := apis[0]
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	senderAddr, err := api.WalletDefaultAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}

	msg := &types.Message{
		From:  senderAddr,
		To:    senderAddr,
		Value: big.Zero(),
	}
	bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond)
	bm.MineBlocks()
	defer bm.Stop()

	sm, err := api.MpoolPushMessage(ctx, msg, nil)
	if err != nil {
		t.Fatal(err)
	}
	res, err := api.StateWaitMsg(ctx, sm.Cid(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if res.Receipt.ExitCode != 0 {
		t.Fatal("did not successfully send message")
	}

	searchRes, err := api.StateSearchMsg(ctx, sm.Cid())
	if err != nil {
		t.Fatal(err)
	}

	if searchRes.TipSet != res.TipSet {
		t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
	}
}

func (ts *testSuite) testID(t *testing.T) {
	ctx := context.Background()
	apis, _ := ts.makeNodes(t, OneFull, OneMiner)
	api := apis[0]

	id, err := api.ID(ctx)
	if err != nil {
		t.Fatal(err)
	}
	assert.Regexp(t, "^12", id.Pretty())
}

func (ts *testSuite) testConnectTwo(t *testing.T) {
	ctx := context.Background()
	apis, _ := ts.makeNodes(t, TwoFull, OneMiner)

	p, err := apis[0].NetPeers(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(p) != 0 {
		t.Error("Node 0 has a peer")
	}

	p, err = apis[1].NetPeers(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(p) != 0 {
		t.Error("Node 1 has a peer")
	}

	addrs, err := apis[1].NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := apis[0].NetConnect(ctx, addrs); err != nil {
		t.Fatal(err)
	}

	p, err = apis[0].NetPeers(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(p) != 1 {
		t.Error("Node 0 doesn't have 1 peer")
	}

	p, err = apis[1].NetPeers(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if len(p) != 1 {
		t.Error("Node 1 doesn't have 1 peer")
	}
}
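A consumer of this suite supplies its own APIBuilder and hands it to TestApis, which then runs the whole subtest tree against those nodes. A minimal caller-side sketch (mockBuilder is a hypothetical placeholder for a concrete builder):

	func TestAPIsOverMockNodes(t *testing.T) {
		// mockBuilder must satisfy the APIBuilder signature above.
		TestApis(t, mockBuilder)
	}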
@@ -1,86 +0,0 @@
package test

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/miner"
)

func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) {
	senderAddr, err := sender.WalletDefaultAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}

	msg := &types.Message{
		From:  senderAddr,
		To:    addr,
		Value: amount,
	}

	sm, err := sender.MpoolPushMessage(ctx, msg, nil)
	if err != nil {
		t.Fatal(err)
	}
	res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if res.Receipt.ExitCode != 0 {
		t.Fatal("did not successfully send money")
	}
}

func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) {
	for i := 0; i < 1000; i++ {
		var success bool
		var err error
		var epoch abi.ChainEpoch
		wait := make(chan struct{})
		mineErr := sn.MineOne(ctx, miner.MineReq{
			Done: func(win bool, ep abi.ChainEpoch, e error) {
				success = win
				err = e
				epoch = ep
				wait <- struct{}{}
			},
		})
		if mineErr != nil {
			t.Fatal(mineErr)
		}
		<-wait
		if err != nil {
			t.Fatal(err)
		}
		if success {
			// Wait until it shows up on the given full node's ChainHead
			nloops := 50
			for i := 0; i < nloops; i++ {
				ts, err := fn.ChainHead(ctx)
				if err != nil {
					t.Fatal(err)
				}
				if ts.Height() == epoch {
					break
				}
				if i == nloops-1 {
					t.Fatal("block never managed to sync to node")
				}
				time.Sleep(time.Millisecond * 10)
			}

			if cb != nil {
				cb(epoch)
			}
			return
		}
		t.Log("did not mine block, trying again", i)
	}
	t.Fatal("failed to mine 1000 times in a row...")
}
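MineUntilBlock only returns once a winning block has both been mined and become visible on the given full node's head, which makes it a convenient synchronization point between mining and assertions. A usage sketch (fullNode and storageNode stand in for a TestNode and TestStorageNode obtained from a builder):

	MineUntilBlock(ctx, t, fullNode, storageNode, func(e abi.ChainEpoch) {
		t.Logf("block landed at epoch %d", e)
	})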
@@ -1,336 +0,0 @@
package test

import (
	"context"
	"fmt"
	"sync/atomic"

	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/extern/sector-storage/mock"
	sealing "github.com/filecoin-project/lotus/extern/storage-sealing"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/types"
	bminer "github.com/filecoin-project/lotus/miner"
	"github.com/filecoin-project/lotus/node/impl"
)

func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, sn := b(t, OneFull, OneMiner)
	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	build.Clock.Sleep(time.Second)

	mine := int64(1)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for atomic.LoadInt64(&mine) != 0 {
			build.Clock.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {

			}}); err != nil {
				t.Error(err)
			}
		}
	}()

	pledgeSectors(t, ctx, miner, nSectors, 0, nil)

	atomic.StoreInt64(&mine, 0)
	<-done
}

func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
	for i := 0; i < n; i++ {
		err := miner.PledgeSector(ctx)
		require.NoError(t, err)
		if i%3 == 0 && blockNotif != nil {
			<-blockNotif
		}
	}

	for {
		s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
		require.NoError(t, err)
		fmt.Printf("Sectors: %d\n", len(s))
		if len(s) >= n+existing {
			break
		}

		build.Clock.Sleep(100 * time.Millisecond)
	}

	fmt.Printf("All sectors in fsm\n")

	s, err := miner.SectorsList(ctx)
	require.NoError(t, err)

	toCheck := map[abi.SectorNumber]struct{}{}
	for _, number := range s {
		toCheck[number] = struct{}{}
	}

	for len(toCheck) > 0 {
		for n := range toCheck {
			st, err := miner.SectorsStatus(ctx, n, false)
			require.NoError(t, err)
			if st.State == api.SectorState(sealing.Proving) {
				delete(toCheck, n)
			}
			if strings.Contains(string(st.State), "Fail") {
				t.Fatal("sector in a failed state", st.State)
			}
		}

		build.Clock.Sleep(100 * time.Millisecond)
		fmt.Printf("WaitSeal: %d\n", len(s))
	}
}

func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
	for _, height := range []abi.ChainEpoch{
		1,    // before
		162,  // while sealing
		5000, // while proving
	} {
		height := height // copy to satisfy lints
		t.Run(fmt.Sprintf("upgrade-%d", height), func(t *testing.T) {
			testWindowPostUpgrade(t, b, blocktime, nSectors, height)
		})
	}
}

func testWindowPostUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int,
	upgradeHeight abi.ChainEpoch) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, sn := b(t, []FullNodeOpts{FullNodeWithUpgradeAt(upgradeHeight)}, OneMiner)

	client := n[0].FullNode.(*impl.FullNodeAPI)
	miner := sn[0]

	addrinfo, err := client.NetAddrsListen(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := miner.NetConnect(ctx, addrinfo); err != nil {
		t.Fatal(err)
	}
	build.Clock.Sleep(time.Second)

	done := make(chan struct{})
	go func() {
		defer close(done)
		for ctx.Err() == nil {
			build.Clock.Sleep(blocktime)
			if err := sn[0].MineOne(ctx, MineNext); err != nil {
				if ctx.Err() != nil {
					// context was canceled, ignore the error.
					return
				}
				t.Error(err)
			}
		}
	}()
	defer func() {
		cancel()
		<-done
	}()

	pledgeSectors(t, ctx, miner, nSectors, 0, nil)

	maddr, err := miner.ActorAddress(ctx)
	require.NoError(t, err)

	di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	mid, err := address.IDFromAddress(maddr)
	require.NoError(t, err)

	fmt.Printf("Running one proving period\n")
	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)

	for {
		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
			fmt.Printf("Now head.Height = %d\n", head.Height())
			break
		}
		build.Clock.Sleep(blocktime)
	}

	p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	ssz, err := miner.ActorSectorSize(ctx, maddr)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)
	require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+GenesisPreseals)))

	fmt.Printf("Drop some sectors\n")

	// Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
	{
		parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK)
		require.NoError(t, err)
		require.Greater(t, len(parts), 0)

		secs := parts[0].AllSectors
		n, err := secs.Count()
		require.NoError(t, err)
		require.Equal(t, uint64(2), n)

		// Drop the partition
		err = secs.ForEach(func(sid uint64) error {
			return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{
				Miner:  abi.ActorID(mid),
				Number: abi.SectorNumber(sid),
			}, true)
		})
		require.NoError(t, err)
	}

	var s abi.SectorID

	// Drop 1 sector from deadline 3 partition 0
	{
		parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK)
		require.NoError(t, err)
		require.Greater(t, len(parts), 0)

		secs := parts[0].AllSectors
		n, err := secs.Count()
		require.NoError(t, err)
		require.Equal(t, uint64(2), n)

		// Drop the sector
		sn, err := secs.First()
		require.NoError(t, err)

		all, err := secs.All(2)
		require.NoError(t, err)
		fmt.Println("the sectors", all)

		s = abi.SectorID{
			Miner:  abi.ActorID(mid),
			Number: abi.SectorNumber(sn),
		}

		err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true)
		require.NoError(t, err)
	}

	di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	fmt.Printf("Go through another PP, wait for sectors to become faulty\n")
	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)

	for {
		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
			fmt.Printf("Now head.Height = %d\n", head.Height())
			break
		}

		build.Clock.Sleep(blocktime)
	}

	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)

	sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
	require.Equal(t, nSectors+GenesisPreseals-3, int(sectors)) // -3 just removed sectors

	fmt.Printf("Recover one sector\n")

	err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false)
	require.NoError(t, err)

	di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	fmt.Printf("End for head.Height > %d\n", di.PeriodStart+di.WPoStProvingPeriod+2)

	for {
		head, err := client.ChainHead(ctx)
		require.NoError(t, err)

		if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
			fmt.Printf("Now head.Height = %d\n", head.Height())
			break
		}

		build.Clock.Sleep(blocktime)
	}

	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)

	sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
	require.Equal(t, nSectors+GenesisPreseals-2, int(sectors)) // -2 not recovered sectors

	// pledge a sector after recovery

	pledgeSectors(t, ctx, miner, 1, nSectors, nil)

	{
		// Wait until proven.
		di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
		require.NoError(t, err)

		waitUntil := di.PeriodStart + di.WPoStProvingPeriod + 2
		fmt.Printf("End for head.Height > %d\n", waitUntil)

		for {
			head, err := client.ChainHead(ctx)
			require.NoError(t, err)

			if head.Height() > waitUntil {
				fmt.Printf("Now head.Height = %d\n", head.Height())
				break
			}
		}
	}

	p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
	require.NoError(t, err)

	require.Equal(t, p.MinerPower, p.TotalPower)

	sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
	require.Equal(t, nSectors+GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
}
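The power assertions in testWindowPostUpgrade all reduce to one piece of arithmetic: expected raw-byte power equals the sector size times the number of live sectors, where GenesisPreseals = 2 preseals are always counted. As a worked check for the post-drop assertion (same identifiers as above):

	// nSectors pledged + 2 preseals - 3 dropped (2 corrupted + 1 marked failed)
	expected := uint64(nSectors+GenesisPreseals-3) * uint64(ssz)
	require.Equal(t, expected, p.MinerPower.RawBytePower.Uint64())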
116	api/types.go
@@ -3,10 +3,13 @@ package api
 import (
 	"encoding/json"
 	"fmt"
+	"time"
+
+	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
+	"github.com/filecoin-project/lotus/chain/types"
 
 	datatransfer "github.com/filecoin-project/go-data-transfer"
 	"github.com/filecoin-project/go-state-types/abi"
-	"github.com/filecoin-project/lotus/build"
 	"github.com/ipfs/go-cid"
 
 	"github.com/libp2p/go-libp2p-core/peer"
@@ -51,19 +54,6 @@ type MessageSendSpec struct {
 	MaxFee abi.TokenAmount
 }
 
-var DefaultMessageSendSpec = MessageSendSpec{
-	// MaxFee of 0.1FIL
-	MaxFee: abi.NewTokenAmount(int64(build.FilecoinPrecision) / 10),
-}
-
-func (ms *MessageSendSpec) Get() MessageSendSpec {
-	if ms == nil {
-		return DefaultMessageSendSpec
-	}
-
-	return *ms
-}
-
 type DataTransferChannel struct {
 	TransferID datatransfer.TransferID
 	Status     datatransfer.Status
@@ -74,6 +64,7 @@ type DataTransferChannel struct {
 	Message     string
 	OtherPeer   peer.ID
 	Transferred uint64
+	Stages      *datatransfer.ChannelStages
 }
 
 // NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id
@@ -107,3 +98,100 @@ func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelSta
 	}
 	return channel
 }
+
+type NetBlockList struct {
+	Peers     []peer.ID
+	IPAddrs   []string
+	IPSubnets []string
+}
+
+type ExtendedPeerInfo struct {
+	ID          peer.ID
+	Agent       string
+	Addrs       []string
+	Protocols   []string
+	ConnMgrMeta *ConnMgrInfo
+}
+
+type ConnMgrInfo struct {
+	FirstSeen time.Time
+	Value     int
+	Tags      map[string]int
+	Conns     map[string]time.Time
+}
+
+type NodeStatus struct {
+	SyncStatus  NodeSyncStatus
+	PeerStatus  NodePeerStatus
+	ChainStatus NodeChainStatus
+}
+
+type NodeSyncStatus struct {
+	Epoch  uint64
+	Behind uint64
+}
+
+type NodePeerStatus struct {
+	PeersToPublishMsgs   int
+	PeersToPublishBlocks int
+}
+
+type NodeChainStatus struct {
+	BlocksPerTipsetLast100      float64
+	BlocksPerTipsetLastFinality float64
+}
+
+type CheckStatusCode int
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=CheckStatusCode -trimprefix=CheckStatus
+const (
+	_ CheckStatusCode = iota
+	// Message Checks
+	CheckStatusMessageSerialize
+	CheckStatusMessageSize
+	CheckStatusMessageValidity
+	CheckStatusMessageMinGas
+	CheckStatusMessageMinBaseFee
+	CheckStatusMessageBaseFee
+	CheckStatusMessageBaseFeeLowerBound
+	CheckStatusMessageBaseFeeUpperBound
+	CheckStatusMessageGetStateNonce
+	CheckStatusMessageNonce
+	CheckStatusMessageGetStateBalance
+	CheckStatusMessageBalance
+)
+
+type CheckStatus struct {
+	Code CheckStatusCode
+	OK   bool
+	Err  string
+	Hint map[string]interface{}
+}
+
+type MessageCheckStatus struct {
+	Cid cid.Cid
+	CheckStatus
+}
+
+type MessagePrototype struct {
+	Message    types.Message
+	ValidNonce bool
+}
+
+type RetrievalInfo struct {
+	PayloadCID   cid.Cid
+	ID           retrievalmarket.DealID
+	PieceCID     *cid.Cid
+	PricePerByte abi.TokenAmount
+	UnsealPrice  abi.TokenAmount
+
+	Status        retrievalmarket.DealStatus
+	Message       string // more information about deal state, particularly errors
+	Provider      peer.ID
+	BytesReceived uint64
+	BytesPaidFor  uint64
+	TotalPaid     abi.TokenAmount
+
+	TransferChannelID *datatransfer.ChannelID
+	DataTransfer      *DataTransferChannel
+}

5	api/types/actors.go
@@ -0,0 +1,5 @@
package apitypes

import "github.com/filecoin-project/go-state-types/network"

type NetworkVersion = network.Version

3	api/types/openrpc.go
@@ -0,0 +1,3 @@
package apitypes

type OpenRPCDocument map[string]interface{}
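The new NodeStatus type in api/types.go above aggregates sync, peer, and chain health into one struct. A minimal sketch of consuming it (the thresholds are illustrative assumptions, not part of this diff):

	// healthy reports whether a node is close to chain head and still
	// has peers to publish blocks to.
	func healthy(s api.NodeStatus) bool {
		return s.SyncStatus.Behind < 5 && s.PeerStatus.PeersToPublishBlocks > 0
	}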
711
api/v0api/full.go
Normal file
711
api/v0api/full.go
Normal file
@ -0,0 +1,711 @@
|
|||||||
|
package v0api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/go-address"
|
||||||
|
"github.com/filecoin-project/go-bitfield"
|
||||||
|
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||||
|
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||||
|
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||||
|
"github.com/filecoin-project/go-multistore"
|
||||||
|
"github.com/filecoin-project/go-state-types/abi"
|
||||||
|
"github.com/filecoin-project/go-state-types/crypto"
|
||||||
|
"github.com/filecoin-project/go-state-types/dline"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
"github.com/libp2p/go-libp2p-core/peer"
|
||||||
|
|
||||||
|
"github.com/filecoin-project/lotus/api"
|
||||||
|
apitypes "github.com/filecoin-project/lotus/api/types"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||||
|
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
|
||||||
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
|
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||||
|
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:generate go run github.com/golang/mock/mockgen -destination=v0mocks/mock_full.go -package=v0mocks . FullNode
|
||||||
|
|
||||||
|
// MODIFYING THE API INTERFACE
|
||||||
|
//
|
||||||
|
// NOTE: This is the V0 (Stable) API - when adding methods to this interface,
|
||||||
|
// you'll need to make sure they are also present on the V1 (Unstable) API
|
||||||
|
//
|
||||||
|
// This API is implemented in `v1_wrapper.go` as a compatibility layer backed
|
||||||
|
// by the V1 api
|
||||||
|
//
|
||||||
|
// When adding / changing methods in this file:
|
||||||
|
// * Do the change here
|
||||||
|
// * Adjust implementation in `node/impl/`
|
||||||
|
// * Run `make gen` - this will:
|
||||||
|
// * Generate proxy structs
|
||||||
|
// * Generate mocks
|
||||||
|
// * Generate markdown docs
|
||||||
|
// * Generate openrpc blobs
|
||||||
|
|
||||||
|
// FullNode API is a low-level interface to the Filecoin network full node
|
||||||
|
type FullNode interface {
|
||||||
|
Common
|
||||||
|
Net
|
||||||
|
|
||||||
|
// MethodGroup: Chain
|
||||||
|
// The Chain method group contains methods for interacting with the
|
||||||
|
// blockchain, but that do not require any form of state computation.
|
||||||
|
|
||||||
|
// ChainNotify returns channel with chain head updates.
|
||||||
|
// First message is guaranteed to be of len == 1, and type == 'current'.
|
||||||
|
ChainNotify(context.Context) (<-chan []*api.HeadChange, error) //perm:read

	// ChainHead returns the current head of the chain.
	ChainHead(context.Context) (*types.TipSet, error) //perm:read

	// ChainGetRandomnessFromTickets is used to sample the chain for randomness.
	ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read

	// ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
	ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read

	// ChainGetBlock returns the block specified by the given CID.
	ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read
	// ChainGetTipSet returns the tipset specified by the given TipSetKey.
	ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) //perm:read

	// ChainGetBlockMessages returns messages stored in the specified block.
	//
	// Note: If there are multiple blocks in a tipset, it's likely that some
	// messages will be duplicated. It's also possible for blocks in a tipset to have
	// different messages from the same sender at the same nonce. When that happens,
	// only the first message (in a block with the lowest ticket) will be considered
	// for execution.
	//
	// NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
	//
	// DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET
	// Use ChainGetParentMessages, which will perform correct message deduplication
	ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*api.BlockMessages, error) //perm:read

	// ChainGetParentReceipts returns receipts for messages in the parent tipset of
	// the specified block. The receipts in the returned list are one-to-one with the
	// messages returned by a call to ChainGetParentMessages with the same blockCid.
	ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) //perm:read

	// ChainGetParentMessages returns messages stored in the parent tipset of the
	// specified block.
	ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) //perm:read

	// ChainGetMessagesInTipset returns messages stored in the specified tipset.
	ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) //perm:read

	// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
	// If there are no blocks at the specified epoch, a tipset at an earlier epoch
	// will be returned.
	ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read

	// ChainReadObj reads ipld nodes referenced by the specified CID from the chain
	// blockstore and returns raw bytes.
	ChainReadObj(context.Context, cid.Cid) ([]byte, error) //perm:read

	// ChainDeleteObj deletes the node referenced by the given CID.
	ChainDeleteObj(context.Context, cid.Cid) error //perm:admin

	// ChainHasObj checks if a given CID exists in the chain blockstore.
	ChainHasObj(context.Context, cid.Cid) (bool, error) //perm:read

	// ChainStatObj returns statistics about the graph referenced by 'obj'.
	// If 'base' is also specified, then the returned stat will be a diff
	// between the two objects.
	ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) //perm:read

	// ChainSetHead forcefully sets the current chain head. Use with caution.
	ChainSetHead(context.Context, types.TipSetKey) error //perm:admin

	// ChainGetGenesis returns the genesis tipset.
	ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read

	// ChainTipSetWeight computes the weight for the specified tipset.
	ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) //perm:read
	ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) //perm:read

	// ChainGetMessage reads a message referenced by the specified CID from the
	// chain blockstore.
	ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) //perm:read

	// ChainGetPath returns a set of revert/apply operations needed to get from
	// one tipset to another, for example:
	//```
	//        to
	//         ^
	// from   tAA
	//   ^     ^
	// tBA    tAB
	//  ^---*--^
	//      ^
	//     tRR
	//```
	// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
	ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) //perm:read

	// ChainExport returns a stream of bytes with a CAR dump of chain data.
	// The exported chain data includes the header chain from the given tipset
	// back to genesis, the entire genesis state, and the most recent 'nroots'
	// state trees.
	// If oldmsgskip is set, messages from before the requested roots are also not included.
	ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read
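	//
	// Illustrative sketch (not part of the interface): writing the exported
	// stream to a CAR file. `node` and `head` are assumed to be set up by the
	// caller.
	//
	//```
	//  stream, err := node.ChainExport(ctx, 2000, false, head.Key())
	//  if err != nil { return err }
	//  f, _ := os.Create("chain.car")
	//  defer f.Close()
	//  for chunk := range stream {
	//      f.Write(chunk)
	//  }
	//```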

	// MethodGroup: Beacon
	// The Beacon method group contains methods for interacting with the random beacon (DRAND)

	// BeaconGetEntry returns the beacon entry for the given filecoin epoch. If
	// the entry has not yet been produced, the call will block until the entry
	// becomes available.
	BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read

	// GasEstimateFeeCap estimates the gas fee cap.
	GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read

	// GasEstimateGasLimit estimates the gas used by the message and returns it.
	// It fails if the message fails to execute.
	GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) //perm:read

	// GasEstimateGasPremium estimates what gas price should be used for a
	// message to have a high likelihood of inclusion in `nblocksincl` epochs.

	GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
		sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) //perm:read

	// GasEstimateMessageGas estimates gas values for unset message gas fields.
	GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) //perm:read
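	//
	// Illustrative sketch (not part of the interface): filling in gas fields
	// before signing a message manually. `node` and `msg` are assumed to be
	// set up by the caller.
	//
	//```
	//  estimated, err := node.GasEstimateMessageGas(ctx, msg, nil, types.EmptyTSK)
	//  if err != nil { return err }
	//  // estimated.GasLimit, GasFeeCap and GasPremium are now populated
	//```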

	// MethodGroup: Sync
	// The Sync method group contains methods for interacting with and
	// observing the lotus sync service.

	// SyncState returns the current status of the lotus sync system.
	SyncState(context.Context) (*api.SyncState, error) //perm:read

	// SyncSubmitBlock can be used to submit a newly created block to the
	// network through this node.
	SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write

	// SyncIncomingBlocks returns a channel streaming incoming, potentially not
	// yet synced block headers.
	SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) //perm:read

	// SyncCheckpoint marks a block as checkpointed, meaning that it won't ever fork away from it.
	SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error //perm:admin

	// SyncMarkBad marks a block as bad, meaning that it won't ever be synced.
	// Use with extreme caution.
	SyncMarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin

	// SyncUnmarkBad unmarks a block as bad, making it possible to be validated and synced again.
	SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin

	// SyncUnmarkAllBad purges the bad block cache, making it possible to sync to chains previously marked as bad.
	SyncUnmarkAllBad(ctx context.Context) error //perm:admin

	// SyncCheckBad checks if a block was marked as bad, and if it was, returns
	// the reason.
	SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) //perm:read

	// SyncValidateTipset indicates whether the provided tipset is valid or not.
	SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) //perm:read

	// MethodGroup: Mpool
	// The Mpool methods are for interacting with the message pool. The message pool
	// manages all incoming and outgoing 'messages' going over the network.

	// MpoolPending returns pending mempool messages.
	MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) //perm:read

	// MpoolSelect returns a list of pending messages for inclusion in the next block.
	MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read

	// MpoolPush pushes a signed message to the mempool.
	MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write

	// MpoolPushUntrusted pushes a signed message to the mempool from untrusted sources.
	MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write

	// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
	// to the mempool.
	// maxFee is only used when GasFeeCap/GasPremium fields aren't specified.
	//
	// When maxFee is set to 0, MpoolPushMessage will guess an appropriate fee
	// based on current chain conditions.
	MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) //perm:sign
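	//
	// Illustrative sketch (not part of the interface): pushing a bare send
	// with a fee cap. `node`, `from` and `dest` are assumed to be set up by
	// the caller.
	//
	//```
	//  msg := &types.Message{From: from, To: dest, Value: types.FromFil(1)}
	//  spec := &api.MessageSendSpec{MaxFee: types.FromFil(1)}
	//  sm, err := node.MpoolPushMessage(ctx, msg, spec)
	//  if err != nil { return err }
	//  fmt.Println("message CID:", sm.Cid())
	//```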

	// MpoolBatchPush batch pushes signed messages to the mempool.
	MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write

	// MpoolBatchPushUntrusted batch pushes signed messages to the mempool from untrusted sources.
	MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write

	// MpoolBatchPushMessage batch pushes unsigned messages to the mempool.
	MpoolBatchPushMessage(context.Context, []*types.Message, *api.MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign

	// MpoolGetNonce gets the next nonce for the specified sender.
	// Note that this method may not be atomic. Use MpoolPushMessage instead.
	MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
	MpoolSub(context.Context) (<-chan api.MpoolUpdate, error) //perm:read

	// MpoolClear clears pending messages from the mpool.
	MpoolClear(context.Context, bool) error //perm:write

	// MpoolGetConfig returns (a copy of) the current mpool config.
	MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
	// MpoolSetConfig sets the mpool config to (a copy of) the supplied config.
	MpoolSetConfig(context.Context, *types.MpoolConfig) error //perm:admin

	// MethodGroup: Miner

	MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) //perm:read
	MinerCreateBlock(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) //perm:write

	// // UX ?

	// MethodGroup: Wallet

	// WalletNew creates a new address in the wallet with the given sigType.
	// Available key types: bls, secp256k1, secp256k1-ledger
	// Support for the numerical types (1 - secp256k1, 2 - BLS) is deprecated.
	WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:write
	// WalletHas indicates whether the given address is in the wallet.
	WalletHas(context.Context, address.Address) (bool, error) //perm:write
	// WalletList lists all the addresses in the wallet.
	WalletList(context.Context) ([]address.Address, error) //perm:write
	// WalletBalance returns the balance of the given address at the current head of the chain.
	WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read
	// WalletSign signs the given bytes using the given address.
	WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) //perm:sign
	// WalletSignMessage signs the given message using the given address.
	WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) //perm:sign
	// WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
	// The address does not have to be in the wallet.
	WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
	// WalletDefaultAddress returns the address marked as default in the wallet.
	WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
	// WalletSetDefault marks the given address as the default one.
	WalletSetDefault(context.Context, address.Address) error //perm:write
	// WalletExport returns the private key of an address in the wallet.
	WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
	// WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
	WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
	// WalletDelete deletes an address from the wallet.
	WalletDelete(context.Context, address.Address) error //perm:admin
	// WalletValidateAddress validates whether a given string can be decoded as a well-formed address.
	WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read

	// Other

	// MethodGroup: Client
	// The Client methods all have to do with interacting with the storage and
	// retrieval markets as a client.

	// ClientImport imports a file under the specified path into the filestore.
	ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) //perm:admin
	// ClientRemoveImport removes a file import.
	ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin
	// ClientStartDeal proposes a deal with a miner.
	ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin
	// ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
	ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:write
	// ClientGetDealInfo returns the latest information about a given deal.
	ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read
	// ClientListDeals returns information about the deals made by the local client.
	ClientListDeals(ctx context.Context) ([]api.DealInfo, error) //perm:write
	// ClientGetDealUpdates returns the status of updated deals.
	ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) //perm:write
	// ClientGetDealStatus returns the status string for a given status code.
	ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
	// ClientHasLocal indicates whether a certain CID is locally stored.
	ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
	// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
	ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) //perm:read
	// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
	ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read
	// ClientRetrieve initiates the retrieval of a file, as specified in the order.
	ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error //perm:admin
	// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
	// of status updates.
	ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
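	//
	// Illustrative sketch (not part of the interface): following retrieval
	// progress. `node` and `order` are assumed to be set up by the caller.
	//
	//```
	//  events, err := node.ClientRetrieveWithEvents(ctx, order, &api.FileRef{Path: "/tmp/out"})
	//  if err != nil { return err }
	//  for e := range events {
	//      if e.Err != "" { return xerrors.New(e.Err) }
	//      fmt.Printf("received %d bytes\n", e.BytesReceived)
	//  }
	//```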
	// ClientListRetrievals returns information about retrievals made by the local client.
	ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write
	// ClientGetRetrievalUpdates returns the status of updated retrieval deals.
	ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) //perm:write
	// ClientQueryAsk returns a signed StorageAsk from the specified miner.
	ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
	// ClientDealPieceCID calculates the CommP and data size of the specified CID.
	ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read
	// ClientCalcCommP calculates the CommP for a specified file.
	ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) //perm:write
	// ClientGenCar generates a CAR file for the specified file.
	ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error //perm:write
	// ClientDealSize calculates the real deal data size.
	ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) //perm:read
	// ClientListDataTransfers returns the status of all ongoing transfers of data.
	ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) //perm:write
	ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) //perm:write
	// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer.
	ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
	// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer.
	ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
	// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
	// which are stuck due to insufficient funds.
	ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write

	// ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID.
	ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write

	// ClientUnimport removes references to the specified file from the filestore.
	//ClientUnimport(path string)

	// ClientListImports lists imported files and their root CIDs.
	ClientListImports(ctx context.Context) ([]api.Import, error) //perm:write

	//ClientListAsks() []Ask

	// MethodGroup: State
	// The State methods are used to query, inspect, and interact with chain state.
	// Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
	// A nil TipSetKey can be provided as a param; this will cause the heaviest tipset in the chain to be used.

	// StateCall runs the given message and returns its result without any persisted changes.
	//
	// StateCall applies the message to the tipset's parent state. The
	// message is not applied on-top-of the messages in the passed-in
	// tipset.
	StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) //perm:read
	// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
	//
	// If a tipset key is provided, and a replacing message is found on chain,
	// the method will return an error saying that the message wasn't found.
	//
	// If no tipset key is provided, the appropriate tipset is looked up, and if
	// the message was gas-repriced, the on-chain message will be replayed - in
	// that case the returned InvocResult.MsgCid will not match the Cid param.
	//
	// If the caller wants to ensure that exactly the requested message was executed,
	// they MUST check that InvocResult.MsgCid is equal to the provided Cid.
	// Without this check both the requested and original message may appear as
	// successfully executed on-chain, which may look like a double-spend.
	//
	// A replacing message is a message with a different CID (and possibly
	// different gas values and signature), but with all other parameters
	// matching (source/destination, nonce, params, etc.)
	StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) //perm:read
	// StateGetActor returns the indicated actor's nonce and balance.
	StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read
	// StateReadState returns the indicated actor's state.
	StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) //perm:read
	// StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
	StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read
	// StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number.
	StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read

	// StateNetworkName returns the name of the network the node is synced to.
	StateNetworkName(context.Context) (dtypes.NetworkName, error) //perm:read
	// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
	StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
	// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
	StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
	// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
	// and returns the deadline-related calculations.
	StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) //perm:read
	// StateMinerPower returns the power of the indicated miner.
	StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) //perm:read
	// StateMinerInfo returns info about the indicated miner.
	StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) //perm:read
	// StateMinerDeadlines returns all the proving deadlines for the given miner.
	StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) //perm:read
	// StateMinerPartitions returns all partitions in the specified deadline.
	StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) //perm:read
	// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner.
	StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
	// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset.
	StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*api.Fault, error) //perm:read
	// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner.
	StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
	// StateMinerPreCommitDepositForPower returns the precommit deposit for the specified miner's sector.
	StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
	// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector.
	StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
	// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent.
	StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
	// StateMinerSectorAllocated checks if a sector number is marked as allocated.
	StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) //perm:read
	// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector.
	StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) //perm:read
	// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found.
	// NOTE: returned info.Expiration may not be accurate in some cases; use StateSectorExpiration to get the accurate
	// expiration epoch.
	StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read
	// StateSectorExpiration returns the epoch at which the given sector will expire.
	StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error) //perm:read
	// StateSectorPartition finds the deadline/partition with the specified sector.
	StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) //perm:read
	// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed.
	//
	// NOTE: If a replacing message is found on chain, this method will return
	// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
	// CID than the one provided in the 'cid' param, and MsgLookup.Receipt will contain the
	// result of the execution of the replacing message.
	//
	// If the caller wants to ensure that exactly the requested message was executed,
	// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
	// Without this check both the requested and original message may appear as
	// successfully executed on-chain, which may look like a double-spend.
	//
	// A replacing message is a message with a different CID (and possibly
	// different gas values and signature), but with all other parameters
	// matching (source/destination, nonce, params, etc.)
	StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error) //perm:read
	// StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed.
	//
	// NOTE: If a replacing message is found on chain, this method will return
	// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
	// CID than the one provided in the 'cid' param, and MsgLookup.Receipt will contain the
	// result of the execution of the replacing message.
	//
	// If the caller wants to ensure that exactly the requested message was executed,
	// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
	// Without this check both the requested and original message may appear as
	// successfully executed on-chain, which may look like a double-spend.
	//
	// A replacing message is a message with a different CID (and possibly
	// different gas values and signature), but with all other parameters
	// matching (source/destination, nonce, params, etc.)
	StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) //perm:read
	// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
	// message arrives on chain, and gets to the indicated confidence depth.
	//
	// NOTE: If a replacing message is found on chain, this method will return
	// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
	// CID than the one provided in the 'cid' param, and MsgLookup.Receipt will contain the
	// result of the execution of the replacing message.
	//
	// If the caller wants to ensure that exactly the requested message was executed,
	// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
	// Without this check both the requested and original message may appear as
	// successfully executed on-chain, which may look like a double-spend.
	//
	// A replacing message is a message with a different CID (and possibly
	// different gas values and signature), but with all other parameters
	// matching (source/destination, nonce, params, etc.)
	StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) //perm:read
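	//
	// Illustrative sketch (not part of the interface): performing the
	// replacement check described above. `node` and `msgCid` are assumed to be
	// set up by the caller.
	//
	//```
	//  lookup, err := node.StateWaitMsg(ctx, msgCid, 5)
	//  if err != nil { return err }
	//  if !lookup.Message.Equals(msgCid) {
	//      // the message was replaced (e.g. gas-repriced) before landing on chain
	//  }
	//```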
	// StateWaitMsgLimited looks back up to limit epochs in the chain for a message.
	// If not found, it blocks until the message arrives on chain, and gets to the
	// indicated confidence depth.
	//
	// NOTE: If a replacing message is found on chain, this method will return
	// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
	// CID than the one provided in the 'cid' param, and MsgLookup.Receipt will contain the
	// result of the execution of the replacing message.
	//
	// If the caller wants to ensure that exactly the requested message was executed,
	// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
	// Without this check both the requested and original message may appear as
	// successfully executed on-chain, which may look like a double-spend.
	//
	// A replacing message is a message with a different CID (and possibly
	// different gas values and signature), but with all other parameters
	// matching (source/destination, nonce, params, etc.)
	StateWaitMsgLimited(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) //perm:read
	// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor.
	StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
	// StateListActors returns the addresses of every actor in the state.
	StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
	// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market.
	StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) //perm:read
	// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market.
	StateMarketParticipants(context.Context, types.TipSetKey) (map[string]api.MarketBalance, error) //perm:read
	// StateMarketDeals returns information about every deal in the Storage Market.
	StateMarketDeals(context.Context, types.TipSetKey) (map[string]api.MarketDeal, error) //perm:read
	// StateMarketStorageDeal returns information about the indicated deal.
	StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) //perm:read
	// StateLookupID retrieves the ID address of the given address.
	StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
	// StateAccountKey returns the public key address of the given ID address.
	StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
	// StateChangedActors returns all the actors whose states change between the two given state CIDs.
	// TODO: Should this take tipset keys instead?
	StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read
	// StateGetReceipt returns the message receipt for the given message or for a
	// matching gas-repriced replacing message.
	//
	// NOTE: If the requested message was replaced, this method will return the receipt
	// for the replacing message - if the caller needs the receipt for exactly the
	// requested message, use StateSearchMsg().Receipt, and check that MsgLookup.Message
	// matches the requested CID.
	//
	// DEPRECATED: Use StateSearchMsg; this method won't be supported in the v1 API.
	StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) //perm:read
	// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set.
	StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) //perm:read
	// StateCompute is a flexible command that applies the given messages on the given tipset.
	// The messages are run as though the VM were at the provided height.
	//
	// When called, StateCompute will:
	// - Load the provided tipset, or use the current chain head if not provided
	// - Compute the tipset state of the provided tipset on top of the parent state
	//   - (note that this step runs before vmheight is applied to the execution)
	//   - Execute state upgrade if any were scheduled at the epoch, or in null
	//     blocks preceding the tipset
	//   - Call the cron actor on null blocks preceding the tipset
	//   - For each block in the tipset
	//     - Apply messages in blocks in the specified tipset
	//     - Award block reward by calling the reward actor
	//   - Call the cron actor for the current epoch
	// - If the specified vmheight is higher than the current epoch, apply any
	//   needed state upgrades to the state
	// - Apply the specified messages to the state
	//
	// The vmheight parameter sets the VM execution epoch, and can be used to simulate
	// message execution in different network versions. If the specified vmheight
	// epoch is higher than the epoch of the specified tipset, any state upgrades
	// until the vmheight will be executed on the state before applying messages
	// specified by the user.
	//
	// Note that the initial tipset state computation is not affected by the
	// vmheight parameter - only the messages in the `apply` set are.
	//
	// If the caller wants to simply compute the state, vmheight should be set to
	// the epoch of the specified tipset.
	//
	// Messages in the `apply` parameter must have the correct nonces and gas
	// values set.
	StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) //perm:read
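	//
	// Illustrative sketch (not part of the interface): simply computing the
	// state of a tipset, with vmheight set to the tipset's own epoch as
	// recommended above. `node` is assumed to be a connected FullNode client.
	//
	//```
	//  ts, _ := node.ChainHead(ctx)
	//  out, err := node.StateCompute(ctx, ts.Height(), nil, ts.Key())
	//  // out.Root is the computed state root; no extra messages were applied
	//```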
	// StateVerifierStatus returns the data cap for the given address.
	// Returns nil if there is no entry in the data cap table for the
	// address.
	StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
	// StateVerifiedClientStatus returns the data cap for the given address.
	// Returns nil if there is no entry in the data cap table for the
	// address.
	StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
	// StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key.
	StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read
	// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
	// can issue. It takes the deal size and verified status as parameters.
	StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) //perm:read

	// StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset.
	// This is not used anywhere in the protocol itself, and is only for external consumption.
	StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) //perm:read
	// StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset.
	// This is the value reported by the runtime interface to actors code.
	StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) //perm:read
	// StateNetworkVersion returns the network version at the given tipset.
	StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read

	// MethodGroup: Msig
	// The Msig methods are used to interact with multisig wallets on the
	// filecoin network.

	// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent.
	MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
	// MsigGetVestingSchedule returns the vesting details of a given multisig.
	MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error) //perm:read
	// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
	// It takes the following params: <multisig address>, <start epoch>, <end epoch>
	MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read

	// MsigGetPending returns pending transactions for the given multisig
	// wallet. Once pending transactions are fully approved, they will no longer
	// appear here.
	MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error) //perm:read

	// MsigCreate creates a multisig wallet.
	// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>,
	// <initial balance>, <sender address of the create msg>, <gas price>
	MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) //perm:sign
	// MsigPropose proposes a multisig message.
	// It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
	// <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
	MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
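	//
	// Illustrative sketch (not part of the interface): proposing a plain
	// transfer of 1 FIL out of a multisig (method 0 is a bare send, so params
	// are nil). `node`, `msig`, `dest` and `proposer` are assumed to be set up
	// by the caller.
	//
	//```
	//  c, err := node.MsigPropose(ctx, msig, dest, types.FromFil(1), proposer, 0, nil)
	//  if err != nil { return err }
	//  // c is the CID of the propose message; wait on it with StateWaitMsg
	//```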

	// MsigApprove approves a previously-proposed multisig message by transaction ID.
	// It takes the following params: <multisig address>, <proposed transaction ID>, <signer address>
	MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) //perm:sign

	// MsigApproveTxnHash approves a previously-proposed multisig message, specified
	// using both transaction ID and a hash of the parameters used in the
	// proposal. This method of approval can be used to ensure you only approve
	// exactly the transaction you think you are.
	// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
	// <sender address of the approve msg>, <method to call in the approved message>, <params to include in the proposed message>
	MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign

	// MsigCancel cancels a previously-proposed multisig message.
	// It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
	// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
	MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
	// MsigAddPropose proposes adding a signer to the multisig.
	// It takes the following params: <multisig address>, <sender address of the propose msg>,
	// <new signer>, <whether the number of required signers should be increased>
	MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign
	// MsigAddApprove approves a previously proposed AddSigner message.
	// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
	// <proposer address>, <new signer>, <whether the number of required signers should be increased>
	MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign
	// MsigAddCancel cancels a previously proposed AddSigner message.
	// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
	// <new signer>, <whether the number of required signers should be increased>
	MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) //perm:sign
	// MsigSwapPropose proposes swapping 2 signers in the multisig.
	// It takes the following params: <multisig address>, <sender address of the propose msg>,
	// <old signer>, <new signer>
	MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign
	// MsigSwapApprove approves a previously proposed SwapSigner.
	// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
	// <proposer address>, <old signer>, <new signer>
	MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign
	// MsigSwapCancel cancels a previously proposed SwapSigner message.
	// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
	// <old signer>, <new signer>
	MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) //perm:sign

	// MsigRemoveSigner proposes the removal of a signer from the multisig.
	// It accepts the multisig to make the change on, the proposer address to
	// send the message from, the address to be removed, and a boolean
	// indicating whether or not the signing threshold should be lowered by one
	// along with the address removal.
	MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) //perm:sign

	// MarketAddBalance adds funds to the market actor.
	MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
	// MarketGetReserved gets the amount of funds that are currently reserved for the address.
	MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign
	// MarketReserveFunds reserves funds for a deal.
	MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
	// MarketReleaseFunds releases funds reserved by MarketReserveFunds.
	MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign
	// MarketWithdraw withdraws unlocked funds from the market actor.
	MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign

	// MethodGroup: Paych
	// The Paych methods are for interacting with and managing payment channels.

	PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) //perm:sign
	PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign
	PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) //perm:sign
	PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) //perm:sign
	PaychList(context.Context) ([]address.Address, error) //perm:read
	PaychStatus(context.Context, address.Address) (*api.PaychStatus, error) //perm:read
	PaychSettle(context.Context, address.Address) (cid.Cid, error) //perm:sign
	PaychCollect(context.Context, address.Address) (cid.Cid, error) //perm:sign
	PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign
	PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) //perm:sign
	PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error //perm:read
	PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) //perm:read
	PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*api.VoucherCreateResult, error) //perm:sign
	PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) //perm:write
	PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write
	PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign

	// CreateBackup creates a node backup under the specified file name. The
	// method requires that the lotus daemon is running with the
	// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
	// the path specified when calling CreateBackup is within the base path.
	CreateBackup(ctx context.Context, fpath string) error //perm:admin
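	//
	// Illustrative sketch (not part of the interface): with the daemon started
	// under LOTUS_BACKUP_BASE_PATH=/backups, a backup can be written to a path
	// inside that directory (the file name below is hypothetical):
	//
	//```
	//  err := node.CreateBackup(ctx, "/backups/node.bak")
	//```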
}

api/v0api/gateway.go (new file, 69 lines)
@ -0,0 +1,69 @@
package v0api

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/dline"
	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
)

// MODIFYING THE API INTERFACE
//
// NOTE: This is the V0 (Stable) API - when adding methods to this interface,
// you'll need to make sure they are also present on the V1 (Unstable) API
//
// This API is implemented in `v1_wrapper.go` as a compatibility layer backed
// by the V1 api
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
//  * Generate proxy structs
//  * Generate mocks
//  * Generate markdown docs
//  * Generate openrpc blobs

type Gateway interface {
	ChainHasObj(context.Context, cid.Cid) (bool, error)
	ChainHead(ctx context.Context) (*types.TipSet, error)
	ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
	ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
	ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
	ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
	ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
	ChainReadObj(context.Context, cid.Cid) ([]byte, error)
	GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
	MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
	MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
	MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
	MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error)
	StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
	StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
	StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
	StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
	StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
	StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
	StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
	StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
	StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error)
	StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
	StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
	StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
	StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
	StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
	StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
	StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error)
	WalletBalance(context.Context, address.Address) (types.BigInt, error)
	Version(context.Context) (api.APIVersion, error)
}

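// Compile-time assertion that the FullNode interface includes every Gateway
// method, so a FullNode can always serve as a Gateway.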
var _ Gateway = *new(FullNode)
api/v0api/latest.go (new file, 32 lines)
@ -0,0 +1,32 @@
package v0api

import (
	"github.com/filecoin-project/lotus/api"
)

type Common = api.Common
type Net = api.Net
type CommonNet = api.CommonNet

type CommonStruct = api.CommonStruct
type CommonStub = api.CommonStub
type NetStruct = api.NetStruct
type NetStub = api.NetStub
type CommonNetStruct = api.CommonNetStruct
type CommonNetStub = api.CommonNetStub

type StorageMiner = api.StorageMiner
type StorageMinerStruct = api.StorageMinerStruct

type Worker = api.Worker
type WorkerStruct = api.WorkerStruct

type Wallet = api.Wallet

func PermissionedStorMinerAPI(a StorageMiner) StorageMiner {
	return api.PermissionedStorMinerAPI(a)
}

func PermissionedWorkerAPI(a Worker) Worker {
	return api.PermissionedWorkerAPI(a)
}
api/v0api/permissioned.go (new file, 13 lines)
@ -0,0 +1,13 @@
package v0api

import (
	"github.com/filecoin-project/go-jsonrpc/auth"
	"github.com/filecoin-project/lotus/api"
)

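// PermissionedFullAPI wraps a FullNode in an auth proxy that enforces the
// per-method permissions declared with the //perm: annotations on the
// interface.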
func PermissionedFullAPI(a FullNode) FullNode {
	var out FullNodeStruct
	auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.Internal)
	auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.CommonStruct.Internal)
	return &out
}
api/v0api/proxy_gen.go (new file, 2132 lines)
File diff suppressed because it is too large

api/v0api/v0mocks/mock_full.go (new file, 3079 lines)
File diff suppressed because it is too large

api/v0api/v1_wrapper.go (new file, 187 lines)
@ -0,0 +1,187 @@
package v0api

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/types"
	"golang.org/x/xerrors"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
)

type WrapperV1Full struct {
|
||||||
|
v1api.FullNode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) {
|
||||||
|
return w.FullNode.StateSearchMsg(ctx, types.EmptyTSK, msg, api.LookbackNoLimit, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) {
|
||||||
|
return w.FullNode.StateSearchMsg(ctx, types.EmptyTSK, msg, limit, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) {
|
||||||
|
return w.FullNode.StateWaitMsg(ctx, msg, confidence, api.LookbackNoLimit, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) {
|
||||||
|
return w.FullNode.StateWaitMsg(ctx, msg, confidence, limit, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) StateGetReceipt(ctx context.Context, msg cid.Cid, from types.TipSetKey) (*types.MessageReceipt, error) {
|
||||||
|
ml, err := w.FullNode.StateSearchMsg(ctx, from, msg, api.LookbackNoLimit, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if ml == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ml.Receipt, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) Version(ctx context.Context) (api.APIVersion, error) {
|
||||||
|
ver, err := w.FullNode.Version(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return api.APIVersion{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ver.APIVersion = api.FullAPIVersion0
|
||||||
|
|
||||||
|
return ver, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) executePrototype(ctx context.Context, p *api.MessagePrototype) (cid.Cid, error) {
|
||||||
|
sm, err := w.FullNode.MpoolPushMessage(ctx, &p.Message, nil)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("pushing message: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return sm.Cid(), nil
|
||||||
|
}
|
||||||
|
func (w *WrapperV1Full) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
|
||||||
|
|
||||||
|
p, err := w.FullNode.MsigCreate(ctx, req, addrs, duration, val, src, gp)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
|
||||||
|
|
||||||
|
p, err := w.FullNode.MsigPropose(ctx, msig, to, amt, src, method, params)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
func (w *WrapperV1Full) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) {
|
||||||
|
|
||||||
|
p, err := w.FullNode.MsigApprove(ctx, msig, txID, src)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
|
||||||
|
p, err := w.FullNode.MsigApproveTxnHash(ctx, msig, txID, proposer, to, amt, src, method, params)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
|
||||||
|
p, err := w.FullNode.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
|
||||||
|
|
||||||
|
p, err := w.FullNode.MsigAddPropose(ctx, msig, src, newAdd, inc)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
|
||||||
|
|
||||||
|
p, err := w.FullNode.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) {
|
||||||
|
|
||||||
|
p, err := w.FullNode.MsigAddCancel(ctx, msig, src, txID, newAdd, inc)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
|
||||||
|
|
||||||
|
p, err := w.FullNode.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
|
||||||
|
|
||||||
|
p, err := w.FullNode.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
|
||||||
|
|
||||||
|
p, err := w.FullNode.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WrapperV1Full) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) {
|
||||||
|
|
||||||
|
p, err := w.FullNode.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease)
|
||||||
|
if err != nil {
|
||||||
|
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.executePrototype(ctx, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ FullNode = &WrapperV1Full{}
|
||||||
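The wrapper above serves v0-era callers from a v1 node by pinning the lookback and replacement arguments that the v1 methods added. A minimal sketch of constructing it; `adaptToV0` and `searchV0` are illustrative helpers, not part of this diff:

package example

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/api/v1api"
)

// adaptToV0 embeds a v1 node in the wrapper so it can be handed to v0 callers.
func adaptToV0(node v1api.FullNode) v0api.FullNode {
	return &v0api.WrapperV1Full{FullNode: node}
}

// searchV0 then exposes the old single-CID StateSearchMsg semantics.
func searchV0(ctx context.Context, node v1api.FullNode, c cid.Cid) error {
	_, err := adaptToV0(node).StateSearchMsg(ctx, c)
	return err
}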
12	api/v1api/latest.go	Normal file
@ -0,0 +1,12 @@
package v1api

import (
	"github.com/filecoin-project/lotus/api"
)

type FullNode = api.FullNode
type FullNodeStruct = api.FullNodeStruct

func PermissionedFullAPI(a FullNode) FullNode {
	return api.PermissionedFullAPI(a)
}
73	api/version.go	Normal file
@ -0,0 +1,73 @@
package api

import (
	"fmt"

	xerrors "golang.org/x/xerrors"
)

type Version uint32

func newVer(major, minor, patch uint8) Version {
	return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch))
}

// Ints returns (major, minor, patch) versions
func (ve Version) Ints() (uint32, uint32, uint32) {
	v := uint32(ve)
	return (v & majorOnlyMask) >> 16, (v & minorOnlyMask) >> 8, v & patchOnlyMask
}

func (ve Version) String() string {
	vmj, vmi, vp := ve.Ints()
	return fmt.Sprintf("%d.%d.%d", vmj, vmi, vp)
}

func (ve Version) EqMajorMinor(v2 Version) bool {
	return ve&minorMask == v2&minorMask
}

type NodeType int

const (
	NodeUnknown NodeType = iota

	NodeFull
	NodeMiner
	NodeWorker
)

var RunningNodeType NodeType

func VersionForType(nodeType NodeType) (Version, error) {
	switch nodeType {
	case NodeFull:
		return FullAPIVersion1, nil
	case NodeMiner:
		return MinerAPIVersion0, nil
	case NodeWorker:
		return WorkerAPIVersion0, nil
	default:
		return Version(0), xerrors.Errorf("unknown node type %d", nodeType)
	}
}

// semver versions of the rpc api exposed
var (
	FullAPIVersion0 = newVer(1, 3, 0)
	FullAPIVersion1 = newVer(2, 1, 0)

	MinerAPIVersion0  = newVer(1, 2, 0)
	WorkerAPIVersion0 = newVer(1, 1, 0)
)

//nolint:varcheck,deadcode
const (
	majorMask = 0xff0000
	minorMask = 0xffff00
	patchMask = 0xffffff

	majorOnlyMask = 0xff0000
	minorOnlyMask = 0x00ff00
	patchOnlyMask = 0x0000ff
)
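A quick worked example of the bit-packing above: newVer(1, 3, 0) stores 1<<16 | 3<<8 | 0 = 0x010300 = 66304, Ints() recovers (1, 3, 0) by masking, and EqMajorMinor masks with 0xffff00 so the patch byte is ignored. A standalone sketch of the same arithmetic (the pack/unpack names are illustrative):

package main

import "fmt"

// Mirrors the packing scheme above: 8 bits each for major, minor, patch.
func pack(major, minor, patch uint8) uint32 {
	return uint32(major)<<16 | uint32(minor)<<8 | uint32(patch)
}

func unpack(v uint32) (uint32, uint32, uint32) {
	return (v & 0xff0000) >> 16, (v & 0x00ff00) >> 8, v & 0x0000ff
}

func main() {
	v := pack(1, 3, 0)
	fmt.Printf("0x%06x = %d\n", v, v) // 0x010300 = 66304
	fmt.Println(unpack(v))            // 1 3 0
	// Masking with 0xffff00 makes 1.3.0 and 1.3.9 compare equal:
	fmt.Println(pack(1, 3, 0)&0xffff00 == pack(1, 3, 9)&0xffff00) // true
}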
53	api/wrap.go	Normal file
@ -0,0 +1,53 @@
package api

import (
	"reflect"
)

// Wrap adapts partial api impl to another version
// proxyT is the proxy type used as input in wrapperT
// Usage: Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), eventsApi).(EventAPI)
func Wrap(proxyT, wrapperT, impl interface{}) interface{} {
	proxy := reflect.New(reflect.TypeOf(proxyT).Elem())
	proxyMethods := proxy.Elem().FieldByName("Internal")
	ri := reflect.ValueOf(impl)

	for i := 0; i < ri.NumMethod(); i++ {
		mt := ri.Type().Method(i)
		if proxyMethods.FieldByName(mt.Name).Kind() == reflect.Invalid {
			continue
		}

		fn := ri.Method(i)
		of := proxyMethods.FieldByName(mt.Name)

		proxyMethods.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) {
			return fn.Call(args)
		}))
	}

	for i := 0; i < proxy.Elem().NumField(); i++ {
		if proxy.Elem().Type().Field(i).Name == "Internal" {
			continue
		}

		subProxy := proxy.Elem().Field(i).FieldByName("Internal")
		for i := 0; i < ri.NumMethod(); i++ {
			mt := ri.Type().Method(i)
			if subProxy.FieldByName(mt.Name).Kind() == reflect.Invalid {
				continue
			}

			fn := ri.Method(i)
			of := subProxy.FieldByName(mt.Name)

			subProxy.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) {
				return fn.Call(args)
			}))
		}
	}

	wp := reflect.New(reflect.TypeOf(wrapperT).Elem())
	wp.Elem().Field(0).Set(proxy)
	return wp.Interface()
}
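Expanding the Usage comment above: Wrap reflectively copies whatever methods `impl` provides into the function fields of the proxy struct's Internal member, then embeds the populated proxy as the wrapper's first field. A sketch of the intended call shape, assuming `partial` implements enough of the v1 surface for its callers (the type assertion is safe because v1_wrapper.go asserts `var _ FullNode = &WrapperV1Full{}`):

package example

import (
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v0api"
	"github.com/filecoin-project/lotus/api/v1api"
)

// toV0 is an illustrative helper, not part of this diff: it adapts a partial
// v1 implementation into a value usable through the v0 API.
func toV0(partial interface{}) v0api.FullNode {
	return api.Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), partial).(v0api.FullNode)
}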
66	blockstore/api.go	Normal file
@ -0,0 +1,66 @@
package blockstore

import (
	"context"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"
)

type ChainIO interface {
	ChainReadObj(context.Context, cid.Cid) ([]byte, error)
	ChainHasObj(context.Context, cid.Cid) (bool, error)
}

type apiBlockstore struct {
	api ChainIO
}

// This blockstore is adapted in the constructor.
var _ BasicBlockstore = (*apiBlockstore)(nil)

func NewAPIBlockstore(cio ChainIO) Blockstore {
	bs := &apiBlockstore{api: cio}
	return Adapt(bs) // return an adapted blockstore.
}

func (a *apiBlockstore) DeleteBlock(cid.Cid) error {
	return xerrors.New("not supported")
}

func (a *apiBlockstore) Has(c cid.Cid) (bool, error) {
	return a.api.ChainHasObj(context.TODO(), c)
}

func (a *apiBlockstore) Get(c cid.Cid) (blocks.Block, error) {
	bb, err := a.api.ChainReadObj(context.TODO(), c)
	if err != nil {
		return nil, err
	}
	return blocks.NewBlockWithCid(bb, c)
}

func (a *apiBlockstore) GetSize(c cid.Cid) (int, error) {
	bb, err := a.api.ChainReadObj(context.TODO(), c)
	if err != nil {
		return 0, err
	}
	return len(bb), nil
}

func (a *apiBlockstore) Put(blocks.Block) error {
	return xerrors.New("not supported")
}

func (a *apiBlockstore) PutMany([]blocks.Block) error {
	return xerrors.New("not supported")
}

func (a *apiBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	return nil, xerrors.New("not supported")
}

func (a *apiBlockstore) HashOnRead(enabled bool) {
	return
}
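ChainIO is a structural interface, so anything exposing ChainReadObj/ChainHasObj can back this read-only store; in practice that is a full node API client. A minimal sketch with a toy in-memory ChainIO (memChainIO is illustrative, not part of this diff):

package main

import (
	"context"

	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/blockstore"
)

// memChainIO stands in for a node's ChainReadObj/ChainHasObj RPCs.
type memChainIO struct {
	objs map[cid.Cid][]byte
}

func (m *memChainIO) ChainReadObj(_ context.Context, c cid.Cid) ([]byte, error) {
	b, ok := m.objs[c]
	if !ok {
		return nil, xerrors.New("not found")
	}
	return b, nil
}

func (m *memChainIO) ChainHasObj(_ context.Context, c cid.Cid) (bool, error) {
	_, ok := m.objs[c]
	return ok, nil
}

func main() {
	// Reads are proxied to the ChainIO; writes report "not supported".
	bs := blockstore.NewAPIBlockstore(&memChainIO{objs: map[cid.Cid][]byte{}})
	_, _ = bs.Has(cid.Undef)
}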
581	blockstore/badger/blockstore.go	Normal file
@ -0,0 +1,581 @@
package badgerbs

import (
	"context"
	"fmt"
	"io"
	"runtime"
	"sync"

	"github.com/dgraph-io/badger/v2"
	"github.com/dgraph-io/badger/v2/options"
	"github.com/multiformats/go-base32"
	"go.uber.org/zap"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	logger "github.com/ipfs/go-log/v2"
	pool "github.com/libp2p/go-buffer-pool"

	"github.com/filecoin-project/lotus/blockstore"
)

var (
	// KeyPool is the buffer pool we use to compute storage keys.
	KeyPool *pool.BufferPool = pool.GlobalPool
)

var (
	// ErrBlockstoreClosed is returned from blockstore operations after
	// the blockstore has been closed.
	ErrBlockstoreClosed = fmt.Errorf("badger blockstore closed")

	log = logger.Logger("badgerbs")
)

// aliases to mask badger dependencies.
const (
	// FileIO is equivalent to badger/options.FileIO.
	FileIO = options.FileIO
	// MemoryMap is equivalent to badger/options.MemoryMap.
	MemoryMap = options.MemoryMap
	// LoadToRAM is equivalent to badger/options.LoadToRAM.
	LoadToRAM = options.LoadToRAM
)

// Options embeds the badger options themselves, and augments them with
// blockstore-specific options.
type Options struct {
	badger.Options

	// Prefix is an optional prefix to prepend to keys. Default: "".
	Prefix string
}

func DefaultOptions(path string) Options {
	return Options{
		Options: badger.DefaultOptions(path),
		Prefix:  "",
	}
}

// badgerLogger is a local wrapper for go-log to make the interface
// compatible with badger.Logger (namely, aliasing Warnf to Warningf)
type badgerLogger struct {
	*zap.SugaredLogger // skips 1 caller to get useful line info, skipping over badger.Options.

	skip2 *zap.SugaredLogger // skips 2 callers, just like above + this logger.
}

// Warningf is required by the badger logger APIs.
func (b *badgerLogger) Warningf(format string, args ...interface{}) {
	b.skip2.Warnf(format, args...)
}

const (
	stateOpen = iota
	stateClosing
	stateClosed
)

// Blockstore is a badger-backed IPLD blockstore.
type Blockstore struct {
	stateLk sync.RWMutex
	state   int
	viewers sync.WaitGroup

	DB *badger.DB

	prefixing bool
	prefix    []byte
	prefixLen int
}

var _ blockstore.Blockstore = (*Blockstore)(nil)
var _ blockstore.Viewer = (*Blockstore)(nil)
var _ blockstore.BlockstoreIterator = (*Blockstore)(nil)
var _ blockstore.BlockstoreGC = (*Blockstore)(nil)
var _ io.Closer = (*Blockstore)(nil)

// Open creates a new badger-backed blockstore, with the supplied options.
func Open(opts Options) (*Blockstore, error) {
	opts.Logger = &badgerLogger{
		SugaredLogger: log.Desugar().WithOptions(zap.AddCallerSkip(1)).Sugar(),
		skip2:         log.Desugar().WithOptions(zap.AddCallerSkip(2)).Sugar(),
	}

	db, err := badger.Open(opts.Options)
	if err != nil {
		return nil, fmt.Errorf("failed to open badger blockstore: %w", err)
	}

	bs := &Blockstore{DB: db}
	if p := opts.Prefix; p != "" {
		bs.prefixing = true
		bs.prefix = []byte(p)
		bs.prefixLen = len(bs.prefix)
	}

	return bs, nil
}

// Close closes the store. If the store has already been closed, this noops and
// returns nil, even if the first closure resulted in error.
func (b *Blockstore) Close() error {
	b.stateLk.Lock()
	if b.state != stateOpen {
		b.stateLk.Unlock()
		return nil
	}
	b.state = stateClosing
	b.stateLk.Unlock()

	defer func() {
		b.stateLk.Lock()
		b.state = stateClosed
		b.stateLk.Unlock()
	}()

	// wait for all accesses to complete
	b.viewers.Wait()

	return b.DB.Close()
}

func (b *Blockstore) access() error {
	b.stateLk.RLock()
	defer b.stateLk.RUnlock()

	if b.state != stateOpen {
		return ErrBlockstoreClosed
	}

	b.viewers.Add(1)
	return nil
}

func (b *Blockstore) isOpen() bool {
	b.stateLk.RLock()
	defer b.stateLk.RUnlock()

	return b.state == stateOpen
}

// CollectGarbage runs garbage collection on the value log
func (b *Blockstore) CollectGarbage() error {
	if err := b.access(); err != nil {
		return err
	}
	defer b.viewers.Done()

	// compact first to gather the necessary statistics for GC
	nworkers := runtime.NumCPU() / 2
	if nworkers < 2 {
		nworkers = 2
	}

	err := b.DB.Flatten(nworkers)
	if err != nil {
		return err
	}

	for err == nil {
		err = b.DB.RunValueLogGC(0.125)
	}

	if err == badger.ErrNoRewrite {
		// not really an error in this case, it signals the end of GC
		return nil
	}

	return err
}

// View implements blockstore.Viewer, which leverages zero-copy read-only
// access to values.
func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error {
	if err := b.access(); err != nil {
		return err
	}
	defer b.viewers.Done()

	k, pooled := b.PooledStorageKey(cid)
	if pooled {
		defer KeyPool.Put(k)
	}

	return b.DB.View(func(txn *badger.Txn) error {
		switch item, err := txn.Get(k); err {
		case nil:
			return item.Value(fn)
		case badger.ErrKeyNotFound:
			return blockstore.ErrNotFound
		default:
			return fmt.Errorf("failed to view block from badger blockstore: %w", err)
		}
	})
}

// Has implements Blockstore.Has.
func (b *Blockstore) Has(cid cid.Cid) (bool, error) {
	if err := b.access(); err != nil {
		return false, err
	}
	defer b.viewers.Done()

	k, pooled := b.PooledStorageKey(cid)
	if pooled {
		defer KeyPool.Put(k)
	}

	err := b.DB.View(func(txn *badger.Txn) error {
		_, err := txn.Get(k)
		return err
	})

	switch err {
	case badger.ErrKeyNotFound:
		return false, nil
	case nil:
		return true, nil
	default:
		return false, fmt.Errorf("failed to check if block exists in badger blockstore: %w", err)
	}
}

// Get implements Blockstore.Get.
func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) {
	if !cid.Defined() {
		return nil, blockstore.ErrNotFound
	}

	if err := b.access(); err != nil {
		return nil, err
	}
	defer b.viewers.Done()

	k, pooled := b.PooledStorageKey(cid)
	if pooled {
		defer KeyPool.Put(k)
	}

	var val []byte
	err := b.DB.View(func(txn *badger.Txn) error {
		switch item, err := txn.Get(k); err {
		case nil:
			val, err = item.ValueCopy(nil)
			return err
		case badger.ErrKeyNotFound:
			return blockstore.ErrNotFound
		default:
			return fmt.Errorf("failed to get block from badger blockstore: %w", err)
		}
	})
	if err != nil {
		return nil, err
	}
	return blocks.NewBlockWithCid(val, cid)
}

// GetSize implements Blockstore.GetSize.
func (b *Blockstore) GetSize(cid cid.Cid) (int, error) {
	if err := b.access(); err != nil {
		return 0, err
	}
	defer b.viewers.Done()

	k, pooled := b.PooledStorageKey(cid)
	if pooled {
		defer KeyPool.Put(k)
	}

	var size int
	err := b.DB.View(func(txn *badger.Txn) error {
		switch item, err := txn.Get(k); err {
		case nil:
			size = int(item.ValueSize())
		case badger.ErrKeyNotFound:
			return blockstore.ErrNotFound
		default:
			return fmt.Errorf("failed to get block size from badger blockstore: %w", err)
		}
		return nil
	})
	if err != nil {
		size = -1
	}
	return size, err
}

// Put implements Blockstore.Put.
func (b *Blockstore) Put(block blocks.Block) error {
	if err := b.access(); err != nil {
		return err
	}
	defer b.viewers.Done()

	k, pooled := b.PooledStorageKey(block.Cid())
	if pooled {
		defer KeyPool.Put(k)
	}

	err := b.DB.Update(func(txn *badger.Txn) error {
		return txn.Set(k, block.RawData())
	})
	if err != nil {
		err = fmt.Errorf("failed to put block in badger blockstore: %w", err)
	}
	return err
}

// PutMany implements Blockstore.PutMany.
func (b *Blockstore) PutMany(blocks []blocks.Block) error {
	if err := b.access(); err != nil {
		return err
	}
	defer b.viewers.Done()

	// toReturn tracks the byte slices to return to the pool, if we're using key
	// prefixing. we can't return each slice to the pool after each Set, because
	// badger holds on to the slice.
	var toReturn [][]byte
	if b.prefixing {
		toReturn = make([][]byte, 0, len(blocks))
		defer func() {
			for _, b := range toReturn {
				KeyPool.Put(b)
			}
		}()
	}

	batch := b.DB.NewWriteBatch()
	defer batch.Cancel()

	for _, block := range blocks {
		k, pooled := b.PooledStorageKey(block.Cid())
		if pooled {
			toReturn = append(toReturn, k)
		}
		if err := batch.Set(k, block.RawData()); err != nil {
			return err
		}
	}

	err := batch.Flush()
	if err != nil {
		err = fmt.Errorf("failed to put blocks in badger blockstore: %w", err)
	}
	return err
}

// DeleteBlock implements Blockstore.DeleteBlock.
func (b *Blockstore) DeleteBlock(cid cid.Cid) error {
	if err := b.access(); err != nil {
		return err
	}
	defer b.viewers.Done()

	k, pooled := b.PooledStorageKey(cid)
	if pooled {
		defer KeyPool.Put(k)
	}

	return b.DB.Update(func(txn *badger.Txn) error {
		return txn.Delete(k)
	})
}

func (b *Blockstore) DeleteMany(cids []cid.Cid) error {
	if err := b.access(); err != nil {
		return err
	}
	defer b.viewers.Done()

	// toReturn tracks the byte slices to return to the pool, if we're using key
	// prefixing. we can't return each slice to the pool after each Delete, because
	// badger holds on to the slice.
	var toReturn [][]byte
	if b.prefixing {
		toReturn = make([][]byte, 0, len(cids))
		defer func() {
			for _, b := range toReturn {
				KeyPool.Put(b)
			}
		}()
	}

	batch := b.DB.NewWriteBatch()
	defer batch.Cancel()

	for _, cid := range cids {
		k, pooled := b.PooledStorageKey(cid)
		if pooled {
			toReturn = append(toReturn, k)
		}
		if err := batch.Delete(k); err != nil {
			return err
		}
	}

	err := batch.Flush()
	if err != nil {
		err = fmt.Errorf("failed to delete blocks from badger blockstore: %w", err)
	}
	return err
}

// AllKeysChan implements Blockstore.AllKeysChan.
func (b *Blockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	if err := b.access(); err != nil {
		return nil, err
	}

	txn := b.DB.NewTransaction(false)
	opts := badger.IteratorOptions{PrefetchSize: 100}
	if b.prefixing {
		opts.Prefix = b.prefix
	}
	iter := txn.NewIterator(opts)

	ch := make(chan cid.Cid)
	go func() {
		defer b.viewers.Done()
		defer close(ch)
		defer iter.Close()

		// NewCidV1 makes a copy of the multihash buffer, so we can reuse it to
		// contain allocs.
		var buf []byte
		for iter.Rewind(); iter.Valid(); iter.Next() {
			if ctx.Err() != nil {
				return // context has fired.
			}
			if !b.isOpen() {
				// open iterators will run even after the database is closed...
				return // closing, yield.
			}
			k := iter.Item().Key()
			if b.prefixing {
				k = k[b.prefixLen:]
			}

			if reqlen := base32.RawStdEncoding.DecodedLen(len(k)); len(buf) < reqlen {
				buf = make([]byte, reqlen)
			}
			if n, err := base32.RawStdEncoding.Decode(buf, k); err == nil {
				select {
				case ch <- cid.NewCidV1(cid.Raw, buf[:n]):
				case <-ctx.Done():
					return
				}
			} else {
				log.Warnf("failed to decode key %s in badger AllKeysChan; err: %s", k, err)
			}
		}
	}()

	return ch, nil
}

// ForEachKey implements the BlockstoreIterator interface.
func (b *Blockstore) ForEachKey(f func(cid.Cid) error) error {
	if err := b.access(); err != nil {
		return err
	}
	defer b.viewers.Done()

	txn := b.DB.NewTransaction(false)
	defer txn.Discard()

	opts := badger.IteratorOptions{PrefetchSize: 100}
	if b.prefixing {
		opts.Prefix = b.prefix
	}

	iter := txn.NewIterator(opts)
	defer iter.Close()

	var buf []byte
	for iter.Rewind(); iter.Valid(); iter.Next() {
		if !b.isOpen() {
			return ErrBlockstoreClosed
		}

		k := iter.Item().Key()
		if b.prefixing {
			k = k[b.prefixLen:]
		}

		klen := base32.RawStdEncoding.DecodedLen(len(k))
		if klen > len(buf) {
			buf = make([]byte, klen)
		}

		n, err := base32.RawStdEncoding.Decode(buf, k)
		if err != nil {
			return err
		}

		c := cid.NewCidV1(cid.Raw, buf[:n])

		err = f(c)
		if err != nil {
			return err
		}
	}

	return nil
}

// HashOnRead implements Blockstore.HashOnRead. It is not supported by this
// blockstore.
func (b *Blockstore) HashOnRead(_ bool) {
	log.Warnf("called HashOnRead on badger blockstore; function not supported; ignoring")
}

// PooledStorageKey returns the storage key under which this CID is stored.
//
// The key is: prefix + base32_no_padding(cid.Hash)
//
// This method may return a pooled byte slice, which MUST be returned to the
// KeyPool if pooled=true, or a leak will occur.
func (b *Blockstore) PooledStorageKey(cid cid.Cid) (key []byte, pooled bool) {
	h := cid.Hash()
	size := base32.RawStdEncoding.EncodedLen(len(h))
	if !b.prefixing { // optimize for branch prediction.
		k := pool.Get(size)
		base32.RawStdEncoding.Encode(k, h)
		return k, true // slicing up to length unnecessary; the pool has already done this.
	}

	size += b.prefixLen
	k := pool.Get(size)
	copy(k, b.prefix)
	base32.RawStdEncoding.Encode(k[b.prefixLen:], h)
	return k, true // slicing up to length unnecessary; the pool has already done this.
}

// StorageKey acts like PooledStorageKey, but attempts to write the storage key
// into the provided slice. If the slice capacity is insufficient, it allocates
// a new byte slice with enough capacity to accommodate the result. This method
// returns the resulting slice.
func (b *Blockstore) StorageKey(dst []byte, cid cid.Cid) []byte {
	h := cid.Hash()
	reqsize := base32.RawStdEncoding.EncodedLen(len(h)) + b.prefixLen
	if reqsize > cap(dst) {
		// passed slice is smaller than required size; create new.
		dst = make([]byte, reqsize)
	} else if reqsize > len(dst) {
		// passed slice has enough capacity, but its length is
		// restricted, expand.
		dst = dst[:cap(dst)]
	}

	if b.prefixing { // optimize for branch prediction.
		copy(dst, b.prefix)
		base32.RawStdEncoding.Encode(dst[b.prefixLen:], h)
	} else {
		base32.RawStdEncoding.Encode(dst, h)
	}
	return dst[:reqsize]
}
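To make the key scheme concrete: keys are prefix + base32 (no padding) of the CID's multihash. A default sha2-256 multihash is 34 bytes (2 header bytes + 32-byte digest), so the encoded key is ceil(34*8/5) = 55 bytes, which is the length the tests below assert. A small sketch of that encoding step:

package main

import (
	"fmt"

	blocks "github.com/ipfs/go-block-format"
	"github.com/multiformats/go-base32"
)

func main() {
	// Take the multihash of a block's CID and encode it the way the
	// blockstore does: base32, no padding.
	h := blocks.NewBlock([]byte("some data")).Cid().Hash()
	key := make([]byte, base32.RawStdEncoding.EncodedLen(len(h)))
	base32.RawStdEncoding.Encode(key, h)
	fmt.Println(len(h), len(key)) // 34 55
}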
91	blockstore/badger/blockstore_test.go	Normal file
@ -0,0 +1,91 @@
package badgerbs

import (
	"io/ioutil"
	"os"
	"testing"

	blocks "github.com/ipfs/go-block-format"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/blockstore"
)

func TestBadgerBlockstore(t *testing.T) {
	(&Suite{
		NewBlockstore:  newBlockstore(DefaultOptions),
		OpenBlockstore: openBlockstore(DefaultOptions),
	}).RunTests(t, "non_prefixed")

	prefixed := func(path string) Options {
		opts := DefaultOptions(path)
		opts.Prefix = "/prefixed/"
		return opts
	}

	(&Suite{
		NewBlockstore:  newBlockstore(prefixed),
		OpenBlockstore: openBlockstore(prefixed),
	}).RunTests(t, "prefixed")
}

func TestStorageKey(t *testing.T) {
	bs, _ := newBlockstore(DefaultOptions)(t)
	bbs := bs.(*Blockstore)
	defer bbs.Close() //nolint:errcheck

	cid1 := blocks.NewBlock([]byte("some data")).Cid()
	cid2 := blocks.NewBlock([]byte("more data")).Cid()
	cid3 := blocks.NewBlock([]byte("a little more data")).Cid()
	require.NotEqual(t, cid1, cid2) // sanity check
	require.NotEqual(t, cid2, cid3) // sanity check

	// nil slice; let StorageKey allocate for us.
	k1 := bbs.StorageKey(nil, cid1)
	require.Len(t, k1, 55)
	require.True(t, cap(k1) == len(k1))

	// k1's backing array is reused.
	k2 := bbs.StorageKey(k1, cid2)
	require.Len(t, k2, 55)
	require.True(t, cap(k2) == len(k1))

	// bring k2 to len=0, and verify that its backing array gets reused
	// (i.e. k1 and k2 are overwritten)
	k3 := bbs.StorageKey(k2[:0], cid3)
	require.Len(t, k3, 55)
	require.True(t, cap(k3) == len(k3))

	// backing array of k1 and k2 has been modified, i.e. memory is shared.
	require.Equal(t, k3, k1)
	require.Equal(t, k3, k2)
}

func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) {
	return func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) {
		tb.Helper()

		path, err := ioutil.TempDir("", "")
		if err != nil {
			tb.Fatal(err)
		}

		db, err := Open(optsSupplier(path))
		if err != nil {
			tb.Fatal(err)
		}

		tb.Cleanup(func() {
			_ = os.RemoveAll(path)
		})

		return db, path
	}
}

func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) {
	return func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) {
		tb.Helper()
		return Open(optsSupplier(path))
	}
}
313	blockstore/badger/blockstore_test_suite.go	Normal file
@ -0,0 +1,313 @@
package badgerbs

import (
	"context"
	"fmt"
	"io"
	"reflect"
	"strings"
	"testing"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	u "github.com/ipfs/go-ipfs-util"

	"github.com/filecoin-project/lotus/blockstore"

	"github.com/stretchr/testify/require"
)

// TODO: move this to go-ipfs-blockstore.
type Suite struct {
	NewBlockstore  func(tb testing.TB) (bs blockstore.BasicBlockstore, path string)
	OpenBlockstore func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error)
}

func (s *Suite) RunTests(t *testing.T, prefix string) {
	v := reflect.TypeOf(s)
	f := func(t *testing.T) {
		for i := 0; i < v.NumMethod(); i++ {
			if m := v.Method(i); strings.HasPrefix(m.Name, "Test") {
				f := m.Func.Interface().(func(*Suite, *testing.T))
				t.Run(m.Name, func(t *testing.T) {
					f(s, t)
				})
			}
		}
	}

	if prefix == "" {
		f(t)
	} else {
		t.Run(prefix, f)
	}
}

func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	if c, ok := bs.(io.Closer); ok {
		defer func() { require.NoError(t, c.Close()) }()
	}

	c := cid.NewCidV0(u.Hash([]byte("stuff")))
	bl, err := bs.Get(c)
	require.Nil(t, bl)
	require.Equal(t, blockstore.ErrNotFound, err)
}

func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	if c, ok := bs.(io.Closer); ok {
		defer func() { require.NoError(t, c.Close()) }()
	}

	_, err := bs.Get(cid.Undef)
	require.Equal(t, blockstore.ErrNotFound, err)
}

func (s *Suite) TestPutThenGetBlock(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	if c, ok := bs.(io.Closer); ok {
		defer func() { require.NoError(t, c.Close()) }()
	}

	orig := blocks.NewBlock([]byte("some data"))

	err := bs.Put(orig)
	require.NoError(t, err)

	fetched, err := bs.Get(orig.Cid())
	require.NoError(t, err)
	require.Equal(t, orig.RawData(), fetched.RawData())
}

func (s *Suite) TestHas(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	if c, ok := bs.(io.Closer); ok {
		defer func() { require.NoError(t, c.Close()) }()
	}

	orig := blocks.NewBlock([]byte("some data"))

	err := bs.Put(orig)
	require.NoError(t, err)

	ok, err := bs.Has(orig.Cid())
	require.NoError(t, err)
	require.True(t, ok)

	ok, err = bs.Has(blocks.NewBlock([]byte("another thing")).Cid())
	require.NoError(t, err)
	require.False(t, ok)
}

func (s *Suite) TestCidv0v1(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	if c, ok := bs.(io.Closer); ok {
		defer func() { require.NoError(t, c.Close()) }()
	}

	orig := blocks.NewBlock([]byte("some data"))

	err := bs.Put(orig)
	require.NoError(t, err)

	fetched, err := bs.Get(cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash()))
	require.NoError(t, err)
	require.Equal(t, orig.RawData(), fetched.RawData())
}

func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	if c, ok := bs.(io.Closer); ok {
		defer func() { require.NoError(t, c.Close()) }()
	}

	block := blocks.NewBlock([]byte("some data"))
	missingBlock := blocks.NewBlock([]byte("missingBlock"))
	emptyBlock := blocks.NewBlock([]byte{})

	err := bs.Put(block)
	require.NoError(t, err)

	blockSize, err := bs.GetSize(block.Cid())
	require.NoError(t, err)
	require.Len(t, block.RawData(), blockSize)

	err = bs.Put(emptyBlock)
	require.NoError(t, err)

	emptySize, err := bs.GetSize(emptyBlock.Cid())
	require.NoError(t, err)
	require.Zero(t, emptySize)

	missingSize, err := bs.GetSize(missingBlock.Cid())
	require.Equal(t, blockstore.ErrNotFound, err)
	require.Equal(t, -1, missingSize)
}

func (s *Suite) TestAllKeysSimple(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	if c, ok := bs.(io.Closer); ok {
		defer func() { require.NoError(t, c.Close()) }()
	}

	keys := insertBlocks(t, bs, 100)

	ctx := context.Background()
	ch, err := bs.AllKeysChan(ctx)
	require.NoError(t, err)
	actual := collect(ch)

	require.ElementsMatch(t, keys, actual)
}

func (s *Suite) TestAllKeysRespectsContext(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	if c, ok := bs.(io.Closer); ok {
		defer func() { require.NoError(t, c.Close()) }()
	}

	_ = insertBlocks(t, bs, 100)

	ctx, cancel := context.WithCancel(context.Background())
	ch, err := bs.AllKeysChan(ctx)
	require.NoError(t, err)

	// consume 2, then cancel context.
	v, ok := <-ch
	require.NotEqual(t, cid.Undef, v)
	require.True(t, ok)

	v, ok = <-ch
	require.NotEqual(t, cid.Undef, v)
	require.True(t, ok)

	cancel()
	// pull one value out to avoid race
	_, _ = <-ch

	v, ok = <-ch
	require.Equal(t, cid.Undef, v)
	require.False(t, ok)
}

func (s *Suite) TestDoubleClose(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	c, ok := bs.(io.Closer)
	if !ok {
		t.SkipNow()
	}
	require.NoError(t, c.Close())
	require.NoError(t, c.Close())
}

func (s *Suite) TestReopenPutGet(t *testing.T) {
	bs, path := s.NewBlockstore(t)
	c, ok := bs.(io.Closer)
	if !ok {
		t.SkipNow()
	}

	orig := blocks.NewBlock([]byte("some data"))
	err := bs.Put(orig)
	require.NoError(t, err)

	err = c.Close()
	require.NoError(t, err)

	bs, err = s.OpenBlockstore(t, path)
	require.NoError(t, err)

	fetched, err := bs.Get(orig.Cid())
	require.NoError(t, err)
	require.Equal(t, orig.RawData(), fetched.RawData())

	err = bs.(io.Closer).Close()
	require.NoError(t, err)
}

func (s *Suite) TestPutMany(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	if c, ok := bs.(io.Closer); ok {
		defer func() { require.NoError(t, c.Close()) }()
	}

	blks := []blocks.Block{
		blocks.NewBlock([]byte("foo1")),
		blocks.NewBlock([]byte("foo2")),
		blocks.NewBlock([]byte("foo3")),
	}
	err := bs.PutMany(blks)
	require.NoError(t, err)

	for _, blk := range blks {
		fetched, err := bs.Get(blk.Cid())
		require.NoError(t, err)
		require.Equal(t, blk.RawData(), fetched.RawData())

		ok, err := bs.Has(blk.Cid())
		require.NoError(t, err)
		require.True(t, ok)
	}

	ch, err := bs.AllKeysChan(context.Background())
	require.NoError(t, err)

	cids := collect(ch)
	require.Len(t, cids, 3)
}

func (s *Suite) TestDelete(t *testing.T) {
	bs, _ := s.NewBlockstore(t)
	if c, ok := bs.(io.Closer); ok {
		defer func() { require.NoError(t, c.Close()) }()
	}

	blks := []blocks.Block{
		blocks.NewBlock([]byte("foo1")),
		blocks.NewBlock([]byte("foo2")),
		blocks.NewBlock([]byte("foo3")),
	}
	err := bs.PutMany(blks)
	require.NoError(t, err)

	err = bs.DeleteBlock(blks[1].Cid())
	require.NoError(t, err)

	ch, err := bs.AllKeysChan(context.Background())
	require.NoError(t, err)

	cids := collect(ch)
	require.Len(t, cids, 2)
	require.ElementsMatch(t, cids, []cid.Cid{
		cid.NewCidV1(cid.Raw, blks[0].Cid().Hash()),
		cid.NewCidV1(cid.Raw, blks[2].Cid().Hash()),
	})

	has, err := bs.Has(blks[1].Cid())
	require.NoError(t, err)
	require.False(t, has)
}

func insertBlocks(t *testing.T, bs blockstore.BasicBlockstore, count int) []cid.Cid {
	keys := make([]cid.Cid, count)
	for i := 0; i < count; i++ {
		block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
		err := bs.Put(block)
		require.NoError(t, err)
		// NewBlock assigns a CIDv0; we convert it to CIDv1 because that's what
		// the store returns.
		keys[i] = cid.NewCidV1(cid.Raw, block.Multihash())
	}
	return keys
}

func collect(ch <-chan cid.Cid) []cid.Cid {
	var keys []cid.Cid
	for k := range ch {
		keys = append(keys, k)
	}
	return keys
}
105	blockstore/blockstore.go	Normal file
@ -0,0 +1,105 @@
package blockstore

import (
	cid "github.com/ipfs/go-cid"
	ds "github.com/ipfs/go-datastore"
	logging "github.com/ipfs/go-log/v2"

	blockstore "github.com/ipfs/go-ipfs-blockstore"
)

var log = logging.Logger("blockstore")

var ErrNotFound = blockstore.ErrNotFound

// Blockstore is the blockstore interface used by Lotus. It is the union
// of the basic go-ipfs blockstore, with other capabilities required by Lotus,
// e.g. View or Sync.
type Blockstore interface {
	blockstore.Blockstore
	blockstore.Viewer
	BatchDeleter
}

// BasicBlockstore is an alias to the original IPFS Blockstore.
type BasicBlockstore = blockstore.Blockstore

type Viewer = blockstore.Viewer

type BatchDeleter interface {
	DeleteMany(cids []cid.Cid) error
}

// BlockstoreIterator is a trait for efficient iteration
type BlockstoreIterator interface {
	ForEachKey(func(cid.Cid) error) error
}

// BlockstoreGC is a trait for blockstores that support online garbage collection
type BlockstoreGC interface {
	CollectGarbage() error
}

// WrapIDStore wraps the underlying blockstore in an "identity" blockstore.
// The ID store filters out all puts for blocks with CIDs using the "identity"
// hash function. It also extracts inlined blocks from CIDs using the identity
// hash function and returns them on get/has, ignoring the contents of the
// blockstore.
func WrapIDStore(bstore blockstore.Blockstore) Blockstore {
	if is, ok := bstore.(*idstore); ok {
		// already wrapped
		return is
	}

	if bs, ok := bstore.(Blockstore); ok {
		// we need to wrap our own because we don't want to neuter the (efficient)
		// DeleteMany method the underlying blockstore has implemented.
		return NewIDStore(bs)
	}

	// The underlying blockstore does not implement DeleteMany, so we need to shim it.
	// This is less efficient as it'll iterate and perform single deletes.
	return NewIDStore(Adapt(bstore))
}

// FromDatastore creates a new blockstore backed by the given datastore.
func FromDatastore(dstore ds.Batching) Blockstore {
	return WrapIDStore(blockstore.NewBlockstore(dstore))
}

type adaptedBlockstore struct {
	blockstore.Blockstore
}

var _ Blockstore = (*adaptedBlockstore)(nil)

func (a *adaptedBlockstore) View(cid cid.Cid, callback func([]byte) error) error {
	blk, err := a.Get(cid)
	if err != nil {
		return err
	}
	return callback(blk.RawData())
}

func (a *adaptedBlockstore) DeleteMany(cids []cid.Cid) error {
	for _, cid := range cids {
		err := a.DeleteBlock(cid)
		if err != nil {
			return err
		}
	}

	return nil
}

// Adapt adapts a standard blockstore to a Lotus blockstore by
// enriching it with the extra methods that Lotus requires (e.g. View, Sync).
//
// View proxies over to Get and calls the callback with the value supplied by Get.
// Sync noops.
func Adapt(bs blockstore.Blockstore) Blockstore {
	if ret, ok := bs.(Blockstore); ok {
		return ret
	}
	return &adaptedBlockstore{bs}
}
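A minimal sketch of constructing a Lotus blockstore from a plain datastore: FromDatastore layers the identity-CID handling and the Lotus interface (View, DeleteMany) over any ds.Batching implementation. The in-memory map datastore below is just a stand-in for a real backend:

package main

import (
	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"

	"github.com/filecoin-project/lotus/blockstore"
)

func main() {
	// MapDatastore satisfies ds.Batching; MutexWrap makes it safe for
	// concurrent use.
	dstore := dssync.MutexWrap(ds.NewMapDatastore())
	bs := blockstore.FromDatastore(dstore)
	_ = bs // bs now supports View and DeleteMany on top of the basic interface.
}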
174	blockstore/buffered.go	Normal file
@ -0,0 +1,174 @@
|
|||||||
|
package blockstore
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
block "github.com/ipfs/go-block-format"
|
||||||
|
"github.com/ipfs/go-cid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// buflog is a logger for the buffered blockstore. It is subscoped from the
|
||||||
|
// blockstore logger.
|
||||||
|
var buflog = log.Named("buf")
|
||||||
|
|
||||||
|
type BufferedBlockstore struct {
|
||||||
|
read Blockstore
|
||||||
|
write Blockstore
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBuffered(base Blockstore) *BufferedBlockstore {
|
||||||
|
var buf Blockstore
|
||||||
|
if os.Getenv("LOTUS_DISABLE_VM_BUF") == "iknowitsabadidea" {
|
||||||
|
buflog.Warn("VM BLOCKSTORE BUFFERING IS DISABLED")
		buf = base
	} else {
		buf = NewMemory()
	}

	bs := &BufferedBlockstore{
		read:  base,
		write: buf,
	}
	return bs
}

func NewTieredBstore(r Blockstore, w Blockstore) *BufferedBlockstore {
	return &BufferedBlockstore{
		read:  r,
		write: w,
	}
}

var (
	_ Blockstore = (*BufferedBlockstore)(nil)
	_ Viewer     = (*BufferedBlockstore)(nil)
)

func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	a, err := bs.read.AllKeysChan(ctx)
	if err != nil {
		return nil, err
	}

	b, err := bs.write.AllKeysChan(ctx)
	if err != nil {
		return nil, err
	}

	out := make(chan cid.Cid)
	go func() {
		defer close(out)
		for a != nil || b != nil {
			select {
			case val, ok := <-a:
				if !ok {
					a = nil
				} else {
					select {
					case out <- val:
					case <-ctx.Done():
						return
					}
				}
			case val, ok := <-b:
				if !ok {
					b = nil
				} else {
					select {
					case out <- val:
					case <-ctx.Done():
						return
					}
				}
			}
		}
	}()

	return out, nil
}

func (bs *BufferedBlockstore) DeleteBlock(c cid.Cid) error {
	if err := bs.read.DeleteBlock(c); err != nil {
		return err
	}

	return bs.write.DeleteBlock(c)
}

func (bs *BufferedBlockstore) DeleteMany(cids []cid.Cid) error {
	if err := bs.read.DeleteMany(cids); err != nil {
		return err
	}

	return bs.write.DeleteMany(cids)
}

func (bs *BufferedBlockstore) View(c cid.Cid, callback func([]byte) error) error {
	// both stores are viewable.
	if err := bs.write.View(c, callback); err == ErrNotFound {
		// not found in write blockstore; fall through.
	} else {
		return err // propagate errors, or nil, i.e. found.
	}
	return bs.read.View(c, callback)
}

func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) {
	if out, err := bs.write.Get(c); err != nil {
		if err != ErrNotFound {
			return nil, err
		}
	} else {
		return out, nil
	}

	return bs.read.Get(c)
}

func (bs *BufferedBlockstore) GetSize(c cid.Cid) (int, error) {
	s, err := bs.read.GetSize(c)
	if err == ErrNotFound || s == 0 {
		return bs.write.GetSize(c)
	}

	return s, err
}

func (bs *BufferedBlockstore) Put(blk block.Block) error {
	has, err := bs.read.Has(blk.Cid()) // TODO: consider dropping this check
	if err != nil {
		return err
	}

	if has {
		return nil
	}

	return bs.write.Put(blk)
}

func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) {
	has, err := bs.write.Has(c)
	if err != nil {
		return false, err
	}
	if has {
		return true, nil
	}

	return bs.read.Has(c)
}

func (bs *BufferedBlockstore) HashOnRead(hor bool) {
	bs.read.HashOnRead(hor)
	bs.write.HashOnRead(hor)
}

func (bs *BufferedBlockstore) PutMany(blks []block.Block) error {
	return bs.write.PutMany(blks)
}

func (bs *BufferedBlockstore) Read() Blockstore {
	return bs.read
}
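
For orientation, here is a minimal usage sketch of the tiered store defined above; NewMemory comes from this same package (see mem.go below), and the block value is illustrative:

```go
package blockstore

import blocks "github.com/ipfs/go-block-format"

// tieredExample sketches the read/write split: writes land in the
// write store (unless the read store already has the block), while
// reads consult the write store first and fall back to the read store.
func tieredExample(base Blockstore) error {
	bs := NewTieredBstore(base, NewMemory())

	blk := blocks.NewBlock([]byte("hello")) // illustrative block
	if err := bs.Put(blk); err != nil {
		return err
	}

	// Get checks the write store first, then falls back to base.
	_, err := bs.Get(blk.Cid())
	return err
}
```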
66 blockstore/discard.go Normal file
@@ -0,0 +1,66 @@
package blockstore

import (
	"context"
	"io"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

var _ Blockstore = (*discardstore)(nil)

type discardstore struct {
	bs Blockstore
}

func NewDiscardStore(bs Blockstore) Blockstore {
	return &discardstore{bs: bs}
}

func (b *discardstore) Has(cid cid.Cid) (bool, error) {
	return b.bs.Has(cid)
}

func (b *discardstore) HashOnRead(hor bool) {
	b.bs.HashOnRead(hor)
}

func (b *discardstore) Get(cid cid.Cid) (blocks.Block, error) {
	return b.bs.Get(cid)
}

func (b *discardstore) GetSize(cid cid.Cid) (int, error) {
	return b.bs.GetSize(cid)
}

func (b *discardstore) View(cid cid.Cid, f func([]byte) error) error {
	return b.bs.View(cid, f)
}

func (b *discardstore) Put(blk blocks.Block) error {
	return nil
}

func (b *discardstore) PutMany(blks []blocks.Block) error {
	return nil
}

func (b *discardstore) DeleteBlock(cid cid.Cid) error {
	return nil
}

func (b *discardstore) DeleteMany(cids []cid.Cid) error {
	return nil
}

func (b *discardstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	return b.bs.AllKeysChan(ctx)
}

func (b *discardstore) Close() error {
	if c, ok := b.bs.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
9 blockstore/doc.go Normal file
@@ -0,0 +1,9 @@
// Package blockstore and subpackages contain most of the blockstore
// implementations used by Lotus.
//
// Blockstores not ultimately constructed out of the building blocks in this
// package may not work properly.
//
// This package re-exports parts of the go-ipfs-blockstore package such that
// no other package needs to import it directly, for ergonomics and traceability.
package blockstore
106 blockstore/fallback.go Normal file
@@ -0,0 +1,106 @@
package blockstore

import (
	"context"
	"sync"
	"time"

	"golang.org/x/xerrors"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
)

// UnwrapFallbackStore takes a blockstore, and returns the underlying blockstore
// if it was a FallbackStore. Otherwise, it just returns the supplied store
// unmodified.
func UnwrapFallbackStore(bs Blockstore) (Blockstore, bool) {
	if fbs, ok := bs.(*FallbackStore); ok {
		return fbs.Blockstore, true
	}
	return bs, false
}

// FallbackStore is a read-through store that queries another (potentially
// remote) source if the block is not found locally. If the block is found
// during the fallback, it stores it in the local store.
type FallbackStore struct {
	Blockstore

	lk sync.RWMutex
	// missFn is the function that will be invoked on a local miss to pull the
	// block from elsewhere.
	missFn func(context.Context, cid.Cid) (blocks.Block, error)
}

var _ Blockstore = (*FallbackStore)(nil)

func (fbs *FallbackStore) SetFallback(missFn func(context.Context, cid.Cid) (blocks.Block, error)) {
	fbs.lk.Lock()
	defer fbs.lk.Unlock()

	fbs.missFn = missFn
}

func (fbs *FallbackStore) getFallback(c cid.Cid) (blocks.Block, error) {
	log.Warnf("fallbackstore: block not found locally, fetching from the network; cid: %s", c)
	fbs.lk.RLock()
	defer fbs.lk.RUnlock()

	if fbs.missFn == nil {
		// FallbackStore wasn't configured yet (chainstore/bitswap aren't up yet)
		// Wait for a bit and retry
		fbs.lk.RUnlock()
		time.Sleep(5 * time.Second)
		fbs.lk.RLock()

		if fbs.missFn == nil {
			log.Errorw("fallbackstore: missFn not configured yet")
			return nil, ErrNotFound
		}
	}

	ctx, cancel := context.WithTimeout(context.TODO(), 120*time.Second)
	defer cancel()

	b, err := fbs.missFn(ctx, c)
	if err != nil {
		return nil, err
	}

	// chain bitswap puts blocks in temp blockstore which is cleaned up
	// every few min (to drop any messages we fetched but don't want)
	// in this case we want to keep this block around
	if err := fbs.Put(b); err != nil {
		return nil, xerrors.Errorf("persisting fallback-fetched block: %w", err)
	}
	return b, nil
}

func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) {
	b, err := fbs.Blockstore.Get(c)
	switch err {
	case nil:
		return b, nil
	case ErrNotFound:
		return fbs.getFallback(c)
	default:
		return b, err
	}
}

func (fbs *FallbackStore) GetSize(c cid.Cid) (int, error) {
	sz, err := fbs.Blockstore.GetSize(c)
	switch err {
	case nil:
		return sz, nil
	case ErrNotFound:
		b, err := fbs.getFallback(c)
		if err != nil {
			return 0, err
		}
		return len(b.RawData()), nil
	default:
		return sz, err
	}
}
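
A minimal wiring sketch for this store; fetchFromNetwork is a hypothetical caller-supplied miss function, not part of this package:

```go
package blockstore

import (
	"context"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
)

// newFallbackExample wraps a local store so that misses are served by
// fetchFromNetwork and then persisted locally by getFallback above.
func newFallbackExample(local Blockstore,
	fetchFromNetwork func(context.Context, cid.Cid) (blocks.Block, error)) *FallbackStore {
	fbs := &FallbackStore{Blockstore: local}
	fbs.SetFallback(fetchFromNetwork)
	return fbs
}
```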
174 blockstore/idstore.go Normal file
@@ -0,0 +1,174 @@
package blockstore

import (
	"context"
	"io"

	"golang.org/x/xerrors"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

var _ Blockstore = (*idstore)(nil)

type idstore struct {
	bs Blockstore
}

func NewIDStore(bs Blockstore) Blockstore {
	return &idstore{bs: bs}
}

func decodeCid(cid cid.Cid) (inline bool, data []byte, err error) {
	if cid.Prefix().MhType != mh.IDENTITY {
		return false, nil, nil
	}

	dmh, err := mh.Decode(cid.Hash())
	if err != nil {
		return false, nil, err
	}

	if dmh.Code == mh.IDENTITY {
		return true, dmh.Digest, nil
	}

	return false, nil, err
}

func (b *idstore) Has(cid cid.Cid) (bool, error) {
	inline, _, err := decodeCid(cid)
	if err != nil {
		return false, xerrors.Errorf("error decoding Cid: %w", err)
	}

	if inline {
		return true, nil
	}

	return b.bs.Has(cid)
}

func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) {
	inline, data, err := decodeCid(cid)
	if err != nil {
		return nil, xerrors.Errorf("error decoding Cid: %w", err)
	}

	if inline {
		return blocks.NewBlockWithCid(data, cid)
	}

	return b.bs.Get(cid)
}

func (b *idstore) GetSize(cid cid.Cid) (int, error) {
	inline, data, err := decodeCid(cid)
	if err != nil {
		return 0, xerrors.Errorf("error decoding Cid: %w", err)
	}

	if inline {
		return len(data), err
	}

	return b.bs.GetSize(cid)
}

func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error {
	inline, data, err := decodeCid(cid)
	if err != nil {
		return xerrors.Errorf("error decoding Cid: %w", err)
	}

	if inline {
		return cb(data)
	}

	return b.bs.View(cid, cb)
}

func (b *idstore) Put(blk blocks.Block) error {
	inline, _, err := decodeCid(blk.Cid())
	if err != nil {
		return xerrors.Errorf("error decoding Cid: %w", err)
	}

	if inline {
		return nil
	}

	return b.bs.Put(blk)
}

func (b *idstore) PutMany(blks []blocks.Block) error {
	toPut := make([]blocks.Block, 0, len(blks))
	for _, blk := range blks {
		inline, _, err := decodeCid(blk.Cid())
		if err != nil {
			return xerrors.Errorf("error decoding Cid: %w", err)
		}

		if inline {
			continue
		}
		toPut = append(toPut, blk)
	}

	if len(toPut) > 0 {
		return b.bs.PutMany(toPut)
	}

	return nil
}

func (b *idstore) DeleteBlock(cid cid.Cid) error {
	inline, _, err := decodeCid(cid)
	if err != nil {
		return xerrors.Errorf("error decoding Cid: %w", err)
	}

	if inline {
		return nil
	}

	return b.bs.DeleteBlock(cid)
}

func (b *idstore) DeleteMany(cids []cid.Cid) error {
	toDelete := make([]cid.Cid, 0, len(cids))
	for _, cid := range cids {
		inline, _, err := decodeCid(cid)
		if err != nil {
			return xerrors.Errorf("error decoding Cid: %w", err)
		}

		if inline {
			continue
		}
		toDelete = append(toDelete, cid)
	}

	if len(toDelete) > 0 {
		return b.bs.DeleteMany(toDelete)
	}

	return nil
}

func (b *idstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	return b.bs.AllKeysChan(ctx)
}

func (b *idstore) HashOnRead(enabled bool) {
	b.bs.HashOnRead(enabled)
}

func (b *idstore) Close() error {
	if c, ok := b.bs.(io.Closer); ok {
		return c.Close()
	}
	return nil
}
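
To see why the inline short-circuit is safe, recall that an identity-multihash CID carries its payload in the digest itself. A small sketch constructing such a CID with go-cid and go-multihash:

```go
package blockstore

import (
	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// inlineCid builds a CID whose "hash" is the raw data itself; the
// idstore serves Get/View/Has for such CIDs without touching the
// backing store, and silently drops Put/Delete for them.
func inlineCid(data []byte) (cid.Cid, error) {
	h, err := mh.Sum(data, mh.IDENTITY, -1)
	if err != nil {
		return cid.Undef, err
	}
	return cid.NewCidV1(cid.Raw, h), nil
}
```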
@@ -1,4 +1,4 @@
-package ipfsbstore
+package blockstore

 import (
 	"bytes"
@@ -16,53 +16,75 @@ import (
 	iface "github.com/ipfs/interface-go-ipfs-core"
 	"github.com/ipfs/interface-go-ipfs-core/options"
 	"github.com/ipfs/interface-go-ipfs-core/path"
-
-	"github.com/filecoin-project/lotus/lib/blockstore"
 )

-type IpfsBstore struct {
+type IPFSBlockstore struct {
 	ctx context.Context
-	api iface.CoreAPI
+	api, offlineAPI iface.CoreAPI
 }

-func NewIpfsBstore(ctx context.Context) (*IpfsBstore, error) {
+var _ BasicBlockstore = (*IPFSBlockstore)(nil)
+
+func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) {
 	localApi, err := httpapi.NewLocalApi()
 	if err != nil {
 		return nil, xerrors.Errorf("getting local ipfs api: %w", err)
 	}
-	api, err := localApi.WithOptions(options.Api.Offline(true))
+	api, err := localApi.WithOptions(options.Api.Offline(!onlineMode))
 	if err != nil {
 		return nil, xerrors.Errorf("setting offline mode: %s", err)
 	}

-	return &IpfsBstore{
-		ctx: ctx,
-		api: api,
-	}, nil
+	offlineAPI := api
+	if onlineMode {
+		offlineAPI, err = localApi.WithOptions(options.Api.Offline(true))
+		if err != nil {
+			return nil, xerrors.Errorf("applying offline mode: %s", err)
+		}
+	}
+
+	bs := &IPFSBlockstore{
+		ctx:        ctx,
+		api:        api,
+		offlineAPI: offlineAPI,
+	}
+
+	return Adapt(bs), nil
 }

-func NewRemoteIpfsBstore(ctx context.Context, maddr multiaddr.Multiaddr) (*IpfsBstore, error) {
+func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) {
 	httpApi, err := httpapi.NewApi(maddr)
 	if err != nil {
 		return nil, xerrors.Errorf("setting remote ipfs api: %w", err)
 	}
-	api, err := httpApi.WithOptions(options.Api.Offline(true))
+	api, err := httpApi.WithOptions(options.Api.Offline(!onlineMode))
 	if err != nil {
 		return nil, xerrors.Errorf("applying offline mode: %s", err)
 	}

-	return &IpfsBstore{
-		ctx: ctx,
-		api: api,
-	}, nil
+	offlineAPI := api
+	if onlineMode {
+		offlineAPI, err = httpApi.WithOptions(options.Api.Offline(true))
+		if err != nil {
+			return nil, xerrors.Errorf("applying offline mode: %s", err)
+		}
+	}
+
+	bs := &IPFSBlockstore{
+		ctx:        ctx,
+		api:        api,
+		offlineAPI: offlineAPI,
+	}
+
+	return Adapt(bs), nil
 }

-func (i *IpfsBstore) DeleteBlock(cid cid.Cid) error {
+func (i *IPFSBlockstore) DeleteBlock(cid cid.Cid) error {
 	return xerrors.Errorf("not supported")
 }

-func (i *IpfsBstore) Has(cid cid.Cid) (bool, error) {
-	_, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid))
+func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) {
+	_, err := i.offlineAPI.Block().Stat(i.ctx, path.IpldPath(cid))
 	if err != nil {
 		// The underlying client is running in Offline mode.
 		// Stat() will fail with an err if the block isn't in the
@@ -77,7 +99,7 @@ func (i *IpfsBstore) Has(cid cid.Cid) (bool, error) {
 	return true, nil
 }

-func (i *IpfsBstore) Get(cid cid.Cid) (blocks.Block, error) {
+func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) {
 	rd, err := i.api.Block().Get(i.ctx, path.IpldPath(cid))
 	if err != nil {
 		return nil, xerrors.Errorf("getting ipfs block: %w", err)
@@ -91,7 +113,7 @@ func (i *IpfsBstore) Get(cid cid.Cid) (blocks.Block, error) {
 	return blocks.NewBlockWithCid(data, cid)
 }

-func (i *IpfsBstore) GetSize(cid cid.Cid) (int, error) {
+func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) {
 	st, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid))
 	if err != nil {
 		return 0, xerrors.Errorf("getting ipfs block: %w", err)
@@ -100,7 +122,7 @@ func (i *IpfsBstore) GetSize(cid cid.Cid) (int, error) {
 	return st.Size(), nil
 }

-func (i *IpfsBstore) Put(block blocks.Block) error {
+func (i *IPFSBlockstore) Put(block blocks.Block) error {
 	mhd, err := multihash.Decode(block.Cid().Hash())
 	if err != nil {
 		return err
@@ -112,7 +134,7 @@ func (i *IpfsBstore) Put(block blocks.Block) error {
 	return err
 }

-func (i *IpfsBstore) PutMany(blocks []blocks.Block) error {
+func (i *IPFSBlockstore) PutMany(blocks []blocks.Block) error {
 	// TODO: could be done in parallel

 	for _, block := range blocks {
@@ -124,12 +146,10 @@ func (i *IpfsBstore) PutMany(blocks []blocks.Block) error {
 	return nil
 }

-func (i *IpfsBstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+func (i *IPFSBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
 	return nil, xerrors.Errorf("not supported")
 }

-func (i *IpfsBstore) HashOnRead(enabled bool) {
+func (i *IPFSBlockstore) HashOnRead(enabled bool) {
 	return // TODO: We could technically support this, but..
 }
-
-var _ blockstore.Blockstore = &IpfsBstore{}
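
A small usage sketch of the renamed constructor, assuming a local IPFS daemon is reachable; note that with this change Has always consults the offline API view, while Get may go to the network when onlineMode is true:

```go
package blockstore

import "context"

// localIPFSExample opens a blockstore backed by the local IPFS daemon.
// With onlineMode=false all reads are served strictly from the local
// node; with onlineMode=true, Get may fetch blocks over the network.
func localIPFSExample(ctx context.Context) (Blockstore, error) {
	return NewLocalIPFSBlockstore(ctx, false)
}
```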
@@ -5,38 +5,60 @@ import (

 	blocks "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-cid"
-	blockstore "github.com/ipfs/go-ipfs-blockstore"
 )

-type MemStore map[cid.Cid]blocks.Block
+// NewMemory returns a temporary memory-backed blockstore.
+func NewMemory() MemBlockstore {
+	return make(MemBlockstore)
+}

-func (m MemStore) DeleteBlock(k cid.Cid) error {
+// MemBlockstore is a terminal blockstore that keeps blocks in memory.
+type MemBlockstore map[cid.Cid]blocks.Block
+
+func (m MemBlockstore) DeleteBlock(k cid.Cid) error {
 	delete(m, k)
 	return nil
 }
-func (m MemStore) Has(k cid.Cid) (bool, error) {
+
+func (m MemBlockstore) DeleteMany(ks []cid.Cid) error {
+	for _, k := range ks {
+		delete(m, k)
+	}
+	return nil
+}
+
+func (m MemBlockstore) Has(k cid.Cid) (bool, error) {
 	_, ok := m[k]
 	return ok, nil
 }
-func (m MemStore) Get(k cid.Cid) (blocks.Block, error) {
+
+func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error {
 	b, ok := m[k]
 	if !ok {
-		return nil, blockstore.ErrNotFound
+		return ErrNotFound
+	}
+	return callback(b.RawData())
+}
+
+func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) {
+	b, ok := m[k]
+	if !ok {
+		return nil, ErrNotFound
 	}
 	return b, nil
 }

 // GetSize returns the CIDs mapped BlockSize
-func (m MemStore) GetSize(k cid.Cid) (int, error) {
+func (m MemBlockstore) GetSize(k cid.Cid) (int, error) {
 	b, ok := m[k]
 	if !ok {
-		return 0, blockstore.ErrNotFound
+		return 0, ErrNotFound
 	}
 	return len(b.RawData()), nil
 }

 // Put puts a given block to the underlying datastore
-func (m MemStore) Put(b blocks.Block) error {
+func (m MemBlockstore) Put(b blocks.Block) error {
 	// Convert to a basic block for safety, but try to reuse the existing
 	// block if it's already a basic block.
 	k := b.Cid()
@@ -54,7 +76,7 @@ func (m MemStore) Put(b blocks.Block) error {

 // PutMany puts a slice of blocks at the same time using batching
 // capabilities of the underlying datastore whenever possible.
-func (m MemStore) PutMany(bs []blocks.Block) error {
+func (m MemBlockstore) PutMany(bs []blocks.Block) error {
 	for _, b := range bs {
 		_ = m.Put(b) // can't fail
 	}
@@ -64,7 +86,7 @@ func (m MemStore) PutMany(bs []blocks.Block) error {
 // AllKeysChan returns a channel from which
 // the CIDs in the Blockstore can be read. It should respect
 // the given context, closing the channel if it becomes Done.
-func (m MemStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+func (m MemBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
 	ch := make(chan cid.Cid, len(m))
 	for k := range m {
 		ch <- k
@@ -75,6 +97,6 @@ func (m MemStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {

 // HashOnRead specifies if every read block should be
 // rehashed to make sure it matches its CID.
-func (m MemStore) HashOnRead(enabled bool) {
+func (m MemBlockstore) HashOnRead(enabled bool) {
 	// no-op
 }
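
A quick round-trip sketch of the renamed in-memory store; the block value is illustrative:

```go
package blockstore

import blocks "github.com/ipfs/go-block-format"

// memExample exercises the basic MemBlockstore round trip.
func memExample() error {
	m := NewMemory()

	blk := blocks.NewBlock([]byte("hello")) // illustrative block
	if err := m.Put(blk); err != nil {
		return err
	}

	// View avoids copying: the callback sees the stored bytes directly.
	return m.View(blk.Cid(), func(data []byte) error {
		_ = data
		return nil
	})
}
```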
154 blockstore/metrics.go Normal file
@@ -0,0 +1,154 @@
package blockstore

import (
	"time"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

//
// Currently unused, but kept in repo in case we introduce one of the candidate
// cache implementations (Freecache, Ristretto), both of which report these
// metrics.
//

// CacheMetricsEmitInterval is the interval at which metrics are emitted onto
// OpenCensus.
var CacheMetricsEmitInterval = 5 * time.Second

var (
	CacheName, _ = tag.NewKey("cache_name")
)

// CacheMeasures groups all metrics emitted by the blockstore caches.
var CacheMeasures = struct {
	HitRatio       *stats.Float64Measure
	Hits           *stats.Int64Measure
	Misses         *stats.Int64Measure
	Entries        *stats.Int64Measure
	QueriesServed  *stats.Int64Measure
	Adds           *stats.Int64Measure
	Updates        *stats.Int64Measure
	Evictions      *stats.Int64Measure
	CostAdded      *stats.Int64Measure
	CostEvicted    *stats.Int64Measure
	SetsDropped    *stats.Int64Measure
	SetsRejected   *stats.Int64Measure
	QueriesDropped *stats.Int64Measure
}{
	HitRatio:       stats.Float64("blockstore/cache/hit_ratio", "Hit ratio of blockstore cache", stats.UnitDimensionless),
	Hits:           stats.Int64("blockstore/cache/hits", "Total number of hits at blockstore cache", stats.UnitDimensionless),
	Misses:         stats.Int64("blockstore/cache/misses", "Total number of misses at blockstore cache", stats.UnitDimensionless),
	Entries:        stats.Int64("blockstore/cache/entry_count", "Total number of entries currently in the blockstore cache", stats.UnitDimensionless),
	QueriesServed:  stats.Int64("blockstore/cache/queries_served", "Total number of queries served by the blockstore cache", stats.UnitDimensionless),
	Adds:           stats.Int64("blockstore/cache/adds", "Total number of adds to blockstore cache", stats.UnitDimensionless),
	Updates:        stats.Int64("blockstore/cache/updates", "Total number of updates in blockstore cache", stats.UnitDimensionless),
	Evictions:      stats.Int64("blockstore/cache/evictions", "Total number of evictions from blockstore cache", stats.UnitDimensionless),
	CostAdded:      stats.Int64("blockstore/cache/cost_added", "Total cost (byte size) of entries added into blockstore cache", stats.UnitBytes),
	CostEvicted:    stats.Int64("blockstore/cache/cost_evicted", "Total cost (byte size) of entries evicted by blockstore cache", stats.UnitBytes),
	SetsDropped:    stats.Int64("blockstore/cache/sets_dropped", "Total number of sets dropped by blockstore cache", stats.UnitDimensionless),
	SetsRejected:   stats.Int64("blockstore/cache/sets_rejected", "Total number of sets rejected by blockstore cache", stats.UnitDimensionless),
	QueriesDropped: stats.Int64("blockstore/cache/queries_dropped", "Total number of queries dropped by blockstore cache", stats.UnitDimensionless),
}

// CacheViews groups all cache-related default views.
var CacheViews = struct {
	HitRatio       *view.View
	Hits           *view.View
	Misses         *view.View
	Entries        *view.View
	QueriesServed  *view.View
	Adds           *view.View
	Updates        *view.View
	Evictions      *view.View
	CostAdded      *view.View
	CostEvicted    *view.View
	SetsDropped    *view.View
	SetsRejected   *view.View
	QueriesDropped *view.View
}{
	HitRatio: &view.View{
		Measure:     CacheMeasures.HitRatio,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	Hits: &view.View{
		Measure:     CacheMeasures.Hits,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	Misses: &view.View{
		Measure:     CacheMeasures.Misses,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	Entries: &view.View{
		Measure:     CacheMeasures.Entries,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	QueriesServed: &view.View{
		Measure:     CacheMeasures.QueriesServed,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	Adds: &view.View{
		Measure:     CacheMeasures.Adds,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	Updates: &view.View{
		Measure:     CacheMeasures.Updates,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	Evictions: &view.View{
		Measure:     CacheMeasures.Evictions,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	CostAdded: &view.View{
		Measure:     CacheMeasures.CostAdded,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	CostEvicted: &view.View{
		Measure:     CacheMeasures.CostEvicted,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	SetsDropped: &view.View{
		Measure:     CacheMeasures.SetsDropped,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	SetsRejected: &view.View{
		Measure:     CacheMeasures.SetsRejected,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
	QueriesDropped: &view.View{
		Measure:     CacheMeasures.QueriesDropped,
		Aggregation: view.LastValue(),
		TagKeys:     []tag.Key{CacheName},
	},
}

// DefaultViews exports all default views for this package.
var DefaultViews = []*view.View{
	CacheViews.HitRatio,
	CacheViews.Hits,
	CacheViews.Misses,
	CacheViews.Entries,
	CacheViews.QueriesServed,
	CacheViews.Adds,
	CacheViews.Updates,
	CacheViews.Evictions,
	CacheViews.CostAdded,
	CacheViews.CostEvicted,
	CacheViews.SetsDropped,
	CacheViews.SetsRejected,
	CacheViews.QueriesDropped,
}
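
Although these measures are currently unused (see the comment above), wiring them up would be the standard OpenCensus registration; a minimal sketch:

```go
package main

import (
	"go.opencensus.io/stats/view"

	"github.com/filecoin-project/lotus/blockstore"
)

// registerBlockstoreMetrics registers all default blockstore cache
// views with OpenCensus so a configured exporter can report them.
func registerBlockstoreMetrics() error {
	return view.Register(blockstore.DefaultViews...)
}
```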
72 blockstore/splitstore/README.md Normal file
@@ -0,0 +1,72 @@
# SplitStore: An actively scalable blockstore for the Filecoin chain

The SplitStore was first introduced in lotus v1.5.1, as an experiment
in reducing the performance impact of large blockstores.

With lotus v1.11.1, we introduce the next iteration in design and
implementation, which we call SplitStore v1.

The new design (see [#6474](https://github.com/filecoin-project/lotus/pull/6474))
evolves the splitstore to be a freestanding compacting blockstore that
allows us to keep a small (60-100GB) working set in a hot blockstore
and reliably archive out-of-scope objects in a coldstore. The
coldstore can also be a discard store, whereby out-of-scope objects
are discarded, or a regular badger blockstore (the default), which can
be periodically garbage collected according to configurable user
retention policies.

To enable the splitstore, edit `.lotus/config.toml` and add the following:
```
[Chainstore]
  EnableSplitstore = true
```

If you intend to use the discard coldstore, you also need to add the following:
```
  [Chainstore.Splitstore]
    ColdStoreType = "discard"
```
In general you _should not_ have to use the discard store, unless you
are running a network booster or have very constrained hardware with
not enough disk space to maintain a coldstore, even with garbage
collection.

## Operation

When the splitstore is first enabled, the existing blockstore becomes
the coldstore and a fresh hotstore is initialized.

The hotstore is warmed up on first startup so as to load all chain
headers and state roots in the current head. This allows us to
immediately gain the performance benefits of a smaller blockstore,
which can be substantial for full archival nodes.

All new writes are directed to the hotstore, while reads first hit the
hotstore, with fallback to the coldstore.

Once 5 finalities have elapsed, and every finality henceforth, the
blockstore _compacts_. Compaction is the process of moving all
unreachable objects within the last 4 finalities from the hotstore to
the coldstore. If the system is configured with a discard coldstore,
these objects are discarded. Note that chain headers, all the way to
genesis, are considered reachable. Stateroots and messages are
considered reachable only within the last 4 finalities, unless there
is a live reference to them.

## Compaction

Compaction works transactionally with the following algorithm (see the sketch after this list):
- We prepare a transaction, whereby all i/o referenced objects through the API are tracked.
- We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis.
- Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk. At the same time, all I/O through the API concurrently marks objects as live references.
- We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is a candidate for purging.
- When running with a coldstore, we next copy all cold objects to the coldstore.
- At this point we are ready to begin purging:
  - We sort cold objects heaviest first, so as to never delete the constituents of a DAG before the DAG itself (which would leave dangling references)
  - We delete in small batches taking a lock; each batch is checked again for marks, from the concurrent transactional mark, so as to never delete anything live
- We then end the transaction and compact/gc the hotstore.
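
A heavily simplified Go sketch of that algorithm follows. This is not the lotus implementation; it omits the transactional protection, batching, and locking described above, and compactOnce and its parameters are illustrative:

```go
package splitstore

import (
	"context"

	cid "github.com/ipfs/go-cid"

	bstore "github.com/filecoin-project/lotus/blockstore"
)

// compactOnce is a hypothetical single compaction pass: mark reachable
// objects, sweep the hotstore for unmarked ones, copy them to the
// coldstore, then purge them from the hotstore.
func compactOnce(hot, cold bstore.Blockstore, mark MarkSet, reachable []cid.Cid) error {
	// 1. mark everything the chain walk found reachable
	for _, c := range reachable {
		if err := mark.Mark(c); err != nil {
			return err
		}
	}

	// 2. sweep the hotstore: unmarked objects are cold candidates
	ch, err := hot.AllKeysChan(context.Background())
	if err != nil {
		return err
	}
	var coldList []cid.Cid
	for c := range ch {
		marked, err := mark.Has(c)
		if err != nil {
			return err
		}
		if !marked {
			coldList = append(coldList, c)
		}
	}

	// 3. copy cold objects to the coldstore, then purge them from hot
	for _, c := range coldList {
		blk, err := hot.Get(c)
		if err != nil {
			return err
		}
		if err := cold.Put(blk); err != nil {
			return err
		}
	}
	return hot.DeleteMany(coldList)
}
```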

## Coldstore Garbage Collection

TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577)
273 blockstore/splitstore/debug.go Normal file
@@ -0,0 +1,273 @@
package splitstore

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"runtime/debug"
	"strings"
	"sync"
	"time"

	"go.uber.org/multierr"
	"golang.org/x/xerrors"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
)

type debugLog struct {
	readLog, writeLog, deleteLog, stackLog *debugLogOp

	stackMx  sync.Mutex
	stackMap map[string]string
}

type debugLogOp struct {
	path  string
	mx    sync.Mutex
	log   *os.File
	count int
}

func openDebugLog(path string) (*debugLog, error) {
	basePath := filepath.Join(path, "debug")
	err := os.MkdirAll(basePath, 0755)
	if err != nil {
		return nil, err
	}

	readLog, err := openDebugLogOp(basePath, "read.log")
	if err != nil {
		return nil, err
	}

	writeLog, err := openDebugLogOp(basePath, "write.log")
	if err != nil {
		_ = readLog.Close()
		return nil, err
	}

	deleteLog, err := openDebugLogOp(basePath, "delete.log")
	if err != nil {
		_ = readLog.Close()
		_ = writeLog.Close()
		return nil, err
	}

	stackLog, err := openDebugLogOp(basePath, "stack.log")
	if err != nil {
		_ = readLog.Close()
		_ = writeLog.Close()
		_ = deleteLog.Close()
		return nil, xerrors.Errorf("error opening stack log: %w", err)
	}

	return &debugLog{
		readLog:   readLog,
		writeLog:  writeLog,
		deleteLog: deleteLog,
		stackLog:  stackLog,
		stackMap:  make(map[string]string),
	}, nil
}

func (d *debugLog) LogReadMiss(cid cid.Cid) {
	if d == nil {
		return
	}

	stack := d.getStack()
	err := d.readLog.Log("%s %s %s\n", d.timestamp(), cid, stack)
	if err != nil {
		log.Warnf("error writing read log: %s", err)
	}
}

func (d *debugLog) LogWrite(blk blocks.Block) {
	if d == nil {
		return
	}

	var stack string
	if enableDebugLogWriteTraces {
		stack = " " + d.getStack()
	}

	err := d.writeLog.Log("%s %s%s\n", d.timestamp(), blk.Cid(), stack)
	if err != nil {
		log.Warnf("error writing write log: %s", err)
	}
}

func (d *debugLog) LogWriteMany(blks []blocks.Block) {
	if d == nil {
		return
	}

	var stack string
	if enableDebugLogWriteTraces {
		stack = " " + d.getStack()
	}

	now := d.timestamp()
	for _, blk := range blks {
		err := d.writeLog.Log("%s %s%s\n", now, blk.Cid(), stack)
		if err != nil {
			log.Warnf("error writing write log: %s", err)
			break
		}
	}
}

func (d *debugLog) LogDelete(cids []cid.Cid) {
	if d == nil {
		return
	}

	now := d.timestamp()
	for _, c := range cids {
		err := d.deleteLog.Log("%s %s\n", now, c)
		if err != nil {
			log.Warnf("error writing delete log: %s", err)
			break
		}
	}
}

func (d *debugLog) Flush() {
	if d == nil {
		return
	}

	// rotate non-empty logs
	d.readLog.Rotate()
	d.writeLog.Rotate()
	d.deleteLog.Rotate()
	d.stackLog.Rotate()
}

func (d *debugLog) Close() error {
	if d == nil {
		return nil
	}

	err1 := d.readLog.Close()
	err2 := d.writeLog.Close()
	err3 := d.deleteLog.Close()
	err4 := d.stackLog.Close()

	return multierr.Combine(err1, err2, err3, err4)
}

func (d *debugLog) getStack() string {
	sk := d.getNormalizedStackTrace()
	hash := sha256.Sum256([]byte(sk))
	key := string(hash[:])

	d.stackMx.Lock()
	repr, ok := d.stackMap[key]
	if !ok {
		repr = hex.EncodeToString(hash[:])
		d.stackMap[key] = repr

		err := d.stackLog.Log("%s\n%s\n", repr, sk)
		if err != nil {
			log.Warnf("error writing stack trace for %s: %s", repr, err)
		}
	}
	d.stackMx.Unlock()

	return repr
}

func (d *debugLog) getNormalizedStackTrace() string {
	sk := string(debug.Stack())

	// Normalization for deduplication
	// skip first line -- it's the goroutine
	// for each line that ends in a ), remove the call args -- these are the registers
	lines := strings.Split(sk, "\n")[1:]
	for i, line := range lines {
		if len(line) > 0 && line[len(line)-1] == ')' {
			idx := strings.LastIndex(line, "(")
			if idx < 0 {
				continue
			}
			lines[i] = line[:idx]
		}
	}

	return strings.Join(lines, "\n")
}

func (d *debugLog) timestamp() string {
	ts, _ := time.Now().MarshalText()
	return string(ts)
}

func openDebugLogOp(basePath, name string) (*debugLogOp, error) {
	path := filepath.Join(basePath, name)
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return nil, xerrors.Errorf("error opening %s: %w", name, err)
	}

	return &debugLogOp{path: path, log: file}, nil
}

func (d *debugLogOp) Close() error {
	d.mx.Lock()
	defer d.mx.Unlock()

	return d.log.Close()
}

func (d *debugLogOp) Log(template string, arg ...interface{}) error {
	d.mx.Lock()
	defer d.mx.Unlock()

	d.count++
	_, err := fmt.Fprintf(d.log, template, arg...)
	return err
}

func (d *debugLogOp) Rotate() {
	d.mx.Lock()
	defer d.mx.Unlock()

	if d.count == 0 {
		return
	}

	err := d.log.Close()
	if err != nil {
		log.Warnf("error closing log (file: %s): %s", d.path, err)
		return
	}

	arxivPath := fmt.Sprintf("%s-%d", d.path, time.Now().Unix())
	err = os.Rename(d.path, arxivPath)
	if err != nil {
		log.Warnf("error moving log (file: %s): %s", d.path, err)
		return
	}

	go func() {
		cmd := exec.Command("gzip", arxivPath)
		err := cmd.Run()
		if err != nil {
			log.Warnf("error compressing log (file: %s): %s", arxivPath, err)
		}
	}()

	d.count = 0
	d.log, err = os.OpenFile(d.path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		log.Warnf("error opening log (file: %s): %s", d.path, err)
		return
	}
}
38 blockstore/splitstore/markset.go Normal file
@@ -0,0 +1,38 @@
package splitstore

import (
	"errors"

	"golang.org/x/xerrors"

	cid "github.com/ipfs/go-cid"
)

var errMarkSetClosed = errors.New("markset closed")

// MarkSet is a utility to keep track of seen CIDs, and later query for them.
//
// * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt).
// * If a probabilistic result is acceptable, it can be backed by a bloom filter
type MarkSet interface {
	Mark(cid.Cid) error
	Has(cid.Cid) (bool, error)
	Close() error
	SetConcurrent()
}

type MarkSetEnv interface {
	Create(name string, sizeHint int64) (MarkSet, error)
	Close() error
}

func OpenMarkSetEnv(path string, mtype string) (MarkSetEnv, error) {
	switch mtype {
	case "bloom":
		return NewBloomMarkSetEnv()
	case "map":
		return NewMapMarkSetEnv()
	default:
		return nil, xerrors.Errorf("unknown mark set type %s", mtype)
	}
}
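
For reference, a minimal sketch of the markset lifecycle as exercised by the tests further below; the temp-dir path is illustrative:

```go
package splitstore

import (
	"io/ioutil"

	cid "github.com/ipfs/go-cid"
)

// markExample shows the basic MarkSet lifecycle: open an environment,
// create a named set, mark a CID, query it, and close the set.
func markExample(c cid.Cid) (bool, error) {
	path, err := ioutil.TempDir("", "markset-example.*")
	if err != nil {
		return false, err
	}

	env, err := OpenMarkSetEnv(path, "map") // or "bloom"
	if err != nil {
		return false, err
	}
	defer env.Close() //nolint:errcheck

	set, err := env.Create("example", 0)
	if err != nil {
		return false, err
	}
	defer set.Close() //nolint:errcheck

	if err := set.Mark(c); err != nil {
		return false, err
	}
	return set.Has(c) // true for marked CIDs
}
```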
107 blockstore/splitstore/markset_bloom.go Normal file
@@ -0,0 +1,107 @@
package splitstore

import (
	"crypto/rand"
	"crypto/sha256"
	"sync"

	"golang.org/x/xerrors"

	bbloom "github.com/ipfs/bbloom"
	cid "github.com/ipfs/go-cid"
)

const (
	BloomFilterMinSize     = 10_000_000
	BloomFilterProbability = 0.01
)

type BloomMarkSetEnv struct{}

var _ MarkSetEnv = (*BloomMarkSetEnv)(nil)

type BloomMarkSet struct {
	salt []byte
	mx   sync.RWMutex
	bf   *bbloom.Bloom
	ts   bool
}

var _ MarkSet = (*BloomMarkSet)(nil)

func NewBloomMarkSetEnv() (*BloomMarkSetEnv, error) {
	return &BloomMarkSetEnv{}, nil
}

func (e *BloomMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
	size := int64(BloomFilterMinSize)
	for size < sizeHint {
		size += BloomFilterMinSize
	}

	salt := make([]byte, 4)
	_, err := rand.Read(salt)
	if err != nil {
		return nil, xerrors.Errorf("error reading salt: %w", err)
	}

	bf, err := bbloom.New(float64(size), BloomFilterProbability)
	if err != nil {
		return nil, xerrors.Errorf("error creating bloom filter: %w", err)
	}

	return &BloomMarkSet{salt: salt, bf: bf}, nil
}

func (e *BloomMarkSetEnv) Close() error {
	return nil
}

func (s *BloomMarkSet) saltedKey(cid cid.Cid) []byte {
	hash := cid.Hash()
	key := make([]byte, len(s.salt)+len(hash))
	n := copy(key, s.salt)
	copy(key[n:], hash)
	rehash := sha256.Sum256(key)
	return rehash[:]
}

func (s *BloomMarkSet) Mark(cid cid.Cid) error {
	if s.ts {
		s.mx.Lock()
		defer s.mx.Unlock()
	}

	if s.bf == nil {
		return errMarkSetClosed
	}

	s.bf.Add(s.saltedKey(cid))
	return nil
}

func (s *BloomMarkSet) Has(cid cid.Cid) (bool, error) {
	if s.ts {
		s.mx.RLock()
		defer s.mx.RUnlock()
	}

	if s.bf == nil {
		return false, errMarkSetClosed
	}

	return s.bf.Has(s.saltedKey(cid)), nil
}

func (s *BloomMarkSet) Close() error {
	if s.ts {
		s.mx.Lock()
		defer s.mx.Unlock()
	}
	s.bf = nil
	return nil
}

func (s *BloomMarkSet) SetConcurrent() {
	s.ts = true
}
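
Note the probabilistic trade-off here: a bloom-backed set never returns a false negative for a marked CID, but Has may return a false positive (bounded by BloomFilterProbability, roughly 1%) for an unmarked one, so compaction can only over-retain objects, never purge live ones. A hedged illustration:

```go
package splitstore

import cid "github.com/ipfs/go-cid"

// bloomSemantics illustrates the guarantee: ok1 is always true for a
// marked CID; ok2 is usually false but may be true with ~1% probability.
func bloomSemantics(k1, k2 cid.Cid) (bool, bool, error) {
	env, err := NewBloomMarkSetEnv()
	if err != nil {
		return false, false, err
	}
	set, err := env.Create("example", 0)
	if err != nil {
		return false, false, err
	}
	defer set.Close() //nolint:errcheck

	if err := set.Mark(k1); err != nil {
		return false, false, err
	}
	ok1, _ := set.Has(k1) // no false negatives
	ok2, _ := set.Has(k2) // possible (rare) false positive
	return ok1, ok2, nil
}
```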
75 blockstore/splitstore/markset_map.go Normal file
@@ -0,0 +1,75 @@
package splitstore

import (
	"sync"

	cid "github.com/ipfs/go-cid"
)

type MapMarkSetEnv struct{}

var _ MarkSetEnv = (*MapMarkSetEnv)(nil)

type MapMarkSet struct {
	mx  sync.RWMutex
	set map[string]struct{}

	ts bool
}

var _ MarkSet = (*MapMarkSet)(nil)

func NewMapMarkSetEnv() (*MapMarkSetEnv, error) {
	return &MapMarkSetEnv{}, nil
}

func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) {
	return &MapMarkSet{
		set: make(map[string]struct{}, sizeHint),
	}, nil
}

func (e *MapMarkSetEnv) Close() error {
	return nil
}

func (s *MapMarkSet) Mark(cid cid.Cid) error {
	if s.ts {
		s.mx.Lock()
		defer s.mx.Unlock()
	}

	if s.set == nil {
		return errMarkSetClosed
	}

	s.set[string(cid.Hash())] = struct{}{}
	return nil
}

func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) {
	if s.ts {
		s.mx.RLock()
		defer s.mx.RUnlock()
	}

	if s.set == nil {
		return false, errMarkSetClosed
	}

	_, ok := s.set[string(cid.Hash())]
	return ok, nil
}

func (s *MapMarkSet) Close() error {
	if s.ts {
		s.mx.Lock()
		defer s.mx.Unlock()
	}
	s.set = nil
	return nil
}

func (s *MapMarkSet) SetConcurrent() {
	s.ts = true
}
138 blockstore/splitstore/markset_test.go Normal file
@@ -0,0 +1,138 @@
package splitstore

import (
	"io/ioutil"
	"testing"

	cid "github.com/ipfs/go-cid"
	"github.com/multiformats/go-multihash"
)

func TestMapMarkSet(t *testing.T) {
	testMarkSet(t, "map")
}

func TestBloomMarkSet(t *testing.T) {
	testMarkSet(t, "bloom")
}

func testMarkSet(t *testing.T, lsType string) {
	t.Helper()

	path, err := ioutil.TempDir("", "sweep-test.*")
	if err != nil {
		t.Fatal(err)
	}

	env, err := OpenMarkSetEnv(path, lsType)
	if err != nil {
		t.Fatal(err)
	}
	defer env.Close() //nolint:errcheck

	hotSet, err := env.Create("hot", 0)
	if err != nil {
		t.Fatal(err)
	}

	coldSet, err := env.Create("cold", 0)
	if err != nil {
		t.Fatal(err)
	}

	makeCid := func(key string) cid.Cid {
		h, err := multihash.Sum([]byte(key), multihash.SHA2_256, -1)
		if err != nil {
			t.Fatal(err)
		}

		return cid.NewCidV1(cid.Raw, h)
	}

	mustHave := func(s MarkSet, cid cid.Cid) {
		has, err := s.Has(cid)
		if err != nil {
			t.Fatal(err)
		}

		if !has {
			t.Fatal("mark not found")
		}
	}

	mustNotHave := func(s MarkSet, cid cid.Cid) {
		has, err := s.Has(cid)
		if err != nil {
			t.Fatal(err)
		}

		if has {
			t.Fatal("unexpected mark")
		}
	}

	k1 := makeCid("a")
	k2 := makeCid("b")
	k3 := makeCid("c")
	k4 := makeCid("d")

	hotSet.Mark(k1)  //nolint
	hotSet.Mark(k2)  //nolint
	coldSet.Mark(k3) //nolint

	mustHave(hotSet, k1)
	mustHave(hotSet, k2)
	mustNotHave(hotSet, k3)
	mustNotHave(hotSet, k4)

	mustNotHave(coldSet, k1)
	mustNotHave(coldSet, k2)
	mustHave(coldSet, k3)
	mustNotHave(coldSet, k4)

	// close them and reopen to redo the dance

	err = hotSet.Close()
	if err != nil {
		t.Fatal(err)
	}

	err = coldSet.Close()
	if err != nil {
		t.Fatal(err)
	}

	hotSet, err = env.Create("hot", 0)
	if err != nil {
		t.Fatal(err)
	}

	coldSet, err = env.Create("cold", 0)
	if err != nil {
		t.Fatal(err)
	}

	hotSet.Mark(k3)  //nolint
	hotSet.Mark(k4)  //nolint
	coldSet.Mark(k1) //nolint

	mustNotHave(hotSet, k1)
	mustNotHave(hotSet, k2)
	mustHave(hotSet, k3)
	mustHave(hotSet, k4)

	mustHave(coldSet, k1)
	mustNotHave(coldSet, k2)
	mustNotHave(coldSet, k3)
	mustNotHave(coldSet, k4)

	err = hotSet.Close()
	if err != nil {
		t.Fatal(err)
	}

	err = coldSet.Close()
	if err != nil {
		t.Fatal(err)
	}
}
568 blockstore/splitstore/splitstore.go Normal file
@@ -0,0 +1,568 @@
package splitstore

import (
	"context"
	"errors"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"go.uber.org/multierr"
	"golang.org/x/xerrors"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
	dstore "github.com/ipfs/go-datastore"
	logging "github.com/ipfs/go-log/v2"

	"github.com/filecoin-project/go-state-types/abi"
	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/metrics"

	"go.opencensus.io/stats"
)

var (
	// baseEpochKey stores the base epoch (last compaction epoch) in the
	// metadata store.
	baseEpochKey = dstore.NewKey("/splitstore/baseEpoch")

	// warmupEpochKey stores whether a hot store warmup has been performed.
	// On first start, the splitstore will walk the state tree and will copy
	// all active blocks into the hotstore.
	warmupEpochKey = dstore.NewKey("/splitstore/warmupEpoch")

	// markSetSizeKey stores the current estimate for the mark set size.
	// this is first computed at warmup and updated in every compaction
	markSetSizeKey = dstore.NewKey("/splitstore/markSetSize")

	// compactionIndexKey stores the compaction index (serial number)
	compactionIndexKey = dstore.NewKey("/splitstore/compactionIndex")

	log = logging.Logger("splitstore")

	// set this to true if you are debugging the splitstore to enable debug logging
	enableDebugLog = false
	// set this to true if you want to track origin stack traces in the write log
	enableDebugLogWriteTraces = false
)

func init() {
	if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG") == "1" {
		enableDebugLog = true
	}

	if os.Getenv("LOTUS_SPLITSTORE_DEBUG_LOG_WRITE_TRACES") == "1" {
		enableDebugLogWriteTraces = true
	}
}

type Config struct {
	// MarkSetType is the type of mark set to use.
	//
	// Only current sane value is "map", but we may add an option for a disk-backed
	// markset for memory-constrained situations.
	MarkSetType string

	// DiscardColdBlocks indicates whether to skip moving cold blocks to the coldstore.
	// If the splitstore is running with a noop coldstore then this option is set to true
	// which skips moving (as it is a noop, but still takes time to read all the cold objects)
	// and directly purges cold blocks.
	DiscardColdBlocks bool

	// HotstoreMessageRetention indicates the hotstore retention policy for messages.
	// It has the following semantics:
	// - a value of 0 will only retain messages within the compaction boundary (4 finalities)
	// - a positive integer indicates the number of finalities, outside the compaction boundary,
	//   for which messages will be retained in the hotstore.
	HotStoreMessageRetention uint64
}
|
||||||
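// Illustrative sketch (not part of the diff): a minimal configuration. "map"
// is currently the only supported mark set type, and the zero values of the
// other fields give the default behavior described above.
//
//	cfg := &splitstore.Config{
//		MarkSetType:              "map",
//		DiscardColdBlocks:        false, // move cold blocks to the coldstore
//		HotStoreMessageRetention: 0,     // retain messages only within the compaction boundary
//	}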

// ChainAccessor allows the Splitstore to access the chain. It will most likely
// be a ChainStore at runtime.
type ChainAccessor interface {
	GetTipsetByHeight(context.Context, abi.ChainEpoch, *types.TipSet, bool) (*types.TipSet, error)
	GetHeaviestTipSet() *types.TipSet
	SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error)
}

// hotstore is the interface that must be satisfied by the hot blockstore; it is an extension
// of the Blockstore interface with the traits we need for compaction.
type hotstore interface {
	bstore.Blockstore
	bstore.BlockstoreIterator
}

type SplitStore struct {
	compacting int32 // compaction/prune/warmup in progress
	closing    int32 // the splitstore is closing

	cfg *Config

	mx          sync.Mutex
	warmupEpoch abi.ChainEpoch // protected by mx
	baseEpoch   abi.ChainEpoch // protected by compaction lock

	headChangeMx sync.Mutex

	coldPurgeSize int

	chain ChainAccessor
	ds    dstore.Datastore
	cold  bstore.Blockstore
	hot   hotstore

	markSetEnv  MarkSetEnv
	markSetSize int64

	compactionIndex int64

	ctx    context.Context
	cancel func()

	debug *debugLog

	// transactional protection for concurrent read/writes during compaction
	txnLk           sync.RWMutex
	txnViewsMx      sync.Mutex
	txnViewsCond    sync.Cond
	txnViews        int
	txnViewsWaiting bool
	txnActive       bool
	txnProtect      MarkSet
	txnRefsMx       sync.Mutex
	txnRefs         map[cid.Cid]struct{}
	txnMissing      map[cid.Cid]struct{}

	// registered protectors
	protectors []func(func(cid.Cid) error) error
}

var _ bstore.Blockstore = (*SplitStore)(nil)

// Open opens an existing splitstore, or creates a new splitstore. The splitstore
// is backed by the provided hot and cold stores. The returned SplitStore MUST be
// attached to the ChainStore with Start in order to trigger compaction.
func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Config) (*SplitStore, error) {
	// hot blockstore must support the hotstore interface
	hots, ok := hot.(hotstore)
	if !ok {
		// be specific about what is missing
		if _, ok := hot.(bstore.BlockstoreIterator); !ok {
			return nil, xerrors.Errorf("hot blockstore does not support efficient iteration: %T", hot)
		}

		return nil, xerrors.Errorf("hot blockstore does not support the necessary traits: %T", hot)
	}

	// the markset env
	markSetEnv, err := OpenMarkSetEnv(path, cfg.MarkSetType)
	if err != nil {
		return nil, err
	}

	// and now we can make a SplitStore
	ss := &SplitStore{
		cfg:        cfg,
		ds:         ds,
		cold:       cold,
		hot:        hots,
		markSetEnv: markSetEnv,

		coldPurgeSize: defaultColdPurgeSize,
	}

	ss.txnViewsCond.L = &ss.txnViewsMx
	ss.ctx, ss.cancel = context.WithCancel(context.Background())

	if enableDebugLog {
		ss.debug, err = openDebugLog(path)
		if err != nil {
			return nil, err
		}
	}

	return ss, nil
}
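// Illustrative sketch (not part of the diff): wiring a splitstore into a
// node. Assumes hotBS implements the hotstore interface (Blockstore +
// BlockstoreIterator), coldBS is any Blockstore, metaDS is the metadata
// datastore, and chainStore satisfies ChainAccessor; all of these names are
// placeholders.
//
//	ss, err := splitstore.Open(repoPath, metaDS, hotBS, coldBS, cfg)
//	if err != nil {
//		return err
//	}
//	defer ss.Close() //nolint:errcheck
//
//	// Start attaches the chain and enables warmup/compaction.
//	if err := ss.Start(chainStore); err != nil {
//		return err
//	}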

// Blockstore interface

// DeleteBlock is not supported; the splitstore manages deletion itself via compaction.
func (s *SplitStore) DeleteBlock(_ cid.Cid) error {
	// afaict we don't seem to be using this method, so it's not implemented
	return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint
}

func (s *SplitStore) DeleteMany(_ []cid.Cid) error {
	// afaict we don't seem to be using this method, so it's not implemented
	return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint
}

// Has checks the hotstore first and falls through to the coldstore.
func (s *SplitStore) Has(cid cid.Cid) (bool, error) {
	if isIdentiyCid(cid) {
		return true, nil
	}

	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	has, err := s.hot.Has(cid)
	if err != nil {
		return has, err
	}

	if has {
		s.trackTxnRef(cid)
		return true, nil
	}

	return s.cold.Has(cid)
}

func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) {
	if isIdentiyCid(cid) {
		data, err := decodeIdentityCid(cid)
		if err != nil {
			return nil, err
		}

		return blocks.NewBlockWithCid(data, cid)
	}

	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	blk, err := s.hot.Get(cid)

	switch err {
	case nil:
		s.trackTxnRef(cid)
		return blk, nil

	case bstore.ErrNotFound:
		if s.isWarm() {
			s.debug.LogReadMiss(cid)
		}

		blk, err = s.cold.Get(cid)
		if err == nil {
			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
		}
		return blk, err

	default:
		return nil, err
	}
}

func (s *SplitStore) GetSize(cid cid.Cid) (int, error) {
	if isIdentiyCid(cid) {
		data, err := decodeIdentityCid(cid)
		if err != nil {
			return 0, err
		}

		return len(data), nil
	}

	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	size, err := s.hot.GetSize(cid)

	switch err {
	case nil:
		s.trackTxnRef(cid)
		return size, nil

	case bstore.ErrNotFound:
		if s.isWarm() {
			s.debug.LogReadMiss(cid)
		}

		size, err = s.cold.GetSize(cid)
		if err == nil {
			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
		}
		return size, err

	default:
		return 0, err
	}
}

func (s *SplitStore) Put(blk blocks.Block) error {
	if isIdentiyCid(blk.Cid()) {
		return nil
	}

	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	err := s.hot.Put(blk)
	if err != nil {
		return err
	}

	s.debug.LogWrite(blk)

	s.trackTxnRef(blk.Cid())
	return nil
}

func (s *SplitStore) PutMany(blks []blocks.Block) error {
	// filter identities
	idcids := 0
	for _, blk := range blks {
		if isIdentiyCid(blk.Cid()) {
			idcids++
		}
	}

	if idcids > 0 {
		if idcids == len(blks) {
			// it's all identities
			return nil
		}

		filtered := make([]blocks.Block, 0, len(blks)-idcids)
		for _, blk := range blks {
			if isIdentiyCid(blk.Cid()) {
				continue
			}
			filtered = append(filtered, blk)
		}

		blks = filtered
	}

	batch := make([]cid.Cid, 0, len(blks))
	for _, blk := range blks {
		batch = append(batch, blk.Cid())
	}

	s.txnLk.RLock()
	defer s.txnLk.RUnlock()

	err := s.hot.PutMany(blks)
	if err != nil {
		return err
	}

	s.debug.LogWriteMany(blks)

	s.trackTxnRefMany(batch)
	return nil
}

func (s *SplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	ctx, cancel := context.WithCancel(ctx)

	chHot, err := s.hot.AllKeysChan(ctx)
	if err != nil {
		cancel()
		return nil, err
	}

	chCold, err := s.cold.AllKeysChan(ctx)
	if err != nil {
		cancel()
		return nil, err
	}

	seen := cid.NewSet()
	ch := make(chan cid.Cid, 8) // buffer is arbitrary, just enough to avoid context switches
	go func() {
		defer cancel()
		defer close(ch)

		for _, in := range []<-chan cid.Cid{chHot, chCold} {
			for c := range in {
				// ensure we only emit each key once
				if !seen.Visit(c) {
					continue
				}

				select {
				case ch <- c:
				case <-ctx.Done():
					return
				}
			}
		}
	}()

	return ch, nil
}

func (s *SplitStore) HashOnRead(enabled bool) {
	s.hot.HashOnRead(enabled)
	s.cold.HashOnRead(enabled)
}

func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error {
	if isIdentiyCid(cid) {
		data, err := decodeIdentityCid(cid)
		if err != nil {
			return err
		}

		return cb(data)
	}

	// views are (optimistically) protected two-fold:
	// - if there is an active transaction, then the reference is protected.
	// - if there is no active transaction, active views are tracked in a
	//   wait group and compaction is inhibited from starting until they
	//   have all completed. this is necessary to ensure that a (very) long-running
	//   view can't have its data pointer deleted, which would be catastrophic.
	//   Note that we can't just RLock for the duration of the view, as this could
	//   lead to deadlock with recursive views.
	s.protectView(cid)
	defer s.viewDone()

	err := s.hot.View(cid, cb)
	switch err {
	case bstore.ErrNotFound:
		if s.isWarm() {
			s.debug.LogReadMiss(cid)
		}

		err = s.cold.View(cid, cb)
		if err == nil {
			stats.Record(s.ctx, metrics.SplitstoreMiss.M(1))
		}
		return err

	default:
		return err
	}
}

func (s *SplitStore) isWarm() bool {
	s.mx.Lock()
	defer s.mx.Unlock()
	return s.warmupEpoch > 0
}

// State tracking
func (s *SplitStore) Start(chain ChainAccessor) error {
	s.chain = chain
	curTs := chain.GetHeaviestTipSet()

	// should we warmup
	warmup := false

	// load base epoch from metadata ds
	// if none, then use current epoch because it's a fresh start
	bs, err := s.ds.Get(baseEpochKey)
	switch err {
	case nil:
		s.baseEpoch = bytesToEpoch(bs)

	case dstore.ErrNotFound:
		if curTs == nil {
			// this can happen in some tests
			break
		}

		err = s.setBaseEpoch(curTs.Height())
		if err != nil {
			return xerrors.Errorf("error saving base epoch: %w", err)
		}

	default:
		return xerrors.Errorf("error loading base epoch: %w", err)
	}

	// load warmup epoch from metadata ds
	bs, err = s.ds.Get(warmupEpochKey)
	switch err {
	case nil:
		s.warmupEpoch = bytesToEpoch(bs)

	case dstore.ErrNotFound:
		warmup = true

	default:
		return xerrors.Errorf("error loading warmup epoch: %w", err)
	}

	// load markSetSize from metadata ds to provide a size hint for marksets
	bs, err = s.ds.Get(markSetSizeKey)
	switch err {
	case nil:
		s.markSetSize = bytesToInt64(bs)

	case dstore.ErrNotFound:
	default:
		return xerrors.Errorf("error loading mark set size: %w", err)
	}

	// load compactionIndex from metadata ds to provide a hint as to when to perform moving gc
	bs, err = s.ds.Get(compactionIndexKey)
	switch err {
	case nil:
		s.compactionIndex = bytesToInt64(bs)

	case dstore.ErrNotFound:
		// this is potentially an upgrade from splitstore v0; schedule a warmup as v0 has
		// some issues with hot references leaking into the coldstore.
		warmup = true
	default:
		return xerrors.Errorf("error loading compaction index: %w", err)
	}

	log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch)

	if warmup {
		err = s.warmup(curTs)
		if err != nil {
			return xerrors.Errorf("error starting warmup: %w", err)
		}
	}

	// watch the chain
	chain.SubscribeHeadChanges(s.HeadChange)

	return nil
}

func (s *SplitStore) AddProtector(protector func(func(cid.Cid) error) error) {
	s.mx.Lock()
	defer s.mx.Unlock()

	s.protectors = append(s.protectors, protector)
}

func (s *SplitStore) Close() error {
	if !atomic.CompareAndSwapInt32(&s.closing, 0, 1) {
		// already closing
		return nil
	}

	if atomic.LoadInt32(&s.compacting) == 1 {
		log.Warn("close with ongoing compaction in progress; waiting for it to finish...")
		for atomic.LoadInt32(&s.compacting) == 1 {
			time.Sleep(time.Second)
		}
	}

	s.cancel()
	return multierr.Combine(s.markSetEnv.Close(), s.debug.Close())
}

func (s *SplitStore) checkClosing() error {
	if atomic.LoadInt32(&s.closing) == 1 {
		return xerrors.Errorf("splitstore is closing")
	}

	return nil
}

func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error {
	s.baseEpoch = epoch
	return s.ds.Put(baseEpochKey, epochToBytes(epoch))
}
1144 blockstore/splitstore/splitstore_compact.go Normal file
(file diff suppressed because it is too large)
114 blockstore/splitstore/splitstore_expose.go Normal file
@@ -0,0 +1,114 @@
package splitstore

import (
	"context"
	"errors"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"

	bstore "github.com/filecoin-project/lotus/blockstore"
)

type exposedSplitStore struct {
	s *SplitStore
}

var _ bstore.Blockstore = (*exposedSplitStore)(nil)

func (s *SplitStore) Expose() bstore.Blockstore {
	return &exposedSplitStore{s: s}
}

func (es *exposedSplitStore) DeleteBlock(_ cid.Cid) error {
	return errors.New("DeleteBlock: operation not supported")
}

func (es *exposedSplitStore) DeleteMany(_ []cid.Cid) error {
	return errors.New("DeleteMany: operation not supported")
}

func (es *exposedSplitStore) Has(c cid.Cid) (bool, error) {
	if isIdentiyCid(c) {
		return true, nil
	}

	has, err := es.s.hot.Has(c)
	if has || err != nil {
		return has, err
	}

	return es.s.cold.Has(c)
}

func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) {
	if isIdentiyCid(c) {
		data, err := decodeIdentityCid(c)
		if err != nil {
			return nil, err
		}

		return blocks.NewBlockWithCid(data, c)
	}

	blk, err := es.s.hot.Get(c)
	switch err {
	case bstore.ErrNotFound:
		return es.s.cold.Get(c)
	default:
		return blk, err
	}
}

func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) {
	if isIdentiyCid(c) {
		data, err := decodeIdentityCid(c)
		if err != nil {
			return 0, err
		}

		return len(data), nil
	}

	size, err := es.s.hot.GetSize(c)
	switch err {
	case bstore.ErrNotFound:
		return es.s.cold.GetSize(c)
	default:
		return size, err
	}
}

func (es *exposedSplitStore) Put(blk blocks.Block) error {
	return es.s.Put(blk)
}

func (es *exposedSplitStore) PutMany(blks []blocks.Block) error {
	return es.s.PutMany(blks)
}

func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	return es.s.AllKeysChan(ctx)
}

func (es *exposedSplitStore) HashOnRead(enabled bool) {}

func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error {
	if isIdentiyCid(c) {
		data, err := decodeIdentityCid(c)
		if err != nil {
			return err
		}

		return f(data)
	}

	err := es.s.hot.View(c, f)
	switch err {
	case bstore.ErrNotFound:
		return es.s.cold.View(c, f)

	default:
		return err
	}
}
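The exposed view is useful when a subsystem needs read access to both stores without paying for the transactional reference tracking that the main SplitStore methods perform. A brief illustrative sketch, where ss is a *SplitStore returned by Open and someCid is a placeholder:

	// reads check the hotstore first, then fall through to the coldstore,
	// without registering transactional references
	exposed := ss.Expose()
	has, err := exposed.Has(someCid)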
30 blockstore/splitstore/splitstore_gc.go Normal file
@@ -0,0 +1,30 @@
package splitstore

import (
	"fmt"
	"time"

	bstore "github.com/filecoin-project/lotus/blockstore"
)

func (s *SplitStore) gcHotstore() {
	if err := s.gcBlockstoreOnline(s.hot); err != nil {
		log.Warnf("error garbage collecting hotstore: %s", err)
	}
}

func (s *SplitStore) gcBlockstoreOnline(b bstore.Blockstore) error {
	if gc, ok := b.(bstore.BlockstoreGC); ok {
		log.Info("garbage collecting blockstore")
		startGC := time.Now()

		if err := gc.CollectGarbage(); err != nil {
			return err
		}

		log.Infow("garbage collecting blockstore done", "took", time.Since(startGC))
		return nil
	}

	return fmt.Errorf("blockstore doesn't support online gc: %T", b)
}
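gcBlockstoreOnline only requires the optional bstore.BlockstoreGC trait, discovered via the type assertion above. As a hedged sketch of what satisfying it looks like, countingStore below is a hypothetical wrapper type and the body is a stub; the real badger-backed implementation is not shown in this diff:

	// countingStore is hypothetical, shown only to illustrate the
	// BlockstoreGC trait that gcBlockstoreOnline looks for.
	func (b *countingStore) CollectGarbage() error {
		// a real implementation (e.g. badger) would compact value logs
		// here until no further space can be reclaimed
		return nil
	}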
381 blockstore/splitstore/splitstore_test.go Normal file
@@ -0,0 +1,381 @@
package splitstore

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/types/mock"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
	datastore "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	logging "github.com/ipfs/go-log/v2"
)

func init() {
	CompactionThreshold = 5
	CompactionBoundary = 2
	logging.SetLogLevel("splitstore", "DEBUG")
}

func testSplitStore(t *testing.T, cfg *Config) {
	chain := &mockChain{t: t}

	// the myriads of stores
	ds := dssync.MutexWrap(datastore.NewMapDatastore())
	hot := newMockStore()
	cold := newMockStore()

	// this is necessary to avoid dangling references to the garbage the mock puts in the blocks
	garbage := blocks.NewBlock([]byte{1, 2, 3})
	err := cold.Put(garbage)
	if err != nil {
		t.Fatal(err)
	}

	// genesis
	genBlock := mock.MkBlock(nil, 0, 0)
	genBlock.Messages = garbage.Cid()
	genBlock.ParentMessageReceipts = garbage.Cid()
	genBlock.ParentStateRoot = garbage.Cid()
	genBlock.Timestamp = uint64(time.Now().Unix())

	genTs := mock.TipSet(genBlock)
	chain.push(genTs)

	// put the genesis block to cold store
	blk, err := genBlock.ToStorageBlock()
	if err != nil {
		t.Fatal(err)
	}

	err = cold.Put(blk)
	if err != nil {
		t.Fatal(err)
	}

	// create a garbage block that is protected with a registered protector
	protected := blocks.NewBlock([]byte("protected!"))
	err = hot.Put(protected)
	if err != nil {
		t.Fatal(err)
	}

	// and another one that is not protected
	unprotected := blocks.NewBlock([]byte("unprotected!"))
	err = hot.Put(unprotected)
	if err != nil {
		t.Fatal(err)
	}

	// open the splitstore
	ss, err := Open("", ds, hot, cold, cfg)
	if err != nil {
		t.Fatal(err)
	}
	defer ss.Close() //nolint

	// register our protector
	ss.AddProtector(func(protect func(cid.Cid) error) error {
		return protect(protected.Cid())
	})

	err = ss.Start(chain)
	if err != nil {
		t.Fatal(err)
	}

	// make some tipsets, but not enough to cause compaction
	mkBlock := func(curTs *types.TipSet, i int, stateRoot blocks.Block) *types.TipSet {
		blk := mock.MkBlock(curTs, uint64(i), uint64(i))

		blk.Messages = garbage.Cid()
		blk.ParentMessageReceipts = garbage.Cid()
		blk.ParentStateRoot = stateRoot.Cid()
		blk.Timestamp = uint64(time.Now().Unix())

		sblk, err := blk.ToStorageBlock()
		if err != nil {
			t.Fatal(err)
		}
		err = ss.Put(stateRoot)
		if err != nil {
			t.Fatal(err)
		}
		err = ss.Put(sblk)
		if err != nil {
			t.Fatal(err)
		}
		ts := mock.TipSet(blk)
		chain.push(ts)

		return ts
	}

	waitForCompaction := func() {
		for atomic.LoadInt32(&ss.compacting) == 1 {
			time.Sleep(100 * time.Millisecond)
		}
	}

	curTs := genTs
	for i := 1; i < 5; i++ {
		stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7})
		curTs = mkBlock(curTs, i, stateRoot)
		waitForCompaction()
	}

	// count objects in the cold and hot stores
	countBlocks := func(bs blockstore.Blockstore) int {
		count := 0
		_ = bs.(blockstore.BlockstoreIterator).ForEachKey(func(_ cid.Cid) error {
			count++
			return nil
		})
		return count
	}

	coldCnt := countBlocks(cold)
	hotCnt := countBlocks(hot)

	if coldCnt != 2 {
		t.Errorf("expected %d blocks, but got %d", 2, coldCnt)
	}

	if hotCnt != 12 {
		t.Errorf("expected %d blocks, but got %d", 12, hotCnt)
	}

	// trigger a compaction
	for i := 5; i < 10; i++ {
		stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7})
		curTs = mkBlock(curTs, i, stateRoot)
		waitForCompaction()
	}

	coldCnt = countBlocks(cold)
	hotCnt = countBlocks(hot)

	if coldCnt != 6 {
		t.Errorf("expected %d cold blocks, but got %d", 6, coldCnt)
	}

	if hotCnt != 18 {
		t.Errorf("expected %d hot blocks, but got %d", 18, hotCnt)
	}

	// ensure our protected block is still there
	has, err := hot.Has(protected.Cid())
	if err != nil {
		t.Fatal(err)
	}

	if !has {
		t.Fatal("protected block is missing from hotstore")
	}

	// ensure our unprotected block is in the coldstore now
	has, err = hot.Has(unprotected.Cid())
	if err != nil {
		t.Fatal(err)
	}

	if has {
		t.Fatal("unprotected block is still in hotstore")
	}

	has, err = cold.Has(unprotected.Cid())
	if err != nil {
		t.Fatal(err)
	}

	if !has {
		t.Fatal("unprotected block is missing from coldstore")
	}

	// Make sure we can revert without panicking.
	chain.revert(2)
}

func TestSplitStoreCompaction(t *testing.T) {
	testSplitStore(t, &Config{MarkSetType: "map"})
}

type mockChain struct {
	t testing.TB

	sync.Mutex
	genesis  *types.BlockHeader
	tipsets  []*types.TipSet
	listener func(revert []*types.TipSet, apply []*types.TipSet) error
}

func (c *mockChain) push(ts *types.TipSet) {
	c.Lock()
	c.tipsets = append(c.tipsets, ts)
	if c.genesis == nil {
		c.genesis = ts.Blocks()[0]
	}
	c.Unlock()

	if c.listener != nil {
		err := c.listener(nil, []*types.TipSet{ts})
		if err != nil {
			c.t.Errorf("mockchain: error dispatching listener: %s", err)
		}
	}
}

func (c *mockChain) revert(count int) {
	c.Lock()
	revert := make([]*types.TipSet, count)
	if count > len(c.tipsets) {
		c.Unlock()
		c.t.Fatalf("not enough tipsets to revert")
	}
	copy(revert, c.tipsets[len(c.tipsets)-count:])
	c.tipsets = c.tipsets[:len(c.tipsets)-count]
	c.Unlock()

	if c.listener != nil {
		err := c.listener(revert, nil)
		if err != nil {
			c.t.Errorf("mockchain: error dispatching listener: %s", err)
		}
	}
}

func (c *mockChain) GetTipsetByHeight(_ context.Context, epoch abi.ChainEpoch, _ *types.TipSet, _ bool) (*types.TipSet, error) {
	c.Lock()
	defer c.Unlock()

	iEpoch := int(epoch)
	if iEpoch > len(c.tipsets) {
		return nil, fmt.Errorf("bad epoch %d", epoch)
	}

	return c.tipsets[iEpoch], nil
}

func (c *mockChain) GetHeaviestTipSet() *types.TipSet {
	c.Lock()
	defer c.Unlock()

	return c.tipsets[len(c.tipsets)-1]
}

func (c *mockChain) SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) {
	c.listener = change
}

type mockStore struct {
	mx  sync.Mutex
	set map[cid.Cid]blocks.Block
}

func newMockStore() *mockStore {
	return &mockStore{set: make(map[cid.Cid]blocks.Block)}
}

func (b *mockStore) Has(cid cid.Cid) (bool, error) {
	b.mx.Lock()
	defer b.mx.Unlock()
	_, ok := b.set[cid]
	return ok, nil
}

func (b *mockStore) HashOnRead(hor bool) {}

func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) {
	b.mx.Lock()
	defer b.mx.Unlock()

	blk, ok := b.set[cid]
	if !ok {
		return nil, blockstore.ErrNotFound
	}
	return blk, nil
}

func (b *mockStore) GetSize(cid cid.Cid) (int, error) {
	blk, err := b.Get(cid)
	if err != nil {
		return 0, err
	}

	return len(blk.RawData()), nil
}

func (b *mockStore) View(cid cid.Cid, f func([]byte) error) error {
	blk, err := b.Get(cid)
	if err != nil {
		return err
	}
	return f(blk.RawData())
}

func (b *mockStore) Put(blk blocks.Block) error {
	b.mx.Lock()
	defer b.mx.Unlock()

	b.set[blk.Cid()] = blk
	return nil
}

func (b *mockStore) PutMany(blks []blocks.Block) error {
	b.mx.Lock()
	defer b.mx.Unlock()

	for _, blk := range blks {
		b.set[blk.Cid()] = blk
	}
	return nil
}

func (b *mockStore) DeleteBlock(cid cid.Cid) error {
	b.mx.Lock()
	defer b.mx.Unlock()

	delete(b.set, cid)
	return nil
}

func (b *mockStore) DeleteMany(cids []cid.Cid) error {
	b.mx.Lock()
	defer b.mx.Unlock()

	for _, c := range cids {
		delete(b.set, c)
	}
	return nil
}

func (b *mockStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	return nil, errors.New("not implemented")
}

func (b *mockStore) ForEachKey(f func(cid.Cid) error) error {
	b.mx.Lock()
	defer b.mx.Unlock()

	for c := range b.set {
		err := f(c)
		if err != nil {
			return err
		}
	}
	return nil
}

func (b *mockStore) Close() error {
	return nil
}
67 blockstore/splitstore/splitstore_util.go Normal file
@@ -0,0 +1,67 @@
package splitstore

import (
	"encoding/binary"

	"golang.org/x/xerrors"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"

	"github.com/filecoin-project/go-state-types/abi"
)

func epochToBytes(epoch abi.ChainEpoch) []byte {
	return uint64ToBytes(uint64(epoch))
}

func bytesToEpoch(buf []byte) abi.ChainEpoch {
	return abi.ChainEpoch(bytesToUint64(buf))
}

func int64ToBytes(i int64) []byte {
	return uint64ToBytes(uint64(i))
}

func bytesToInt64(buf []byte) int64 {
	return int64(bytesToUint64(buf))
}

func uint64ToBytes(i uint64) []byte {
	buf := make([]byte, 16)
	n := binary.PutUvarint(buf, i)
	return buf[:n]
}

func bytesToUint64(buf []byte) uint64 {
	i, _ := binary.Uvarint(buf)
	return i
}

func isUnitaryObject(c cid.Cid) bool {
	pre := c.Prefix()
	switch pre.Codec {
	case cid.FilCommitmentSealed, cid.FilCommitmentUnsealed:
		return true
	default:
		return pre.MhType == mh.IDENTITY
	}
}

func isIdentiyCid(c cid.Cid) bool {
	return c.Prefix().MhType == mh.IDENTITY
}

func decodeIdentityCid(c cid.Cid) ([]byte, error) {
	dmh, err := mh.Decode(c.Hash())
	if err != nil {
		return nil, xerrors.Errorf("error decoding identity cid %s: %w", c, err)
	}

	// sanity check
	if dmh.Code != mh.IDENTITY {
		return nil, xerrors.Errorf("error decoding identity cid %s: hash type is not identity", c)
	}

	return dmh.Digest, nil
}
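The identity-CID helpers invert each other: an identity multihash embeds the payload directly in the digest, which is why the splitstore can answer reads for such CIDs without touching either store. A small sketch of the round trip, assuming the standard bytes and fmt packages in addition to the file's imports:

	payload := []byte("embedded data")
	mhash, _ := mh.Sum(payload, mh.IDENTITY, -1)
	c := cid.NewCidV1(cid.Raw, mhash)

	fmt.Println(isIdentiyCid(c)) // true
	data, _ := decodeIdentityCid(c)
	fmt.Println(bytes.Equal(data, payload)) // true

	// the varint helpers round-trip as well
	fmt.Println(bytesToUint64(uint64ToBytes(42))) // 42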
126 blockstore/splitstore/splitstore_warmup.go Normal file
@@ -0,0 +1,126 @@
package splitstore

import (
	"sync/atomic"
	"time"

	"golang.org/x/xerrors"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"

	bstore "github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/types"
)

// warmup acquires the compaction lock and spawns a goroutine to warm up the hotstore;
// this is necessary when we sync from a snapshot or when we enable the splitstore
// on top of an existing blockstore (which becomes the coldstore).
func (s *SplitStore) warmup(curTs *types.TipSet) error {
	if !atomic.CompareAndSwapInt32(&s.compacting, 0, 1) {
		return xerrors.Errorf("error locking compaction")
	}

	go func() {
		defer atomic.StoreInt32(&s.compacting, 0)

		log.Info("warming up hotstore")
		start := time.Now()

		err := s.doWarmup(curTs)
		if err != nil {
			log.Errorf("error warming up hotstore: %s", err)
			return
		}

		log.Infow("warm up done", "took", time.Since(start))
	}()

	return nil
}

// doWarmup is the actual warmup procedure; it walks the chain loading all state
// roots at the boundary and headers all the way up to genesis.
// Objects are written in batches so as to minimize overhead.
func (s *SplitStore) doWarmup(curTs *types.TipSet) error {
	epoch := curTs.Height()
	batchHot := make([]blocks.Block, 0, batchSize)
	count := int64(0)
	xcount := int64(0)
	missing := int64(0)
	err := s.walkChain(curTs, epoch, epoch+1, // we don't load messages in warmup
		func(c cid.Cid) error {
			if isUnitaryObject(c) {
				return errStopWalk
			}

			count++

			has, err := s.hot.Has(c)
			if err != nil {
				return err
			}

			if has {
				return nil
			}

			blk, err := s.cold.Get(c)
			if err != nil {
				if err == bstore.ErrNotFound {
					missing++
					return nil
				}
				return err
			}

			xcount++

			batchHot = append(batchHot, blk)
			if len(batchHot) == batchSize {
				err = s.hot.PutMany(batchHot)
				if err != nil {
					return err
				}
				batchHot = batchHot[:0]
			}

			return nil
		})

	if err != nil {
		return err
	}

	if len(batchHot) > 0 {
		err = s.hot.PutMany(batchHot)
		if err != nil {
			return err
		}
	}

	log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing)

	s.markSetSize = count + count>>2 // overestimate by ~25% (count + count/4)
	err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize))
	if err != nil {
		log.Warnf("error saving mark set size: %s", err)
	}

	// save the warmup epoch
	err = s.ds.Put(warmupEpochKey, epochToBytes(epoch))
	if err != nil {
		return xerrors.Errorf("error saving warm up epoch: %w", err)
	}
	s.mx.Lock()
	s.warmupEpoch = epoch
	s.mx.Unlock()

	// also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes
	err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex))
	if err != nil {
		return xerrors.Errorf("error saving compaction index: %w", err)
	}

	return nil
}
81 blockstore/sync.go Normal file
@@ -0,0 +1,81 @@
package blockstore

import (
	"context"
	"sync"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
)

// NewMemorySync returns a thread-safe in-memory blockstore.
func NewMemorySync() *SyncBlockstore {
	return &SyncBlockstore{bs: make(MemBlockstore)}
}

// SyncBlockstore is a terminal blockstore that is a synchronized version
// of MemBlockstore.
type SyncBlockstore struct {
	mu sync.RWMutex
	bs MemBlockstore // specifically use a memStore to save indirection overhead.
}

func (m *SyncBlockstore) DeleteBlock(k cid.Cid) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.bs.DeleteBlock(k)
}

func (m *SyncBlockstore) DeleteMany(ks []cid.Cid) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.bs.DeleteMany(ks)
}

func (m *SyncBlockstore) Has(k cid.Cid) (bool, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.bs.Has(k)
}

func (m *SyncBlockstore) View(k cid.Cid, callback func([]byte) error) error {
	m.mu.RLock()
	defer m.mu.RUnlock()

	return m.bs.View(k, callback)
}

func (m *SyncBlockstore) Get(k cid.Cid) (blocks.Block, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.bs.Get(k)
}

func (m *SyncBlockstore) GetSize(k cid.Cid) (int, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.bs.GetSize(k)
}

func (m *SyncBlockstore) Put(b blocks.Block) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.bs.Put(b)
}

func (m *SyncBlockstore) PutMany(bs []blocks.Block) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.bs.PutMany(bs)
}

func (m *SyncBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	// this blockstore implementation doesn't do any async work.
	return m.bs.AllKeysChan(ctx)
}

func (m *SyncBlockstore) HashOnRead(enabled bool) {
	// noop
}
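A short sketch of the intended use: the RWMutex wrapper makes the map-backed store safe for concurrent writers, which the bare MemBlockstore is not.

	bs := blockstore.NewMemorySync()

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// concurrent Puts are safe behind the wrapper's lock
			_ = bs.Put(blocks.NewBlock([]byte{byte(i)}))
		}(i)
	}
	wg.Wait()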
@@ -1,4 +1,4 @@
-package timedbs
+package blockstore
 
 import (
 	"context"
@@ -10,37 +10,37 @@ import (
 	"github.com/ipfs/go-cid"
 	"github.com/raulk/clock"
 	"go.uber.org/multierr"
 
-	"github.com/filecoin-project/lotus/build"
-	"github.com/filecoin-project/lotus/lib/blockstore"
 )
 
-// TimedCacheBS is a blockstore that keeps blocks for at least the specified
-// caching interval before discarding them. Garbage collection must be started
-// and stopped by calling Start/Stop.
+// TimedCacheBlockstore is a blockstore that keeps blocks for at least the
+// specified caching interval before discarding them. Garbage collection must
+// be started and stopped by calling Start/Stop.
 //
 // Under the covers, it's implemented with an active and an inactive blockstore
 // that are rotated every cache time interval. This means all blocks will be
 // stored at most 2x the cache interval.
-type TimedCacheBS struct {
+//
+// Create a new instance by calling the NewTimedCacheBlockstore constructor.
+type TimedCacheBlockstore struct {
 	mu               sync.RWMutex
-	active, inactive blockstore.MemStore
+	active, inactive MemBlockstore
 	clock            clock.Clock
 	interval         time.Duration
 	closeCh          chan struct{}
 	doneRotatingCh   chan struct{}
 }
 
-func NewTimedCacheBS(cacheTime time.Duration) *TimedCacheBS {
-	return &TimedCacheBS{
-		active:   blockstore.NewTemporary(),
-		inactive: blockstore.NewTemporary(),
-		interval: cacheTime,
-		clock:    build.Clock,
+func NewTimedCacheBlockstore(interval time.Duration) *TimedCacheBlockstore {
+	b := &TimedCacheBlockstore{
+		active:   NewMemory(),
+		inactive: NewMemory(),
+		interval: interval,
+		clock:    clock.New(),
 	}
+	return b
 }
 
-func (t *TimedCacheBS) Start(ctx context.Context) error {
+func (t *TimedCacheBlockstore) Start(_ context.Context) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	if t.closeCh != nil {
@@ -65,11 +65,11 @@ func (t *TimedCacheBS) Start(ctx context.Context) error {
 	return nil
 }
 
-func (t *TimedCacheBS) Stop(ctx context.Context) error {
+func (t *TimedCacheBlockstore) Stop(_ context.Context) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	if t.closeCh == nil {
-		return fmt.Errorf("not started started")
+		return fmt.Errorf("not started")
 	}
 	select {
 	case <-t.closeCh:
@@ -80,15 +80,15 @@ func (t *TimedCacheBS) Stop(ctx context.Context) error {
 	return nil
 }
 
-func (t *TimedCacheBS) rotate() {
-	newBs := blockstore.NewTemporary()
+func (t *TimedCacheBlockstore) rotate() {
+	newBs := NewMemory()
 
 	t.mu.Lock()
 	t.inactive, t.active = t.active, newBs
 	t.mu.Unlock()
 }
 
-func (t *TimedCacheBS) Put(b blocks.Block) error {
+func (t *TimedCacheBlockstore) Put(b blocks.Block) error {
 	// Don't check the inactive set here. We want to keep this block for at
 	// least one interval.
 	t.mu.Lock()
@@ -96,33 +96,50 @@ func (t *TimedCacheBS) Put(b blocks.Block) error {
 	return t.active.Put(b)
 }
 
-func (t *TimedCacheBS) PutMany(bs []blocks.Block) error {
+func (t *TimedCacheBlockstore) PutMany(bs []blocks.Block) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	return t.active.PutMany(bs)
 }
 
-func (t *TimedCacheBS) Get(k cid.Cid) (blocks.Block, error) {
+func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) error {
+	// The underlying blockstore is always a "mem" blockstore so there's no difference,
+	// from a performance perspective, between view & get. So we call Get to avoid
+	// calling an arbitrary callback while holding a lock.
+	t.mu.RLock()
+	block, err := t.active.Get(k)
+	if err == ErrNotFound {
+		block, err = t.inactive.Get(k)
+	}
+	t.mu.RUnlock()
+
+	if err != nil {
+		return err
+	}
+	return callback(block.RawData())
+}
+
+func (t *TimedCacheBlockstore) Get(k cid.Cid) (blocks.Block, error) {
 	t.mu.RLock()
 	defer t.mu.RUnlock()
 	b, err := t.active.Get(k)
-	if err == blockstore.ErrNotFound {
+	if err == ErrNotFound {
 		b, err = t.inactive.Get(k)
 	}
 	return b, err
 }
 
-func (t *TimedCacheBS) GetSize(k cid.Cid) (int, error) {
+func (t *TimedCacheBlockstore) GetSize(k cid.Cid) (int, error) {
 	t.mu.RLock()
 	defer t.mu.RUnlock()
 	size, err := t.active.GetSize(k)
-	if err == blockstore.ErrNotFound {
+	if err == ErrNotFound {
 		size, err = t.inactive.GetSize(k)
 	}
 	return size, err
 }
 
-func (t *TimedCacheBS) Has(k cid.Cid) (bool, error) {
+func (t *TimedCacheBlockstore) Has(k cid.Cid) (bool, error) {
 	t.mu.RLock()
 	defer t.mu.RUnlock()
 	if has, err := t.active.Has(k); err != nil {
@@ -133,17 +150,23 @@ func (t *TimedCacheBS) Has(k cid.Cid) (bool, error) {
 	return t.inactive.Has(k)
 }
 
-func (t *TimedCacheBS) HashOnRead(_ bool) {
+func (t *TimedCacheBlockstore) HashOnRead(_ bool) {
 	// no-op
 }
 
-func (t *TimedCacheBS) DeleteBlock(k cid.Cid) error {
+func (t *TimedCacheBlockstore) DeleteBlock(k cid.Cid) error {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	return multierr.Combine(t.active.DeleteBlock(k), t.inactive.DeleteBlock(k))
 }
 
-func (t *TimedCacheBS) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
+func (t *TimedCacheBlockstore) DeleteMany(ks []cid.Cid) error {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return multierr.Combine(t.active.DeleteMany(ks), t.inactive.DeleteMany(ks))
+}
+
+func (t *TimedCacheBlockstore) AllKeysChan(_ context.Context) (<-chan cid.Cid, error) {
 	t.mu.RLock()
 	defer t.mu.RUnlock()
 
@@ -1,4 +1,4 @@
-package timedbs
+package blockstore
 
 import (
 	"context"
@@ -12,8 +12,8 @@ import (
 	"github.com/ipfs/go-cid"
 )
 
-func TestTimedBSSimple(t *testing.T) {
-	tc := NewTimedCacheBS(10 * time.Millisecond)
+func TestTimedCacheBlockstoreSimple(t *testing.T) {
+	tc := NewTimedCacheBlockstore(10 * time.Millisecond)
 	mClock := clock.NewMock()
 	mClock.Set(time.Now())
 	tc.clock = mClock
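The rotation scheme above is what yields the "at most 2x the cache interval" bound: a block written just after a rotation survives the first rotation (it merely moves to the inactive set) and is only dropped by the second. An illustrative sketch, written as package-internal code since rotate is unexported:

	tc := NewTimedCacheBlockstore(10 * time.Second)
	b := blocks.NewBlock([]byte("cached"))
	_ = tc.Put(b)

	tc.rotate()               // b moves to the inactive set, still readable
	_, err := tc.Get(b.Cid()) // err == nil

	tc.rotate()              // second rotation discards the inactive set
	_, err = tc.Get(b.Cid()) // err == ErrNotFound
	_ = err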