Merge pull request #2 from filecoin-project/master

merge latest Code
This commit is contained in:
swift-mx 2023-05-30 14:21:25 +08:00 committed by GitHub
commit 99c98b5de0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1604 changed files with 336924 additions and 47659 deletions

File diff suppressed because it is too large

138
.circleci/gen.go Normal file

@@ -0,0 +1,138 @@
package main
import (
"embed"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"text/template"
)
//go:generate go run ./gen.go ..
//go:embed template.yml
var templateFile embed.FS
type (
dirs = []string
suite = string
)
// groupedUnitTests maps suite names to top-level directories that should be
// included in that suite. The program adds an implicit group "rest" that
// includes all other top-level directories.
var groupedUnitTests = map[suite]dirs{
"unit-node": {"node"},
"unit-storage": {"storage", "extern"},
"unit-cli": {"cli", "cmd", "api"},
}
func main() {
if len(os.Args) != 2 {
panic("expected path to repo as argument")
}
repo := os.Args[1]
tmpl := template.New("template.yml")
tmpl.Delims("[[", "]]")
tmpl.Funcs(template.FuncMap{
"stripSuffix": func(in string) string {
return strings.TrimSuffix(in, "_test.go")
},
})
tmpl = template.Must(tmpl.ParseFS(templateFile, "*"))
// list all itests.
itests, err := filepath.Glob(filepath.Join(repo, "./itests/*_test.go"))
if err != nil {
panic(err)
}
// strip the dir from all entries.
for i, f := range itests {
itests[i] = filepath.Base(f)
}
// calculate the set of unit test directories to exclude because they are
// already included in a grouped suite.
var excluded = map[string]struct{}{}
for _, ss := range groupedUnitTests {
for _, s := range ss {
e, err := filepath.Abs(filepath.Join(repo, s))
if err != nil {
panic(err)
}
excluded[e] = struct{}{}
}
}
// all unit test top-level dirs that are neither itests nor included in other suites.
var rest = map[string]struct{}{}
err = filepath.Walk(repo, func(path string, f os.FileInfo, err error) error {
// include all tests that aren't in the itests directory.
if strings.Contains(path, "itests") {
return filepath.SkipDir
}
// exclude all tests included in other suites
if f.IsDir() {
if _, ok := excluded[path]; ok {
return filepath.SkipDir
}
}
if strings.HasSuffix(path, "_test.go") {
rel, err := filepath.Rel(repo, path)
if err != nil {
panic(err)
}
// take the first directory
rest[strings.Split(rel, string(os.PathSeparator))[0]] = struct{}{}
}
return err
})
if err != nil {
panic(err)
}
// add other directories to a 'rest' suite.
for k := range rest {
groupedUnitTests["unit-rest"] = append(groupedUnitTests["unit-rest"], k)
}
// map iteration order is not guaranteed, so sort the slice in place.
sort.Strings(groupedUnitTests["unit-rest"])
// form the input data.
type data struct {
Networks []string
ItestFiles []string
UnitSuites map[string]string
}
in := data{
Networks: []string{"mainnet", "butterflynet", "calibnet", "debug"},
ItestFiles: itests,
UnitSuites: func() map[string]string {
ret := make(map[string]string)
for name, dirs := range groupedUnitTests {
for i, d := range dirs {
dirs[i] = fmt.Sprintf("./%s/...", d) // turn into package
}
ret[name] = strings.Join(dirs, " ")
}
return ret
}(),
}
out, err := os.Create("./config.yml")
if err != nil {
panic(err)
}
defer out.Close()
// execute the template.
if err := tmpl.Execute(out, in); err != nil {
panic(err)
}
}
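Not part of the diff: a minimal, standalone sketch of the two template tricks gen.go relies on — the non-default `[[ ]]` delimiters (so CircleCI's own `<< >>` parameter syntax passes through the template untouched) and the `stripSuffix` helper that derives suite names from itest file names. The file names in the example data are illustrative, not taken from the repo.

```go
// Sketch only: demonstrates custom template delimiters and the stripSuffix
// helper as used by .circleci/gen.go. Example data is hypothetical.
package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	const tpl = `[[- range .ItestFiles ]]
  - test:
      name: test-itest-[[ . | stripSuffix ]]
      target: "./itests/[[ . ]]"
[[- end ]]
`
	t := template.New("example")
	t.Delims("[[", "]]") // leave CircleCI's << >> syntax alone
	t.Funcs(template.FuncMap{
		"stripSuffix": func(in string) string { return strings.TrimSuffix(in, "_test.go") },
	})
	t = template.Must(t.Parse(tpl))

	data := struct{ ItestFiles []string }{[]string{"deals_test.go", "wdpost_test.go"}}
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```

Running this prints two rendered `test` job entries, which is exactly the shape of the `[[- range $file := .ItestFiles -]]` block near the end of template.yml below.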

742
.circleci/template.yml Normal file

@@ -0,0 +1,742 @@
version: 2.1
orbs:
aws-cli: circleci/aws-cli@1.3.2
docker: circleci/docker@2.1.4
executors:
golang:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.19.7
resource_class: medium+
golang-2xl:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.19.7
resource_class: 2xlarge
ubuntu:
docker:
- image: ubuntu:20.04
commands:
build-platform-specific:
parameters:
linux:
default: true
description: is a linux build environment?
type: boolean
darwin:
default: false
description: is a darwin build environment?
type: boolean
darwin-architecture:
default: "amd64"
description: which darwin architecture is being used?
type: string
steps:
- checkout
- git_fetch_all_tags
- run: git submodule sync
- run: git submodule update --init
- when:
condition: <<parameters.linux>>
steps:
- install-ubuntu-deps
- check-go-version
- when:
condition: <<parameters.darwin>>
steps:
- run:
name: Install Go
command: |
curl https://dl.google.com/go/go`cat GO_VERSION_MIN`.darwin-<<parameters.darwin-architecture>>.pkg -o /tmp/go.pkg && \
sudo installer -pkg /tmp/go.pkg -target /
- run:
name: Export Go
command: |
echo 'export GOPATH="${HOME}/go"' >> $BASH_ENV
- run: go version
- run:
name: Install dependencies with Homebrew
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config coreutils jq hwloc
- run:
name: Install Rust
command: |
curl https://sh.rustup.rs -sSf | sh -s -- -y
- run: make deps
download-params:
steps:
- restore_cache:
name: Restore parameters cache
keys:
- 'v26-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
- run: ./lotus fetch-params 2048
- save_cache:
name: Save parameters cache
key: 'v26-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
install_ipfs:
steps:
- run: |
curl -O https://dist.ipfs.tech/kubo/v0.16.0/kubo_v0.16.0_linux-amd64.tar.gz
tar -xvzf kubo_v0.16.0_linux-amd64.tar.gz
pushd kubo
sudo bash install.sh
popd
rm -rf kubo
rm kubo_v0.16.0_linux-amd64.tar.gz
git_fetch_all_tags:
steps:
- run:
name: fetch all tags
command: |
git fetch --all
install-ubuntu-deps:
steps:
- run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
check-go-version:
steps:
- run: |
v=`go version | { read _ _ v _; echo ${v#go}; }`
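# This file is a Go text/template rendered by gen.go with the delimiters set to
# double square brackets, so the quoted construct on the next line is a template
# action that emits a literal bash double-bracket test in the generated config.yml.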
if [["[[ $v != `cat GO_VERSION_MIN` ]]"]]; then
echo "GO_VERSION_MIN file does not match the go version being used."
echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v."
exit 1
fi
jobs:
build:
executor: golang
working_directory: ~/lotus
steps:
- checkout
- git_fetch_all_tags
- run: git submodule sync
- run: git submodule update --init
- install-ubuntu-deps
- check-go-version
- run: make deps lotus
- persist_to_workspace:
root: ~/
paths:
- "lotus"
mod-tidy-check:
executor: golang
working_directory: ~/lotus
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go mod tidy -v
- run:
name: Check git diff
command: |
git --no-pager diff go.mod go.sum
git --no-pager diff --quiet go.mod go.sum
test:
description: |
Run tests with gotestsum.
working_directory: ~/lotus
parameters: &test-params
executor:
type: executor
default: golang
go-test-flags:
type: string
default: "-timeout 20m"
description: Flags passed to go test.
target:
type: string
default: "./..."
description: Import paths of packages to be tested.
proofs-log-test:
type: string
default: "0"
get-params:
type: boolean
default: false
suite:
type: string
default: unit
description: Test suite name to report to CircleCI.
executor: << parameters.executor >>
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- when:
condition: << parameters.get-params >>
steps:
- download-params
- run:
name: go test
environment:
TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >>
SKIP_CONFORMANCE: "1"
LOTUS_SRC_DIR: /home/circleci/project
command: |
mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts
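# gotestsum writes a JUnit XML report (picked up by store_test_results below)
# and a raw JSON event log (uploaded via store_artifacts).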
gotestsum \
--format standard-verbose \
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
--jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
--packages="<< parameters.target >>" \
-- << parameters.go-test-flags >>
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/<< parameters.suite >>.json
test-conformance:
working_directory: ~/lotus
description: |
Run tests using a corpus of interoperable test vectors for Filecoin
implementations to test their correctness and compliance with the Filecoin
specifications.
parameters:
<<: *test-params
vectors-branch:
type: string
default: ""
description: |
Branch on github.com/filecoin-project/test-vectors to checkout and
test with. If empty (the default) the commit defined by the git
submodule is used.
executor: << parameters.executor >>
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- download-params
- when:
condition:
not:
equal: [ "", << parameters.vectors-branch >> ]
steps:
- run:
name: checkout vectors branch
command: |
cd extern/test-vectors
git fetch
git checkout origin/<< parameters.vectors-branch >>
- run:
name: install statediff globally
command: |
## statediff is optional; we succeed even if compilation fails.
mkdir -p /tmp/statediff
git clone https://github.com/filecoin-project/statediff.git /tmp/statediff
cd /tmp/statediff
go install ./cmd/statediff || exit 0
- run:
name: go test
environment:
SKIP_CONFORMANCE: "0"
command: |
mkdir -p /tmp/test-reports
mkdir -p /tmp/test-artifacts
gotestsum \
--format pkgname-and-test-fails \
--junitfile /tmp/test-reports/junit.xml \
-- \
-v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/
go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/conformance-coverage.html
build-linux-amd64:
executor: golang
steps:
- build-platform-specific
- run: make lotus lotus-miner lotus-worker
- run:
name: check tag and version output match
command: ./scripts/version-check.sh ./lotus
- run: |
mkdir -p /tmp/workspace/linux_amd64_v1 && \
mv lotus lotus-miner lotus-worker /tmp/workspace/linux_amd64_v1/
- persist_to_workspace:
root: /tmp/workspace
paths:
- linux_amd64_v1
build-darwin-amd64:
description: build darwin lotus binary
working_directory: ~/go/src/github.com/filecoin-project/lotus
macos:
xcode: "13.4.1"
steps:
- build-platform-specific:
linux: false
darwin: true
darwin-architecture: amd64
- run: make lotus lotus-miner lotus-worker
- run: otool -hv lotus
- run:
name: check tag and version output match
command: ./scripts/version-check.sh ./lotus
- run: |
mkdir -p /tmp/workspace/darwin_amd64_v1 && \
mv lotus lotus-miner lotus-worker /tmp/workspace/darwin_amd64_v1/
- persist_to_workspace:
root: /tmp/workspace
paths:
- darwin_amd64_v1
build-darwin-arm64:
description: self-hosted m1 runner
working_directory: ~/go/src/github.com/filecoin-project/lotus
machine: true
resource_class: filecoin-project/self-hosted-m1
steps:
- run: echo 'export PATH=/opt/homebrew/bin:"$PATH"' >> "$BASH_ENV"
- build-platform-specific:
linux: false
darwin: true
darwin-architecture: arm64
- run: |
export CPATH=$(brew --prefix)/include && export LIBRARY_PATH=$(brew --prefix)/lib && make lotus lotus-miner lotus-worker
- run: otool -hv lotus
- run:
name: check tag and version output match
command: ./scripts/version-check.sh ./lotus
- run: |
mkdir -p /tmp/workspace/darwin_arm64 && \
mv lotus lotus-miner lotus-worker /tmp/workspace/darwin_arm64/
- persist_to_workspace:
root: /tmp/workspace
paths:
- darwin_arm64
- run:
command: make clean
when: always
- run:
name: cleanup homebrew
command: HOMEBREW_NO_AUTO_UPDATE=1 brew uninstall pkg-config coreutils jq hwloc
when: always
release:
executor: golang
parameters:
dry-run:
default: false
description: should this release actually publish its artifacts?
type: boolean
steps:
- checkout
- run: |
echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list
sudo apt update
sudo apt install goreleaser-pro
- install_ipfs
- attach_workspace:
at: /tmp/workspace
- when:
condition: << parameters.dry-run >>
steps:
- run: goreleaser release --rm-dist --snapshot --debug
- run: ./scripts/generate-checksums.sh
- when:
condition:
not: << parameters.dry-run >>
steps:
- run: goreleaser release --rm-dist --debug
- run: ./scripts/generate-checksums.sh
- run: ./scripts/publish-checksums.sh
gofmt:
executor: golang
working_directory: ~/lotus
steps:
- run:
command: "! go fmt ./... 2>&1 | read"
gen-check:
executor: golang
working_directory: ~/lotus
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
- run: make gen
- run: git --no-pager diff && git --no-pager diff --quiet
- run: make docsgen-cli
- run: git --no-pager diff && git --no-pager diff --quiet
docs-check:
executor: golang
working_directory: ~/lotus
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go install golang.org/x/tools/cmd/goimports
- run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
- run: make docsgen
- run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
- run: diff ../pre-openrpc-full ../post-openrpc-full && diff ../pre-openrpc-miner ../post-openrpc-miner && diff ../pre-openrpc-worker ../post-openrpc-worker && git --no-pager diff && git --no-pager diff --quiet
lint-all:
description: |
Run golangci-lint.
working_directory: ~/lotus
parameters:
executor:
type: executor
default: golang
args:
type: string
default: ''
description: |
Arguments to pass to golangci-lint
executor: << parameters.executor >>
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run:
name: Lint
command: |
golangci-lint run -v --timeout 10m \
--concurrency 4 << parameters.args >>
build-docker:
description: >
Publish to Dockerhub
executor: docker/docker
parameters:
image:
type: string
default: lotus
description: >
Passed to the docker build process to determine which image in the
Dockerfile should be built. Expected values are `lotus`,
`lotus-all-in-one`
network:
type: string
default: "mainnet"
description: >
Passed to the docker build process using GOFLAGS+=-tags=<<network>>.
Expected values are `debug`, `2k`, `calibnet`, `butterflynet`,
`interopnet`.
channel:
type: string
default: ""
description: >
The release channel to use for this image.
push:
type: boolean
default: false
description: >
When true, pushes the image to Dockerhub
steps:
- setup_remote_docker
- checkout
- git_fetch_all_tags
- run: git submodule sync
- run: git submodule update --init
- docker/check:
docker-username: DOCKERHUB_USERNAME
docker-password: DOCKERHUB_PASSWORD
- when:
condition:
equal: [ mainnet, <<parameters.network>> ]
steps:
- when:
condition: <<parameters.push>>
steps:
- docker/build:
image: filecoin/<<parameters.image>>
extra_build_args: --target <<parameters.image>>
tag: <<parameters.channel>>
- run:
name: Docker push
command: |
docker push filecoin/<<parameters.image>>:<<parameters.channel>>
if [["[[ ! -z $CIRCLE_SHA ]]"]]; then
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>> filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"
docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"
fi
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>> filecoin/<<parameters.image>>:"${CIRCLE_TAG}"
docker push filecoin/<<parameters.image>>:"${CIRCLE_TAG}"
fi
- unless:
condition: <<parameters.push>>
steps:
- docker/build:
image: filecoin/<<parameters.image>>
extra_build_args: --target <<parameters.image>>
- when:
condition:
not:
equal: [ mainnet, <<parameters.network>> ]
steps:
- when:
condition: <<parameters.push>>
steps:
- docker/build:
image: filecoin/<<parameters.image>>
extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>
tag: <<parameters.channel>>-<<parameters.network>>
- run:
name: Docker push
command: |
docker push filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>>
if [["[[ ! -z $CIRCLE_SHA ]]"]]; then
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"-<<parameters.network>>
docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"-<<parameters.network>>
fi
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_TAG}"-<<parameters.network>>
docker push filecoin/<<parameters.image>>:"${CIRCLE_TAG}"-<<parameters.network>>
fi
- unless:
condition: <<parameters.push>>
steps:
- docker/build:
image: filecoin/<<parameters.image>>
extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>
workflows:
ci:
jobs:
- build
- lint-all:
requires:
- build
- mod-tidy-check:
requires:
- build
- gofmt:
requires:
- build
- gen-check:
requires:
- build
- docs-check:
requires:
- build
[[- range $file := .ItestFiles -]]
[[ with $name := $file | stripSuffix ]]
- test:
name: test-itest-[[ $name ]]
requires:
- build
suite: itest-[[ $name ]]
target: "./itests/[[ $file ]]"
[[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config")]]
executor: golang-2xl
[[- end]]
[[- if (eq $name "wdpost")]]
get-params: true
[[end]]
[[- end ]][[- end]]
[[- range $suite, $pkgs := .UnitSuites]]
- test:
name: test-[[ $suite ]]
requires:
- build
suite: utest-[[ $suite ]]
target: "[[ $pkgs ]]"
[[if eq $suite "unit-cli"]]get-params: true[[end]]
[[- if eq $suite "unit-rest"]]executor: golang-2xl[[end]]
[[- end]]
- test:
go-test-flags: "-run=TestMulticoreSDR"
requires:
- build
suite: multicore-sdr-check
target: "./storage/sealer/ffiwrapper"
proofs-log-test: "1"
- test-conformance:
requires:
- build
suite: conformance
target: "./conformance"
release:
jobs:
- build-linux-amd64:
name: "Build ( linux / amd64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-darwin-amd64:
name: "Build ( darwin / amd64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-darwin-arm64:
name: "Build ( darwin / arm64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- release:
name: "Release"
requires:
- "Build ( darwin / amd64 )"
- "Build ( linux / amd64 )"
- "Build ( darwin / arm64 )"
filters:
branches:
ignore:
- /^.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- release:
name: "Release (dry-run)"
dry-run: true
requires:
- "Build ( darwin / amd64 )"
- "Build ( linux / amd64 )"
- "Build ( darwin / arm64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
[[- range .Networks]]
- build-docker:
name: "Docker push (lotus-all-in-one / stable / [[.]])"
image: lotus-all-in-one
channel: stable
network: [[.]]
push: true
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+$/
- build-docker:
name: "Docker push (lotus-all-in-one / candidate / [[.]])"
image: lotus-all-in-one
channel: candidate
network: [[.]]
push: true
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+-rc\d+$/
- build-docker:
name: "Docker push (lotus-all-in-one / edge / [[.]])"
image: lotus-all-in-one
channel: master
network: [[.]]
push: true
filters:
branches:
only:
- master
- build-docker:
name: "Docker build (lotus-all-in-one / [[.]])"
image: lotus-all-in-one
network: [[.]]
push: false
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
[[- end]]
- build-docker:
name: "Docker push (lotus / stable / mainnet)"
image: lotus
channel: stable
network: mainnet
push: true
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+$/
- build-docker:
name: "Docker push (lotus / candidate / mainnet)"
image: lotus
channel: candidate
network: mainnet
push: true
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+-rc\d+$/
- build-docker:
name: "Docker push (lotus / master / mainnet)"
image: lotus
channel: master
network: mainnet
push: true
filters:
branches:
only:
- master
- build-docker:
name: "Docker build (lotus / mainnet)"
image: lotus
network: mainnet
push: false
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
nightly:
triggers:
- schedule:
cron: "0 0 * * *"
filters:
branches:
only:
- master
jobs:
[[- range .Networks]]
- build-docker:
name: "Docker (lotus-all-in-one / nightly / [[.]])"
image: lotus-all-in-one
channel: nightly
network: [[.]]
push: true
[[- end]]


@@ -1,3 +0,0 @@
comment: off
ignore:
- "cbor_gen.go"

2
.gitattributes vendored Normal file

@@ -0,0 +1,2 @@
chain/actors/builtin/*/v* linguist-generated=true
chain/actors/builtin/*/message* linguist-generated=true

6
.github/CODEOWNERS vendored Normal file

@@ -0,0 +1,6 @@
# Reference
# https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners
# Global owners
# Ensure maintainers team is a requested reviewer for non-draft PRs
* @filecoin-project/lotus-maintainers


@@ -1,27 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Run '...'
2. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Version (run `lotus --version`):**
**Additional context**
Add any other context about the problem here.

85
.github/ISSUE_TEMPLATE/bug_report.yml vendored Normal file

@@ -0,0 +1,85 @@
name: "Bug Report"
description: "File a bug report to help us improve"
labels: [need/triage, kind/bug]
body:
- type: checkboxes
attributes:
label: Checklist
description: Please check off the following boxes before continuing to file a bug report!
options:
- label: This is **not** a security-related bug/issue. If it is, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
required: true
- label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion.
required: true
- label: I am running the [`Latest release`](https://github.com/filecoin-project/lotus/releases), the most recent RC (release candidate) for the upcoming release, or the dev branch (master), or have an issue updating to any of these.
required: true
- label: I did not make any code changes to lotus.
required: false
- type: checkboxes
attributes:
label: Lotus component
description: Please select the lotus component you are filing a bug for
options:
- label: lotus daemon - chain sync
required: false
- label: lotus fvm/fevm - Lotus FVM and FEVM interactions
required: false
- label: lotus miner/worker - sealing
required: false
- label: lotus miner - proving(WindowPoSt/WinningPoSt)
required: false
- label: lotus JSON-RPC API
required: false
- label: lotus message management (mpool)
required: false
- label: Other
required: false
- type: textarea
id: version
attributes:
label: Lotus Version
render: text
description: Enter the output of `lotus version` and `lotus-miner version` if applicable.
placeholder: |
e.g.
Daemon: 1.19.0+mainnet+git.64059ca87+api1.5.0
Local: lotus-miner version 1.19.0+mainnet+git.64059ca87
validations:
required: true
- type: textarea
id: ReproSteps
attributes:
label: Repro Steps
description: "Steps to reproduce the behavior"
value: |
1. Run '...'
2. Do '...'
3. See error '...'
...
validations:
required: false
- type: textarea
id: Description
attributes:
label: Describe the Bug
description: |
This is where you get to tell us what went wrong. When doing so, please try to provide a clear and concise description of the bug with all related information:
* What were you doing when you experienced the bug?
* Any *error* messages you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas).
* What is the expected behaviour?
* For sealing issues, include the output of `lotus-miner sectors status --log <sectorId>` for the failed sector(s).
* For proving issues, include the output of `lotus-miner proving info`.
validations:
required: true
- type: textarea
id: extraInfo
attributes:
label: Logging Information
render: text
description: |
Please provide debug logs of the problem. Remember you can control the log level for:
* lotus: use `lotus log list` to get all log systems available and set the level with `lotus log set-level`. An example can be found [here](https://lotus.filecoin.io/lotus/configure/defaults/#log-level-control).
* lotus-miner: use `lotus-miner log list` to get all log systems available and set the level with `lotus-miner log set-level`.
If you don't provide detailed logs when you raise the issue, requesting them will almost certainly be our first step before further diagnosing the problem.
validations:
required: true

8
.github/ISSUE_TEMPLATE/config.yml vendored Normal file

@@ -0,0 +1,8 @@
blank_issues_enabled: true
contact_links:
- name: Ask a question about Lotus or get support
url: https://github.com/filecoin-project/lotus/discussions/new/choose
about: Ask a question or request support for using Lotus
- name: Filecoin protocol feature or enhancement
url: https://github.com/filecoin-project/FIPs/discussions/new/choose
about: Write a discussion in the Filecoin Improvement Proposal repo

47
.github/ISSUE_TEMPLATE/enhancement.yml vendored Normal file

@@ -0,0 +1,47 @@
name: Enhancement
description: Suggest an improvement to an existing lotus feature.
labels: [need/triage, kind/enhancement]
body:
- type: checkboxes
attributes:
label: Checklist
description: Please check off the following boxes before continuing to create an improvement suggestion!
options:
- label: I **have** a specific, actionable, and well motivated improvement to an existing lotus feature.
required: true
- type: checkboxes
attributes:
label: Lotus component
description: Please select the lotus component you are filing an improvement request for
options:
- label: lotus daemon - chain sync
required: false
- label: lotus fvm/fevm - Lotus FVM and FEVM interactions
required: false
- label: lotus miner/worker - sealing
required: false
- label: lotus miner - proving(WindowPoSt/WinningPoSt)
required: false
- label: lotus JSON-RPC API
required: false
- label: lotus message management (mpool)
required: false
- label: Other
required: false
- type: textarea
id: request
attributes:
label: Enhancement Suggestion
description: A clear and concise description of the suggested enhancement.
placeholder: Ex. Currently lotus... However it would be great if [enhancement] was implemented... With the ability to...
validations:
required: true
- type: textarea
id: request
attributes:
label: Use-Case
description: How would this enhancement help you?
placeholder: Ex. With the [enhancement] node operators would be able to... For Storage Providers it would enable...
validations:
required: true


@@ -0,0 +1,61 @@
name: Feature request
description: Suggest an idea for lotus
labels: [need/triage, kind/feature]
body:
- type: checkboxes
attributes:
label: Checklist
description: Please check off the following boxes before continuing to create a new feature request!
options:
- label: This is **not** brainstorming ideas. If you have an idea you'd like to discuss, please open a new discussion on [the lotus forum](https://github.com/filecoin-project/lotus/discussions/categories/ideas) and select the category as `Ideas`.
required: true
- label: I **have** a specific, actionable, and well motivated feature request to propose.
required: true
- type: checkboxes
attributes:
label: Lotus component
description: Please select the lotus component you are filing a new feature request for
options:
- label: lotus daemon - chain sync
required: false
- label: lotus fvm/fevm - Lotus FVM and FEVM interactions
required: false
- label: lotus miner/worker - sealing
required: false
- label: lotus miner - proving(WindowPoSt/WinningPoSt)
required: false
- label: lotus JSON-RPC API
required: false
- label: lotus message management (mpool)
required: false
- label: Other
required: false
- type: textarea
id: request
attributes:
label: What is the motivation behind this feature request? Is your feature request related to a problem? Please describe.
description: A clear and concise description of what the motivation or the problem is.
placeholder: Ex. I'm always frustrated when [...]
validations:
required: true
- type: textarea
id: solution
attributes:
label: Describe the solution you'd like
description: A clear and concise description of what you want to happen.
validations:
required: true
- type: textarea
id: alternatives
attributes:
label: Describe alternatives you've considered
description: A clear and concise description of any alternative solutions or features you've considered.
validations:
required: false
- type: textarea
id: extra
attributes:
label: Additional context
description: Add any other context, design docs or screenshots about the feature request here.
validations:
required: false


@@ -1,34 +0,0 @@
---
name: Sealing Issues
about: Create a report for help with sealing (commit) failures.
title: ''
labels: 'sealing'
assignees: ''
---
Please provide all the information requested here to help us troubleshoot "commit failed" issues.
If the information requested is missing, we will probably have to just ask you to provide it anyway,
before we can help debug.
**Describe the problem**
A brief description of the problem you encountered while proving (sealing) a sector.
Including what commands you ran, and a description of your setup, is very helpful.
**Sectors list**
The output of `./lotus-storage-miner sectors list`.
**Sectors status**
The output of `./lotus-storage-miner sectors status --log <sectorId>` for the failed sector(s).
**Lotus storage miner logs**
Please go through the logs of your storage miner, and include screenshots of any error-like messages you find.
**Version**
The output of `./lotus --version`.


@@ -0,0 +1,83 @@
name: "Bug Report - developer/service provider"
description: "Bug report template about FEVM/FVM for developers/service providers"
labels: [need/triage, kind/bug, area/fevm]
body:
- type: checkboxes
attributes:
label: Checklist
description: Please check off the following boxes before continuing to file a bug report!
options:
- label: This is **not** a security-related bug/issue. If it is, please follow the [security policy](https://github.com/filecoin-project/lotus/security/policy).
required: true
- label: I **have** searched on the [issue tracker](https://github.com/filecoin-project/lotus/issues) and the [lotus forum](https://github.com/filecoin-project/lotus/discussions), and there is no existing related issue or discussion.
required: true
- label: I did not make any code changes to lotus.
required: false
- type: checkboxes
attributes:
label: Lotus component
description: Please select the lotus component you are filing a bug for
options:
- label: lotus Ethereum RPC
required: false
- label: lotus FVM - Lotus FVM interactions
required: false
- label: FEVM tooling
required: false
- label: Other
required: false
- type: textarea
id: version
attributes:
label: Lotus Version
render: text
description: Enter the output of `lotus version` if applicable.
placeholder: |
e.g.
Daemon: 1.19.0+mainnet+git.64059ca87+api1.5.0
Local: lotus-miner version 1.19.0+mainnet+git.64059ca87
validations:
required: true
- type: textarea
id: repro
attributes:
label: Repro Steps
description: "Steps to reproduce the behavior"
value: |
1. Run '...'
2. Do '...'
3. See error '...'
...
validations:
required: false
- type: textarea
id: Description
attributes:
label: Describe the Bug
description: |
This is where you get to tell us what went wrong. When doing so, please try to provide a clear and concise description of the bug with all related information:
* What were you doing when you experienced the bug? What are you trying to build?
* Any *error* messages and logs you saw, *where* you saw them, and what you believe may have caused them (if you have any ideas).
* What is the expected behaviour? Links to the actual code?
validations:
required: true
- type: textarea
id: toolingInfo
attributes:
label: Tooling
render: text
description: |
What kind of tooling are you using:
* Are you using ethers.js, Alchemy, Hardhat, etc.?
validations:
required: true
- type: textarea
id: extraInfo
attributes:
label: Configuration Options
render: text
description: |
Please provide your updated FEVM-related configuration options, or custom environment variables related to Lotus FEVM:
* lotus: use `lotus config updated` to get your configuration options, and copy the [FEVM] section
validations:
required: true

248
.github/labels.yml vendored Normal file

@@ -0,0 +1,248 @@
###
### Special magic GitHub labels
### https://help.github.com/en/github/building-a-strong-community/encouraging-helpful-contributions-to-your-project-with-labels
#
- name: "good first issue"
color: 7057ff
description: "Good for newcomers"
- name: "help wanted"
color: 008672
description: "Extra attention is needed"
###
### Goals
#
- name: goal/incentives
color: ff004d
description: "Incentinet"
###
### Areas
#
- name: area/ux
color: 00A4E0
description: "Area: UX"
- name: area/chain/vm
color: 00A4E2
description: "Area: Chain/VM"
- name: area/chain/sync
color: 00A4E4
description: "Area: Chain/Sync"
- name: area/chain/misc
color: 00A4E6
description: "Area: Chain/Misc"
- name: area/markets
color: 00A4E8
description: "Area: Markets"
- name: area/sealing/fsm
color: 0bb1ed
description: "Area: Sealing/FSM"
- name: area/sealing/storage
color: 0EB4F0
description: "Area: Sealing/Storage"
- name: area/proving
color: 0EB4F0
description: "Area: Proving"
- name: area/mining
color: 10B6F2
description: "Area: Mining"
- name: area/client/storage
color: 13B9F5
description: "Area: Client/Storage"
- name: area/client/retrieval
color: 15BBF7
description: "Area: Client/Retrieval"
- name: area/wallet
color: 15BBF7
description: "Area: Wallet"
- name: area/payment-channel
color: ff6767
description: "Area: Payment Channel"
- name: area/multisig
color: fff0ff
description: "Area: Multisig"
- name: area/networking
color: 273f8a
description: "Area: Networking"
###
### Kinds
#
- name: kind/bug
color: c92712
description: "Kind: Bug"
- name: kind/chore
color: fcf0b5
description: "Kind: Chore"
- name: kind/feature
color: FFF3B8
description: "Kind: Feature"
- name: kind/improvement
color: FFF5BA
description: "Kind: Improvement"
- name: kind/test
color: FFF8BD
description: "Kind: Test"
- name: kind/question
color: FFFDC2
description: "Kind: Question"
- name: kind/enhancement
color: FFFFC5
description: "Kind: Enhancement"
- name: kind/discussion
color: FFFFC7
description: "Kind: Discussion"
###
### Difficulties
#
- name: dif/trivial
color: b2b7ff
description: "Can be confidently tackled by newcomers, who are widely unfamiliar with lotus"
- name: dif/easy
color: 7886d7
description: "An existing lotus user should be able to pick this up"
- name: dif/medium
color: 6574cd
description: "Prior development experience with lotus is likely helpful"
- name: dif/hard
color: 5661b3
description: "Suggests that having worked on the specific component affected by this issue is important"
- name: dif/expert
color: 2f365f
description: "Requires extensive knowledge of the history, implications, ramifications of the issue"
###
### Efforts
#
- name: effort/minutes
color: e8fffe
description: "Effort: Minutes"
- name: effort/hours
color: a0f0ed
description: "Effort: Hours"
- name: effort/day
color: 64d5ca
description: "Effort: One Day"
- name: effort/days
color: 4dc0b5
description: "Effort: Multiple Days"
- name: effort/week
color: 38a89d
description: "Effort: One Week"
- name: effort/weeks
color: 20504f
description: "Effort: Multiple Weeks"
###
### Impacts
#
- name: impact/regression
color: f1f5f8
description: "Impact: Regression"
- name: impact/api-breakage
color: ECF0F3
description: "Impact: API Breakage"
- name: impact/quality
color: E7EBEE
description: "Impact: Quality"
- name: impact/dx
color: E2E6E9
description: "Impact: Developer Experience"
- name: impact/test-flakiness
color: DDE1E4
description: "Impact: Test Flakiness"
- name: impact/consensus
color: b20014
description: "Impact: Consensus"
###
### Topics
#
- name: topic/interoperability
color: bf0f73
description: "Topic: Interoperability"
- name: topic/specs
color: CC1C80
description: "Topic: Specs"
- name: topic/docs
color: D9298D
description: "Topic: Documentation"
- name: topic/architecture
color: E53599
description: "Topic: Architecture"
###
### Priorities
###
- name: P0
color: dd362a
description: "P0: Critical Blocker"
- name: P1
color: ce8048
description: "P1: Must be resolved"
- name: P2
color: dbd81a
description: "P2: Should be resolved"
- name: P3
color: 9fea8f
description: "P3: Might get resolved"
###
### Hints
#
#- name: hint/good-first-issue
# color: 7057ff
# description: "Hint: Good First Issue"
#- name: hint/help-wanted
# color: 008672
# description: "Hint: Help Wanted"
- name: hint/needs-decision
color: 33B9A5
description: "Hint: Needs Decision"
- name: hint/needs-triage
color: 1AA08C
description: "Hint: Needs Triage"
- name: hint/needs-analysis
color: 26AC98
description: "Hint: Needs Analysis"
- name: hint/needs-author-input
color: 33B9A5
description: "Hint: Needs Author Input"
- name: hint/needs-team-input
color: 40C6B2
description: "Hint: Needs Team Input"
- name: hint/needs-community-input
color: 4DD3BF
description: "Hint: Needs Community Input"
- name: hint/needs-review
color: 5AE0CC
description: "Hint: Needs Review"
###
### Statuses
#
- name: status/done
color: edb3a6
description: "Status: Done"
- name: status/deferred
color: E0A699
description: "Status: Deferred"
- name: status/in-progress
color: D49A8D
description: "Status: In Progress"
- name: status/blocked
color: C78D80
description: "Status: Blocked"
- name: status/inactive
color: BA8073
description: "Status: Inactive"
- name: status/waiting
color: AD7366
description: "Status: Waiting"
- name: status/rotten
color: 7A4033
description: "Status: Rotten"
- name: status/discarded
color: 6D3326
description: "Status: Discarded / Won't fix"

23
.github/pull_request_template.md vendored Normal file

@@ -0,0 +1,23 @@
## Related Issues
<!-- Link issues that this PR might resolve/fix. If an issue doesn't exist, include a brief motivation for the change being made -->
## Proposed Changes
<!-- A clear list of the changes being made -->
## Additional Info
<!-- Callouts, links to documentation, and etc -->
## Checklist
Before you mark the PR ready for review, please make sure that:
- [ ] Commits have a clear commit message.
- [ ] PR title is in the form of `<PR type>: <area>: <change being made>`
- example: ` fix: mempool: Introduce a cache for valid signatures`
- `PR type`: fix, feat, build, chore, ci, docs, perf, refactor, revert, style, test
- `area`, e.g. api, chain, state, market, mempool, multisig, networking, paych, proving, sealing, wallet, deps
- [ ] New features have usage guidelines and / or documentation updates in
- [ ] [Lotus Documentation](https://lotus.filecoin.io)
- [ ] [Discussion Tutorials](https://github.com/filecoin-project/lotus/discussions/categories/tutorials)
- [ ] Tests exist for new functionality or change in behavior
- [ ] CI is green

73
.github/workflows/codeql-analysis.yml vendored Normal file

@@ -0,0 +1,73 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches:
- master
- 'release/*'
pull_request:
# The branches below must be a subset of the branches above
branches:
- master
- 'release/*'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '1.18.8'
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: go
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

17
.github/workflows/label-syncer.yml vendored Normal file

@@ -0,0 +1,17 @@
name: Label syncer
on:
push:
paths:
- '.github/labels.yml'
branches:
- master
jobs:
build:
name: Sync labels
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1.0.0
- uses: micnncim/action-label-syncer@v1.0.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

33
.github/workflows/stale.yml vendored Normal file

@@ -0,0 +1,33 @@
name: Close and mark stale issue
on:
schedule:
- cron: '0 12 * * *'
jobs:
stale:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 24 hours.'
close-issue-message: 'This issue was closed because it is missing author input.'
stale-pr-message: 'Thank you for submitting the PR and contributing to lotus! Lotus maintainers need more of your input before merging it, please address the suggested changes or reply to the comments or this PR will be closed in 48 hours. You are always more than welcome to reopen the PR later as well!'
close-pr-message: 'This PR was closed because it is missing author input. Please feel free to reopen the PR when you get to it! Thank you for your interest in contributing to lotus!'
stale-issue-label: 'kind/stale'
stale-pr-label: 'kind/stale'
any-of-labels: 'need/author-input'
days-before-issue-stale: 3
days-before-issue-close: 1
days-before-pr-stale: 5
days-before-pr-close: 2
remove-stale-when-updated: true
enable-statistics: true

14
.github/workflows/sync-master-main.yaml vendored Normal file

@@ -0,0 +1,14 @@
name: sync-master-main
on:
push:
branches:
- master
jobs:
sync:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: update remote branch main
run: |
# overrides the remote branch (origin:github) `main`
git push origin --force master:main


@@ -0,0 +1,29 @@
---
name: Testground PR Checker
on: [push]
jobs:
testground:
runs-on: ubuntu-latest
name: ${{ matrix.composition_file }}
strategy:
matrix:
include:
- backend_addr: ci.testground.ipfs.team
backend_proto: https
plan_directory: testplans/lotus-soup
composition_file: testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml
- backend_addr: ci.testground.ipfs.team
backend_proto: https
plan_directory: testplans/lotus-soup
composition_file: testplans/lotus-soup/_compositions/paych-stress-k8s.toml
steps:
- uses: actions/checkout@v2
- name: testground run
uses: testground/testground-github-action@v1
with:
backend_addr: ${{ matrix.backend_addr }}
backend_proto: ${{ matrix.backend_proto }}
plan_directory: ${{ matrix.plan_directory }}
composition_file: ${{ matrix.composition_file }}

38
.gitignore vendored

@@ -1,19 +1,25 @@
/lotus
/lotus-storage-miner
/lotus-seal-worker
/lotus-miner
/lotus-worker
/lotus-seed
/lotus-health
/lotus-chainwatch
/lotus-shed
/pond
/townhall
/fountain
/stats
/bench
/lotus-sim
/lotus-townhall
/lotus-fountain
/lotus-stats
/lotus-bench
/lotus-gateway
/lotus-pcr
/lotus-wallet
/lotus-keygen
/docgen-md
/docgen-openrpc
/bench.json
/lotuspond/front/node_modules
/lotuspond/front/build
/cmd/lotus-townhall/townhall/node_modules
/cmd/lotus-townhall/townhall/build
/cmd/lotus-townhall/townhall/package-lock.json
extern/filecoin-ffi/rust/target
**/*.a
**/*.pc
@@ -24,14 +30,26 @@ build/paramfetch.sh
/vendor
/blocks.dot
/blocks.svg
/chainwatch
/chainwatch.db
/bundle
/darwin
/linux
*.snap
*-fuzz.zip
/chain/types/work_msg/
bin/ipget
bin/tmp/*
.idea
scratchpad
build/builtin-actors/v*
build/builtin-actors/*.car
dist/
# The following files are checked into git and result
# in dirty git state if removed from the docker context
!extern/filecoin-ffi/rust/filecoin.pc
!extern/test-vectors

6
.gitmodules vendored

@@ -1,7 +1,9 @@
[submodule "extern/filecoin-ffi"]
path = extern/filecoin-ffi
url = https://github.com/filecoin-project/filecoin-ffi.git
branch = master
[submodule "extern/serialization-vectors"]
path = extern/serialization-vectors
url = https://github.com/filecoin-project/serialization-vectors
url = https://github.com/filecoin-project/serialization-vectors.git
[submodule "extern/test-vectors"]
path = extern/test-vectors
url = https://github.com/filecoin-project/test-vectors.git


@@ -16,22 +16,63 @@ linters:
- deadcode
- scopelint
# We don't want to skip builtin/
skip-dirs-use-default: false
skip-dirs:
- vendor$
- testdata$
- examples$
issues:
exclude:
- "func name will be used as test\\.Test.* by other packages, and that stutters; consider calling this"
- "by other packages, and that stutters; consider calling this"
- "Potential file inclusion via variable"
- "should have( a package)? comment"
- "Error return value of `logging.SetLogLevel` is not checked"
- "comment on exported"
- "(func|method) \\w+ should be \\w+"
- "(type|var|struct field|(method|func) parameter) `\\w+` should be `\\w+`"
- "(G306|G301|G307|G108|G302|G204|G104)"
- "don't use ALL_CAPS in Go names"
- "string .* has .* occurrences, make it a constant"
- "a blank import should be only in a main or test package, or have a comment justifying it"
- "package comment should be of the form"
- "Potential hardcoded credentials"
- "Use of weak random number generator"
- "xerrors.* is deprecated"
exclude-use-default: false
exclude-rules:
- path: node/modules/lp2p
linters:
- golint
- path: ".*_test.go"
- path: build/params_.*\.go
linters:
- golint
- path: api/apistruct/struct.go
linters:
- golint
- path: .*_test.go
linters:
- gosec
- path: chain/vectors/gen/.*
linters:
- gosec
- path: cmd/lotus-bench/.*
linters:
- gosec
- path: api/test/.*
text: "context.Context should be the first parameter"
linters:
- golint
linters-settings:
goconst:
min-occurrences: 6

109
.goreleaser.yaml Normal file

@@ -0,0 +1,109 @@
project_name: lotus
universal_binaries:
- id: lotus
replace: true
name_template: lotus
- id: lotus-miner
replace: true
name_template: lotus-miner
- id: lotus-worker
replace: true
name_template: lotus-worker
builds:
- id: lotus
binary: lotus
builder: prebuilt
goos:
- darwin
- linux
goarch:
- amd64
- arm64
goamd64:
- v1
ignore:
- goos: linux
goarch: arm64
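# The prebuilt paths below point at the workspace directories persisted by the
# CircleCI build jobs (linux_amd64_v1, darwin_amd64_v1, darwin_arm64 in template.yml).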
prebuilt:
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus
- id: lotus-miner
binary: lotus-miner
builder: prebuilt
goos:
- darwin
- linux
goarch:
- amd64
- arm64
goamd64:
- v1
ignore:
- goos: linux
goarch: arm64
prebuilt:
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-miner
- id: lotus-worker
binary: lotus-worker
builder: prebuilt
goos:
- darwin
- linux
goarch:
- amd64
- arm64
goamd64:
- v1
ignore:
- goos: linux
goarch: arm64
prebuilt:
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-worker
archives:
- id: primary
format: tar.gz
wrap_in_directory: true
name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
# this is a dumb but required hack so it doesn't include the default files
# https://github.com/goreleaser/goreleaser/issues/602
- _n_o_n_e_*
release:
github:
owner: filecoin-project
name: lotus
prerelease: auto
name_template: "v{{.Version}}"
brews:
- tap:
owner: filecoin-project
name: homebrew-lotus
branch: master
ids:
- primary
install: |
bin.install "lotus"
bin.install "lotus-miner"
bin.install "lotus-worker"
test: |
system "#{bin}/lotus --version"
system "#{bin}/lotus-miner --version"
system "#{bin}/lotus-worker --version"
folder: Formula
homepage: "https://filecoin.io"
description: "A homebrew cask for installing filecoin-project/lotus on MacOS"
license: MIT
skip_upload: auto
dependencies:
- name: hwloc
# produced manually so we can include cid checksums
checksum:
disable: true
snapshot:
name_template: "{{ .Version }}"

File diff suppressed because it is too large

136
Dockerfile Normal file

@@ -0,0 +1,136 @@
#####################################
FROM golang:1.19.7-buster AS lotus-builder
MAINTAINER Lotus Development Team
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev
ENV XDG_CACHE_HOME="/tmp"
### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile
ENV RUSTUP_HOME=/usr/local/rustup \
CARGO_HOME=/usr/local/cargo \
PATH=/usr/local/cargo/bin:$PATH \
RUST_VERSION=1.63.0
RUN set -eux; \
dpkgArch="$(dpkg --print-architecture)"; \
case "${dpkgArch##*-}" in \
amd64) rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \
arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \
*) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \
esac; \
url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \
wget "$url"; \
echo "${rustupSha256} *rustup-init" | sha256sum -c -; \
chmod +x rustup-init; \
./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \
rm rustup-init; \
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
rustup --version; \
cargo --version; \
rustc --version;
COPY ./ /opt/filecoin
WORKDIR /opt/filecoin
RUN scripts/docker-git-state-check.sh
### make configurable filecoin-ffi build
ARG FFI_BUILD_FROM_SOURCE=0
ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE}
RUN make clean deps
ARG RUSTFLAGS=""
ARG GOFLAGS=""
RUN make buildall
#####################################
FROM ubuntu:20.04 AS lotus-base
MAINTAINER Lotus Development Team
# Base resources
COPY --from=lotus-builder /etc/ssl/certs /etc/ssl/certs
COPY --from=lotus-builder /lib/*/libdl.so.2 /lib/
COPY --from=lotus-builder /lib/*/librt.so.1 /lib/
COPY --from=lotus-builder /lib/*/libgcc_s.so.1 /lib/
COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/
COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/
COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/
COPY --from=lotus-builder /usr/lib/*/libhwloc.so.5 /lib/
COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/
RUN useradd -r -u 532 -U fc \
&& mkdir -p /etc/OpenCL/vendors \
&& echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
#####################################
FROM lotus-base AS lotus
MAINTAINER Lotus Development Team
COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/
COPY scripts/docker-lotus-entrypoint.sh /
ARG DOCKER_LOTUS_IMPORT_SNAPSHOT=https://snapshots.mainnet.filops.net/minimal/latest
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT ${DOCKER_LOTUS_IMPORT_SNAPSHOT}
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_PATH /var/lib/lotus
ENV DOCKER_LOTUS_IMPORT_WALLET ""
RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus
VOLUME /var/tmp/filecoin-proof-parameters
USER fc
EXPOSE 1234
ENTRYPOINT ["/docker-lotus-entrypoint.sh"]
CMD ["-help"]
#####################################
FROM lotus-base AS lotus-all-in-one
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_PATH /var/lib/lotus
ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
ENV WALLET_PATH /var/lib/lotus-wallet
COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-seed /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/
RUN mkdir /var/tmp/filecoin-proof-parameters
RUN mkdir /var/lib/lotus
RUN mkdir /var/lib/lotus-miner
RUN mkdir /var/lib/lotus-worker
RUN mkdir /var/lib/lotus-wallet
RUN chown fc: /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus
RUN chown fc: /var/lib/lotus-miner
RUN chown fc: /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-wallet
VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus
VOLUME /var/lib/lotus-miner
VOLUME /var/lib/lotus-worker
VOLUME /var/lib/lotus-wallet
EXPOSE 1234
EXPOSE 2345
EXPOSE 3456
EXPOSE 1777

1
GO_VERSION_MIN Normal file

@@ -0,0 +1 @@
1.19.7

142
LOTUS_RELEASE_FLOW.md Normal file

@@ -0,0 +1,142 @@
<!-- TOC -->
- [`lotus` Release Flow](#lotus-release-flow)
- [Purpose](#purpose)
- [High-level Summary](#high-level-summary)
- [Motivation and Requirements](#motivation-and-requirements)
- [Adopted Conventions](#adopted-conventions)
- [Major Releases](#major-releases)
- [Mandatory Releases](#mandatory-releases)
- [Feature Releases](#feature-releases)
- [Examples Scenarios](#examples-scenarios)
- [Release Cycle](#release-cycle)
- [Patch Releases](#patch-releases)
- [Performing a Release](#performing-a-release)
- [Security Fix Policy](#security-fix-policy)
- [FAQ](#faq)
- [Why aren't Go major versions used more?](#why-arent-go-major-versions-used-more)
- [Related Items](#related-items)
<!-- /TOC -->
# `lotus` Release Flow
## Purpose
This document aims to describe how the Lotus team plans to ship releases of the Lotus implementation of the Filecoin network. Interested parties can expect new releases to come out as described in this document.
## High-level Summary
- **Major releases** (1.0.0, 2.0.0, etc.) are reserved for significant changes to the Filecoin protocol that transform the network and its use cases. Such changes could include the addition of new Virtual Machines to the protocol, major upgrades to the Proofs of Replication used by Filecoin, etc.
- Even minor releases (1.2.0, 1.4.0, etc.) of the Lotus software correspond to **mandatory releases** as they ship Filecoin network upgrades. Users **must** upgrade to these releases before a certain time in order to keep in sync with the Filecoin network. We aim to ensure there is at least one week before the upgrade deadline.
- Patch versions of even minor releases (1.2.1, 1.4.2, etc.) correspond to **hotfix releases**. Such releases will only be shipped when a critical fix is needed to be applied on top of a mandatory release.
- Odd minor releases (1.3.0, 1.5.0, etc.), as well as patch releases in these series (1.3.1, 1.5.2, etc.) correspond to **feature releases** with new development and bugfixes. These releases are not mandatory, but still highly recommended, **as they may contain critical security fixes**.
- We aim to ship a new feature release of the Lotus software every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule.
## Motivation and Requirements
Our primary motivation is for users of the Lotus software (storage providers, storage clients, developers, etc.) to have a clear idea about when they can expect Lotus releases, and what they can expect in a release.
In order to achieve this, we need the following from our release process and conventions:
- Lotus version conventions make it immediately obvious whether a new Lotus release is mandatory or not. A release is mandatory if it ships a network upgrade to the Filecoin protocol.
- The ability to ship critical fixes on top of mandatory releases, so as to avoid forcing users to consume larger unrelated changes.
- A regular cadence of feature releases, so that users can know when a new Lotus release will be available for consumption.
- The ability to ship any number of feature releases between two mandatory releases.
- A clear description of the various stages of testing that a Lotus Release Candidate (RC) goes through.
- Lotus Release issues will present a single source of truth for what may be contained in Lotus releases, including security fixes, and how they will be disclosed.
## Adopted Conventions
This section describes the conventions we have adopted. Users of Lotus are encouraged to review this in detail so as to be informed when new Lotus releases are shipped.
### Major Releases
Bumps to the Lotus major version number (1.0.0, 2.0.0, etc.) are reserved for significant changes to the Filecoin protocol that dramatically transform the network itself. Such changes could include the addition of new Virtual Machines to the protocol, major upgrades to the Proofs of Replication used by Filecoin, etc. These releases are expected to take a long time to develop and will be rare. See also "Why aren't Go major versions used more?" below.
### Mandatory Releases
Even-numbered bumps to the Lotus minor version number (1.2.0, 1.4.0, etc.) are reserved for **mandatory releases** that ship Filecoin network upgrades. Users **must** upgrade to these releases before a certain time in order to stay in sync with the Filecoin network.
Depending on the scope of the upgrade, these releases may take up to several weeks to fully develop and test. We aim to ensure there are at least 2 weeks between the publication of the final Lotus release and the Filecoin network upgrade deadline.
These releases do not follow a regular cadence, as they are developed in lockstep with the other implementations of the Filecoin protocol. As of August 2021, the developers aim to ship 3-4 Filecoin network upgrades a year, though smaller security-critical upgrades may occur unpredictably.
Mandatory releases are somewhat sensitive since all Lotus users are forced to upgrade to them at the same time. As a result, they will be shipped on top of the most recent stable release of Lotus, which will generally be the latest Lotus release that has been in production for more than 2 weeks. (Note: given this rule, the basis of a mandatory release could be a mandatory release or a feature release depending on timing). Mandatory releases will not include any new feature development or bugfixes that haven't already baked in production for 2+ weeks, except for the changes needed for the network upgrade itself. Further, any critical fixes that are needed after the network upgrade will be shipped as patch version bumps to the mandatory release (1.2.1, 1.2.2, etc.) This prevents users from being forced to quickly digest unnecessary changes.
Users should generally aim to always upgrade to a new even minor version release since they either introduce a mandatory network upgrade or a critical fix.
### Feature Releases
All releases under an odd minor version number indicate **feature releases**. These could include releases such as 1.3.0, 1.3.1, 1.5.2, etc.
Feature releases include new development and bug fixes. They are not mandatory, but still highly recommended, **as they may contain critical security fixes**. Note that some of these releases may be very small patch releases that include critical hotfixes. The version number alone does not distinguish a bug-fix release from a feature release; both bump the "patch" component.
We aim to ship a new feature release of the Lotus software from our development (master) branch every 3 weeks, so users can expect a regular cadence of Lotus feature releases. Note that mandatory releases for network upgrades may disrupt this schedule. For more, see the Release Cycle section (TODO: Link).
### Example Scenarios
- **Scenario 1**: **Lotus 1.12.0 shipped a network upgrade, and no network upgrades are needed for a long while.**
In this case, the next feature release will be Lotus 1.13.0. At three-week intervals, we will ship Lotus 1.13.1, 1.13.2, and 1.13.3, all containing new features and bug fixes.
Let us assume that after the release of 1.13.3, a critical issue is discovered and a hotfix quickly developed. This hotfix will then be shipped in **both** 1.12.1 and 1.13.4. Users who have already upgraded to the 1.13 series can simply upgrade to 1.13.4. Users who have chosen to stay on 1.12.0, however, can use 1.12.1 to patch the critical issue without being forced to consume all the changes in the 1.13 series.
- **Scenario 2**: **Lotus 1.12.0 shipped a network upgrade, but the need for an unexpected network upgrade soon arises**
In this case, the Lotus 1.13 series will be dropped entirely, including any RCs that may have been undergoing testing. Instead, the network upgrade will be shipped as Lotus 1.14.0, built on top of Lotus 1.12.0. It will thus include no unnecessary changes, only the work needed to support the new network upgrade.
Any changes that were being worked on in the 1.13 series will then be applied on top of Lotus 1.14.0 and shipped as Lotus 1.15.0.
## Release Cycle
A mandatory release process should take about 3-6 weeks, depending on the amount and the overall complexity of new features being introduced to the network protocol. It may also be shorter if there is a network incident that requires an emergency upgrade. A feature release process should take about 2-3 weeks.
The start time of the mandatory release process is subject to the network upgrade timeline. We will start a new feature release process every 3 weeks on a Tuesday, regardless of when the previous release landed, unless that release process is still ongoing.
### Patch Releases
**Mandatory Release**
If we encounter a serious bug in a mandatory release after a network upgrade, we will create a patch release based on that release. Only the fix for the bug will be included in the patch; the bug fix will also be backported to the master (dev) branch, and to any ongoing feature release branch if applicable.
The patch release process for mandatory releases follows a compressed release cycle, lasting hours to days depending on the severity of the bug and its impact on the network. In a patch release:
1. Automated and internal testing (stage 0 and 1) will be compressed into a few hours.
2. Stage 2-3 will be skipped or shortened on a case-by-case basis.
Some patch releases, especially ones fixing one or more complex bugs that don't require a follow-up mandatory upgrade, may undergo the full release process.
**Feature Release**
Patch releases in an odd minor release series (1.3.x, 1.5.x, etc.), such as 1.3.1 or 1.5.2, are themselves **feature releases** with new development and bugfixes. These releases are not mandatory, but still **highly** recommended, **as they may contain critical security fixes**.
---
### Performing a Release
At the beginning of each release cycle, we will generate our "Release tracking issue", which is populated with the content of [https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md).
This template will be used to track major goals we have, a planned shipping date, and a complete release checklist tied to a specific release.
### Security Fix Policy
Any release may contain security fixes. Unless the fix addresses a bug being exploited in the wild, the fix will *not* be called out in the release notes. Please make sure to update ASAP.
By policy, the team will usually wait until about 3 weeks after the final release to announce any fixed security issues. However, depending on the impact and ease of discovery of the issue, the team may wait more or less time.
It is important to always update to the latest version ASAP and file issues if you're unable to update for some reason.
Finally, unless a security issue is actively being exploited or a significant number of users are unable to update to the latest version (e.g., due to a difficult migration, breaking changes, etc.), security fixes will *not* be backported to previous releases.
## FAQ
### Why aren't Go major versions used more?
Golang tightly couples source code with versioning (major versions beyond v1 leak into import paths). This poses logistical difficulties to using major versions here. Concretely, if we were to pick a policy that bumped the major version on every network upgrade, we would disrupt every single downstream library/application that consumed the native Lotus API (e.g., libraries depending on the JSON-RPC client, testground tests). They would need to update their code every single time that we released a network breaking change, even if it brought on zero expectation of breakage for the Golang APIs that they depend on. In this scenario, we are signaling breakage on the wrong API surface! We're signaling breakage on the Go level, when what breaks is the network protocol.
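As a hypothetical illustration of that coupling (a v2 Lotus module does not exist; the paths and names below are illustrative only):

```go
package consumer // hypothetical downstream package

// A Go major version bump (v2+) changes every consumer's import path,
// even when the Go-level API they use is unchanged:
import (
	lotusapi "github.com/filecoin-project/lotus/api" // today (module major v0/v1)
	// lotusapi "github.com/filecoin-project/lotus/v2/api" // would be required after a v2 bump
)

var _ lotusapi.Gateway // reference the import so this fragment compiles
```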
## Related Items
1. [Release Issue template](https://github.com/filecoin-project/lotus/blob/master/documentation/misc/RELEASE_ISSUE_TEMPLATE.md)
2. [Lotus Release Flow Discussion](https://github.com/filecoin-project/lotus/discussions/7053): Leave a comment if you have any questions or feedback regarding the Lotus release flow.

352
Makefile
View File

@ -5,10 +5,14 @@ all: build
unexport GOFLAGS
GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2)
ifeq ($(shell expr $(GOVERSION) \< 13), 1)
$(warning Your Golang version is go 1.$(GOVERSION))
$(error Update Golang to version $(shell grep '^go' go.mod))
GOCC?=go
GOVERSION:=$(shell $(GOCC) version | tr ' ' '\n' | grep go1 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}')
GOVERSIONMIN:=$(shell cat GO_VERSION_MIN | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}')
ifeq ($(shell expr $(GOVERSION) \< $(GOVERSIONMIN)), 1)
$(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000))
$(error Update Golang to at least version $(shell cat GO_VERSION_MIN))
endif
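For readers unfamiliar with the awk idiom above: it packs a dotted version into a single comparable integer, zero-padding the second and third components to three digits, so 1.19.7 becomes 1019007 and a plain numeric comparison orders versions. A rough Go rendering of the same encoding, purely as an illustrative sketch:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// encodeGoVersion mirrors the Makefile's awk encoding: the three numeric
// components are packed into three-digit decimal fields, so "1.19.7"
// becomes 1019007 and integer comparison orders versions. Missing
// components count as zero ("1.19" -> 1019000).
func encodeGoVersion(v string) (int, error) {
	parts := strings.Split(strings.TrimPrefix(v, "go"), ".")
	n := 0
	for i := 0; i < 3; i++ {
		n *= 1000
		if i < len(parts) {
			c, err := strconv.Atoi(parts[i])
			if err != nil {
				return 0, fmt.Errorf("bad component %q: %w", parts[i], err)
			}
			n += c
		}
	}
	return n, nil
}

func main() {
	have, _ := encodeGoVersion("1.19.7")
	min, _ := encodeGoVersion("1.19.7")
	fmt.Println(have, min, have >= min) // 1019007 1019007 true
}
```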
# git modules that need to be loaded
@ -17,7 +21,7 @@ MODULES:=
CLEAN:=
BINS:=
ldflags=-X=github.com/filecoin-project/lotus/build.CurrentCommit='+git$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null))'
ldflags=-X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null))
ifneq ($(strip $(LDFLAGS)),)
ldflags+=-extldflags=$(LDFLAGS)
endif
@ -41,8 +45,13 @@ MODULES+=$(FFI_PATH)
BUILD_DEPS+=build/.filecoin-install
CLEAN+=build/.filecoin-install
$(MODULES): build/.update-modules ;
ffi-version-check:
@[[ "$$(awk '/const Version/{print $$5}' extern/filecoin-ffi/version.go)" -eq 3 ]] || (echo "FFI version mismatch, update submodules"; exit 1)
BUILD_DEPS+=ffi-version-check
.PHONY: ffi-version-check
$(MODULES): build/.update-modules ;
# dummy file that marks the last time modules were updated
build/.update-modules:
git submodule update --init --recursive
@ -57,146 +66,213 @@ CLEAN+=build/.update-modules
deps: $(BUILD_DEPS)
.PHONY: deps
build-devnets: build lotus-seed lotus-shed
.PHONY: build-devnets
debug: GOFLAGS+=-tags=debug
debug: lotus lotus-storage-miner lotus-seal-worker lotus-seed
debug: build-devnets
2k: GOFLAGS+=-tags=2k
2k: lotus lotus-storage-miner lotus-seal-worker lotus-seed
2k: build-devnets
calibnet: GOFLAGS+=-tags=calibnet
calibnet: build-devnets
butterflynet: GOFLAGS+=-tags=butterflynet
butterflynet: build-devnets
interopnet: GOFLAGS+=-tags=interopnet
interopnet: build-devnets
lotus: $(BUILD_DEPS)
rm -f lotus
go build $(GOFLAGS) -o lotus ./cmd/lotus
go run github.com/GeertJohan/go.rice/rice append --exec lotus -i ./build
$(GOCC) build $(GOFLAGS) -o lotus ./cmd/lotus
.PHONY: lotus
BINS+=lotus
lotus-storage-miner: $(BUILD_DEPS)
rm -f lotus-storage-miner
go build $(GOFLAGS) -o lotus-storage-miner ./cmd/lotus-storage-miner
go run github.com/GeertJohan/go.rice/rice append --exec lotus-storage-miner -i ./build
.PHONY: lotus-storage-miner
BINS+=lotus-storage-miner
lotus-miner: $(BUILD_DEPS)
rm -f lotus-miner
$(GOCC) build $(GOFLAGS) -o lotus-miner ./cmd/lotus-miner
.PHONY: lotus-miner
BINS+=lotus-miner
lotus-seal-worker: $(BUILD_DEPS)
rm -f lotus-seal-worker
go build $(GOFLAGS) -o lotus-seal-worker ./cmd/lotus-seal-worker
go run github.com/GeertJohan/go.rice/rice append --exec lotus-seal-worker -i ./build
.PHONY: lotus-seal-worker
BINS+=lotus-seal-worker
lotus-worker: $(BUILD_DEPS)
rm -f lotus-worker
$(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker
.PHONY: lotus-worker
BINS+=lotus-worker
lotus-shed: $(BUILD_DEPS)
rm -f lotus-shed
go build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed
go run github.com/GeertJohan/go.rice/rice append --exec lotus-shed -i ./build
$(GOCC) build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed
.PHONY: lotus-shed
BINS+=lotus-shed
build: lotus lotus-storage-miner lotus-seal-worker
lotus-gateway: $(BUILD_DEPS)
rm -f lotus-gateway
$(GOCC) build $(GOFLAGS) -o lotus-gateway ./cmd/lotus-gateway
.PHONY: lotus-gateway
BINS+=lotus-gateway
build: lotus lotus-miner lotus-worker
@[[ $$(type -P "lotus") ]] && echo "Caution: you have \
an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true
.PHONY: build
install:
install: install-daemon install-miner install-worker
install-daemon:
install -C ./lotus /usr/local/bin/lotus
install -C ./lotus-storage-miner /usr/local/bin/lotus-storage-miner
install -C ./lotus-seal-worker /usr/local/bin/lotus-seal-worker
install-services: install
mkdir -p /usr/local/lib/systemd/system
install -C -m 0644 ./scripts/lotus-daemon.service /usr/local/lib/systemd/system/lotus-daemon.service
install -C -m 0644 ./scripts/lotus-miner.service /usr/local/lib/systemd/system/lotus-miner.service
systemctl daemon-reload
@echo
@echo "lotus and lotus-miner services installed. Don't forget to 'systemctl enable lotus|lotus-miner' for it to be enabled on startup."
install-miner:
install -C ./lotus-miner /usr/local/bin/lotus-miner
clean-services:
rm -f /usr/local/lib/systemd/system/lotus-daemon.service
rm -f /usr/local/lib/systemd/system/lotus-miner.service
systemctl daemon-reload
install-worker:
install -C ./lotus-worker /usr/local/bin/lotus-worker
install-app:
install -C ./$(APP) /usr/local/bin/$(APP)
uninstall: uninstall-daemon uninstall-miner uninstall-worker
.PHONY: uninstall
uninstall-daemon:
rm -f /usr/local/bin/lotus
uninstall-miner:
rm -f /usr/local/bin/lotus-miner
uninstall-worker:
rm -f /usr/local/bin/lotus-worker
# TOOLS
lotus-seed: $(BUILD_DEPS)
rm -f lotus-seed
go build $(GOFLAGS) -o lotus-seed ./cmd/lotus-seed
go run github.com/GeertJohan/go.rice/rice append --exec lotus-seed -i ./build
$(GOCC) build $(GOFLAGS) -o lotus-seed ./cmd/lotus-seed
.PHONY: lotus-seed
BINS+=lotus-seed
benchmarks:
go run github.com/whyrusleeping/bencher ./... > bench.json
$(GOCC) run github.com/whyrusleeping/bencher ./... > bench.json
@echo Submitting results
@curl -X POST 'http://benchmark.kittyhawk.wtf/benchmark' -d '@bench.json' -u "${benchmark_http_cred}"
.PHONY: benchmarks
pond: build
go build -o pond ./lotuspond
(cd lotuspond/front && npm i && CI=false npm run build)
.PHONY: pond
BINS+=pond
lotus-fountain:
rm -f lotus-fountain
$(GOCC) build $(GOFLAGS) -o lotus-fountain ./cmd/lotus-fountain
$(GOCC) run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build
.PHONY: lotus-fountain
BINS+=lotus-fountain
townhall:
rm -f townhall
go build -o townhall ./cmd/lotus-townhall
(cd ./cmd/lotus-townhall/townhall && npm i && npm run build)
go run github.com/GeertJohan/go.rice/rice append --exec townhall -i ./cmd/lotus-townhall -i ./build
.PHONY: townhall
BINS+=townhall
lotus-bench:
rm -f lotus-bench
$(GOCC) build $(GOFLAGS) -o lotus-bench ./cmd/lotus-bench
.PHONY: lotus-bench
BINS+=lotus-bench
fountain:
rm -f fountain
go build -o fountain ./cmd/lotus-fountain
go run github.com/GeertJohan/go.rice/rice append --exec fountain -i ./cmd/lotus-fountain -i ./build
.PHONY: fountain
BINS+=fountain
lotus-stats:
rm -f lotus-stats
$(GOCC) build $(GOFLAGS) -o lotus-stats ./cmd/lotus-stats
.PHONY: lotus-stats
BINS+=lotus-stats
chainwatch:
rm -f chainwatch
go build -o chainwatch ./cmd/lotus-chainwatch
go run github.com/GeertJohan/go.rice/rice append --exec chainwatch -i ./cmd/lotus-chainwatch -i ./build
.PHONY: chainwatch
BINS+=chainwatch
lotus-pcr:
rm -f lotus-pcr
$(GOCC) build $(GOFLAGS) -o lotus-pcr ./cmd/lotus-pcr
.PHONY: lotus-pcr
BINS+=lotus-pcr
bench:
rm -f bench
go build -o bench ./cmd/lotus-bench
go run github.com/GeertJohan/go.rice/rice append --exec bench -i ./build
.PHONY: bench
BINS+=bench
stats:
rm -f stats
go build -o stats ./tools/stats
go run github.com/GeertJohan/go.rice/rice append --exec stats -i ./build
.PHONY: stats
BINS+=stats
health:
lotus-health:
rm -f lotus-health
go build -o lotus-health ./cmd/lotus-health
go run github.com/GeertJohan/go.rice/rice append --exec lotus-health -i ./build
$(GOCC) build -o lotus-health ./cmd/lotus-health
.PHONY: lotus-health
BINS+=lotus-health
.PHONY: health
BINS+=health
lotus-wallet:
rm -f lotus-wallet
$(GOCC) build $(GOFLAGS) -o lotus-wallet ./cmd/lotus-wallet
.PHONY: lotus-wallet
BINS+=lotus-wallet
lotus-keygen:
rm -f lotus-keygen
$(GOCC) build -o lotus-keygen ./cmd/lotus-keygen
.PHONY: lotus-keygen
BINS+=lotus-keygen
testground:
$(GOCC) build -tags testground -o /dev/null ./cmd/lotus
.PHONY: testground
BINS+=testground
tvx:
rm -f tvx
$(GOCC) build -o tvx ./cmd/tvx
.PHONY: tvx
BINS+=tvx
lotus-sim: $(BUILD_DEPS)
rm -f lotus-sim
$(GOCC) build $(GOFLAGS) -o lotus-sim ./cmd/lotus-sim
.PHONY: lotus-sim
BINS+=lotus-sim
# SYSTEMD
install-daemon-service: install-daemon
mkdir -p /etc/systemd/system
mkdir -p /var/log/lotus
install -C -m 0644 ./scripts/lotus-daemon.service /etc/systemd/system/lotus-daemon.service
systemctl daemon-reload
@echo
@echo "lotus-daemon service installed. Don't forget to run 'sudo systemctl start lotus-daemon' to start it and 'sudo systemctl enable lotus-daemon' for it to be enabled on startup."
install-miner-service: install-miner install-daemon-service
mkdir -p /etc/systemd/system
mkdir -p /var/log/lotus
install -C -m 0644 ./scripts/lotus-miner.service /etc/systemd/system/lotus-miner.service
systemctl daemon-reload
@echo
@echo "lotus-miner service installed. Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup."
install-main-services: install-miner-service
install-all-services: install-main-services
install-services: install-main-services
clean-daemon-service: clean-miner-service
-systemctl stop lotus-daemon
-systemctl disable lotus-daemon
rm -f /etc/systemd/system/lotus-daemon.service
systemctl daemon-reload
clean-miner-service:
-systemctl stop lotus-miner
-systemctl disable lotus-miner
rm -f /etc/systemd/system/lotus-miner.service
systemctl daemon-reload
clean-main-services: clean-daemon-service
clean-all-services: clean-main-services
clean-services: clean-all-services
# MISC
buildall: $(BINS)
completions:
./scripts/make-completions.sh lotus
./scripts/make-completions.sh lotus-storage-miner
.PHONY: completions
install-completions:
mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/
install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus
install -C ./scripts/bash-completion/lotus-storage-miner /usr/share/bash-completion/completions/lotus-storage-miner
install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus
install -C ./scripts/zsh-completion/lotus-storage-miner /usr/local/share/zsh/site-functions/_lotus-storage-miner
clean:
rm -rf $(CLEAN) $(BINS)
@ -208,13 +284,95 @@ dist-clean:
git submodule deinit --all -f
.PHONY: dist-clean
type-gen:
go run ./gen/main.go
type-gen: api-gen
$(GOCC) run ./gen/main.go
$(GOCC) generate -x ./...
goimports -w api/
method-gen:
(cd ./lotuspond/front/src/chain && go run ./methodgen.go)
actors-code-gen:
$(GOCC) run ./gen/inline-gen . gen/inlinegen-data.json
$(GOCC) run ./chain/actors/agen
$(GOCC) fmt ./...
gen: type-gen method-gen
actors-gen: actors-code-gen fiximports
.PHONY: actors-gen
bundle-gen:
$(GOCC) run ./gen/bundle $(VERSION) $(RELEASE) $(RELEASE_OVERRIDES)
$(GOCC) fmt ./build/...
.PHONY: bundle-gen
api-gen:
$(GOCC) run ./gen/api
goimports -w api
goimports -w api
.PHONY: api-gen
cfgdoc-gen:
$(GOCC) run ./node/config/cfgdocgen > ./node/config/doc_gen.go
appimage: lotus
rm -rf appimage-builder-cache || true
rm AppDir/io.filecoin.lotus.desktop || true
rm AppDir/icon.svg || true
rm AppDir/AppRun || true
mkdir -p AppDir/usr/bin
cp ./lotus AppDir/usr/bin/
appimage-builder
docsgen: docsgen-md docsgen-openrpc fiximports
docsgen-md-bin: api-gen actors-gen
$(GOCC) build $(GOFLAGS) -o docgen-md ./api/docgen/cmd
docsgen-openrpc-bin: api-gen actors-gen
$(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd
docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker
docsgen-md-full: docsgen-md-bin
./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
./docgen-md "api/v0api/full.go" "FullNode" "v0api" "./api/v0api" > documentation/en/api-v0-methods.md
docsgen-md-storage: docsgen-md-bin
./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
docsgen-md-worker: docsgen-md-bin
./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway
docsgen-openrpc-full: docsgen-openrpc-bin
./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" -gzip > build/openrpc/full.json.gz
docsgen-openrpc-storage: docsgen-openrpc-bin
./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" -gzip > build/openrpc/miner.json.gz
docsgen-openrpc-worker: docsgen-openrpc-bin
./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" -gzip > build/openrpc/worker.json.gz
docsgen-openrpc-gateway: docsgen-openrpc-bin
./docgen-openrpc "api/api_gateway.go" "Gateway" "api" "./api" -gzip > build/openrpc/gateway.json.gz
.PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin
fiximports:
./scripts/fiximports
gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci fiximports
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli"
.PHONY: gen
jen: gen
snap: lotus lotus-miner lotus-worker
snapcraft
# snapcraft upload ./lotus_*.snap
# separate from gen because it needs binaries
docsgen-cli: lotus lotus-miner lotus-worker
python3 ./scripts/generate-lotus-cli.py
./lotus config default > documentation/en/default-lotus-config.toml
./lotus-miner config default > documentation/en/default-lotus-miner-config.toml
.PHONY: docsgen-cli
print-%:
@echo $*=$($*)
circleci:
go generate -x ./.circleci

135
README.md
View File

@ -1,16 +1,137 @@
![Lotus](documentation/images/lotus_logo_h.png)
<p align="center">
<a href="https://lotus.filecoin.io/" title="Filecoin Docs">
<img src="documentation/images/lotus_logo_h.png" alt="Project Lotus Logo" width="244" />
</a>
</p>
# Project Lotus - 莲
<h1 align="center">Project Lotus - 莲</h1>
Lotus is an implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, check out the [Filecoin Spec](https://github.com/filecoin-project/specs).
<p align="center">
<a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
<a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
<a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.18.8-blue.svg" /></a>
<br>
</p>
## Development
All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work is in the [lotus testnet github project board](https://github.com/filecoin-project/lotus/projects/1).
Lotus is an implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, check out the [Filecoin Spec](https://spec.filecoin.io).
## Building & Documentation
For instructions on how to build lotus from source, please visit [https://docs.lotu.sh](https://docs.lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).
> Note: The default `master` branch is the dev branch, please use with caution. For the latest stable version, checkout the most recent [`Latest release`](https://github.com/filecoin-project/lotus/releases).
For complete instructions on how to build, install and setup lotus, please visit [https://lotus.filecoin.io](https://lotus.filecoin.io/lotus/install/prerequisites/#supported-platforms). Basic build instructions can be found further down in this readme.
## Reporting a Vulnerability
Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details.
## Related packages
These repos are independent and reusable modules, but are tightly integrated into Lotus to make up a fully featured Filecoin implementation:
- [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board)
- [builtin-actors](https://github.com/filecoin-project/builtin-actors)
## Contribute
Lotus is a universally open project and welcomes contributions of all kinds: code, docs, and more. However, before making a contribution, we ask you to heed these recommendations:
1. If the proposal entails a protocol change, please first submit a [Filecoin Improvement Proposal](https://github.com/filecoin-project/FIPs).
2. If the change is complex and requires prior discussion, [open an issue](https://github.com/filecoin-project/lotus/issues) or a [discussion](https://github.com/filecoin-project/lotus/discussions) to request feedback before you start working on a pull request. This is to avoid disappointment and sunk costs, in case the change is not actually needed or accepted.
3. Please refrain from submitting PRs to adapt existing code to subjective preferences. The changeset should contain functional or technical improvements/enhancements, bug fixes, new features, or some other clear material contribution. Simple stylistic changes are likely to be rejected in order to reduce code churn.
When implementing a change:
1. Adhere to the standard Go formatting guidelines, e.g. [Effective Go](https://golang.org/doc/effective_go.html). Run `go fmt`.
2. Stick to the idioms and patterns used in the codebase. Familiar-looking code has a higher chance of being accepted than eerie code. Pay attention to commonly used variable and parameter names, avoidance of naked returns, error handling patterns, etc.
3. Comments: follow the advice on the [Commentary](https://golang.org/doc/effective_go.html#commentary) section of Effective Go.
4. Minimize code churn. Modify only what is strictly necessary. Well-encapsulated changesets will get a quicker response from maintainers.
5. Lint your code with [`golangci-lint`](https://golangci-lint.run) (CI will reject your PR if unlinted).
6. Add tests.
7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description.
8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message.
## Basic Build Instructions
**System-specific Software Dependencies**:
Building Lotus requires some system dependencies, usually provided by your distribution.
Ubuntu/Debian:
```
sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y && sudo apt upgrade -y
```
Fedora:
```
sudo dnf -y install gcc make git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm wget hwloc hwloc-devel
```
For other distributions you can find the required dependencies [here.](https://lotus.filecoin.io/lotus/install/prerequisites/#supported-platforms) For instructions specific to macOS, you can find them [here.](https://lotus.filecoin.io/lotus/install/macos/)
#### Go
To build Lotus, you need a working installation of [Go 1.19.7 or higher](https://golang.org/dl/):
```bash
wget -c https://golang.org/dl/go1.19.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
```
**TIP:**
You'll need to add `/usr/local/go/bin` to your path. For most Linux distributions you can run something like:
```shell
echo "export PATH=$PATH:/usr/local/go/bin" >> ~/.bashrc && source ~/.bashrc
```
See the [official Golang installation instructions](https://golang.org/doc/install) if you get stuck.
### Build and install Lotus
Once all the dependencies are installed, you can build and install the Lotus suite (`lotus`, `lotus-miner`, and `lotus-worker`).
1. Clone the repository:
```sh
git clone https://github.com/filecoin-project/lotus.git
cd lotus/
```
Note: The default branch `master` is the dev branch, where the latest new features, bug fixes and improvements land. However, if you want to run lotus on Filecoin mainnet with a production-ready build, get the latest release [here](https://github.com/filecoin-project/lotus/releases).
2. To join mainnet, checkout the [latest release](https://github.com/filecoin-project/lotus/releases).
If you are changing networks from a previous Lotus installation or there has been a network reset, read the [Switch networks guide](https://lotus.filecoin.io/lotus/manage/switch-networks/) before proceeding.
For networks other than mainnet, look up the current branch or tag/commit for the network you want to join in the [Filecoin networks dashboard](https://network.filecoin.io), then build Lotus for your specific network below.
```sh
git checkout <tag_or_branch>
# For example:
git checkout <vX.X.X> # tag for a release
```
Currently, the latest code on the _master_ branch corresponds to mainnet.
3. If you are in China, see "[Lotus: tips when running in China](https://lotus.filecoin.io/lotus/configure/nodes-in-china/)".
4. These build instructions use the prebuilt proofs binaries. If you want to build the proof binaries from source, check the [complete instructions](https://lotus.filecoin.io/lotus/install/prerequisites/). Note: if you are building the proof binaries from source, [installing rustup](https://lotus.filecoin.io/lotus/install/linux/#rustup) is also needed.
5. Build and install Lotus:
```sh
make clean all #mainnet
# Or to join a testnet or devnet:
make clean calibnet # Calibration with min 32GiB sectors
sudo make install
```
This will put `lotus`, `lotus-miner` and `lotus-worker` in `/usr/local/bin`.
`lotus` will use the `$HOME/.lotus` folder by default for storage (configuration, chain data, wallets, etc). See [advanced options](https://lotus.filecoin.io/lotus/configure/defaults/#environment-variables) for information on how to customize the Lotus folder.
6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://lotus.filecoin.io/lotus/install/linux/#start-the-lotus-daemon-and-sync-the-chain).
## License

23
SECURITY.md Normal file
View File

@ -0,0 +1,23 @@
# Security Policy
## Reporting a Vulnerability
For reporting security vulnerabilities/bugs, please consult our Security Policy and Responsible Disclosure Program information at https://github.com/filecoin-project/community/blob/master/SECURITY.md. Security vulnerabilities should be reported via our [Vulnerability Reporting channels](https://github.com/filecoin-project/community/blob/master/SECURITY.md#vulnerability-reporting) and will be eligible for a [Bug Bounty](https://security.filecoin.io/bug-bounty/).
Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report.
Here are some examples of bugs we would consider to be security vulnerabilities:
* If you can spend from a `multisig` wallet you do not control the keys for.
* If you can cause a miner to be slashed without them actually misbehaving.
* If you can maintain power without submitting windowed posts regularly.
* If you can craft a message that causes lotus nodes to panic.
* If you can cause your miner to win significantly more blocks than it should.
* If you can craft a message that causes a persistent fork in the network.
* If you can cause the total amount of Filecoin in the network to no longer be 2 billion.
This is not an exhaustive list, but it should provide some idea of what we consider a security vulnerability.
## Reporting a non security bug
For non-security bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).

14
api/README.md Normal file
View File

@ -0,0 +1,14 @@
## Lotus API
This package contains all lotus API definitions. Interfaces defined here are
exposed as JSON-RPC 2.0 endpoints by lotus programs. A connection sketch follows the table below.
### Versions
| File | Alias File | Interface | Exposed by | Version | HTTP Endpoint | Status | Docs
|------------------|-------------------|----------------|--------------------|---------|---------------|------------------------------|------
| `api_common.go` | `v0api/latest.go` | `Common` | lotus; lotus-miner | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods.md)
| `api_full.go` | `v1api/latest.go` | `FullNode` | lotus | v1 | `/rpc/v1` | Latest, **Work in progress** | [Methods](../documentation/en/api-v1-unstable-methods.md)
| `api_storage.go` | `v0api/latest.go` | `StorageMiner` | lotus-miner | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods-miner.md)
| `api_worker.go` | `v0api/latest.go` | `Worker` | lotus-worker | v0 | `/rpc/v0` | Latest, Stable | [Methods](../documentation/en/api-v0-methods-worker.md)
| `v0api/full.go` | | `FullNode` | lotus | v0 | `/rpc/v0` | Stable | [Methods](../documentation/en/api-v0-methods.md)
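As a quick orientation, here is a minimal sketch of dialing the v1 (`FullNode`) endpoint from the table above with the client constructors under `api/client`. The endpoint address, token handling, and error handling are assumptions for illustration, not prescribed usage:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()
	// Assumed local daemon; a token can be minted with `lotus auth create-token`.
	headers := http.Header{"Authorization": []string{"Bearer <token>"}}

	// NewFullNodeRPCV1 dials the /rpc/v1 (FullNode) endpoint listed above.
	full, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1", headers)
	if err != nil {
		panic(err)
	}
	defer closer()

	v, err := full.Version(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("connected:", v.String())
}
```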

View File

@ -3,51 +3,71 @@ package api
import (
"context"
"fmt"
"time"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/google/uuid"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/build"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/journal/alerting"
)
// MODIFYING THE API INTERFACE
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
type Common interface {
// Auth
AuthVerify(ctx context.Context, token string) ([]auth.Permission, error)
AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error)
// MethodGroup: Auth
// network
AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read
AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) //perm:admin
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error)
NetPeers(context.Context) ([]peer.AddrInfo, error)
NetConnect(context.Context, peer.AddrInfo) error
NetAddrsListen(context.Context) (peer.AddrInfo, error)
NetDisconnect(context.Context, peer.ID) error
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
// MethodGroup: Log
// ID returns peerID of libp2p node backing this API
ID(context.Context) (peer.ID, error)
LogList(context.Context) ([]string, error) //perm:write
LogSetLevel(context.Context, string, string) error //perm:write
// LogAlerts returns a list of all alerts, both active and inactive, tracked
// by the node
LogAlerts(ctx context.Context) ([]alerting.Alert, error) //perm:admin
// MethodGroup: Common
// Version provides information about API provider
Version(context.Context) (Version, error)
Version(context.Context) (APIVersion, error) //perm:read
LogList(context.Context) ([]string, error)
LogSetLevel(context.Context, string, string) error
// Discover returns an OpenRPC document describing an RPC API.
Discover(ctx context.Context) (apitypes.OpenRPCDocument, error) //perm:read
// trigger graceful shutdown
Shutdown(context.Context) error
Shutdown(context.Context) error //perm:admin
// StartTime returns node start time
StartTime(context.Context) (time.Time, error) //perm:read
// Session returns a random UUID of api provider session
Session(context.Context) (uuid.UUID, error) //perm:read
Closing(context.Context) (<-chan struct{}, error) //perm:read
}
// Version provides various build-time information
type Version struct {
// APIVersion provides various build-time information
type APIVersion struct {
Version string
// APIVersion is a binary encoded semver version of the remote implementing
// this api
//
// See APIVersion in build/version.go
APIVersion build.Version
APIVersion Version
// TODO: git commit / os / genesis cid?
@ -55,6 +75,6 @@ type Version struct {
BlockDelay uint64
}
func (v Version) String() string {
func (v APIVersion) String() string {
return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String())
}

42
api/api_errors.go Normal file
View File

@ -0,0 +1,42 @@
package api
import (
"errors"
"reflect"
"github.com/filecoin-project/go-jsonrpc"
)
const (
EOutOfGas = iota + jsonrpc.FirstUserCode
EActorNotFound
)
type ErrOutOfGas struct{}
func (e *ErrOutOfGas) Error() string {
return "call ran out of gas"
}
type ErrActorNotFound struct{}
func (e *ErrActorNotFound) Error() string {
return "actor not found"
}
var RPCErrors = jsonrpc.NewErrors()
func ErrorIsIn(err error, errorTypes []error) bool {
for _, etype := range errorTypes {
tmp := reflect.New(reflect.PointerTo(reflect.ValueOf(etype).Elem().Type())).Interface()
if errors.As(err, tmp) {
return true
}
}
return false
}
func init() {
RPCErrors.Register(EOutOfGas, new(*ErrOutOfGas))
RPCErrors.Register(EActorNotFound, new(*ErrActorNotFound))
}
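A brief caller-side sketch (hypothetical, not part of this diff) of how the errors registered above might be matched after a failed call; `handleCallError` and the printed messages are illustrative:

```go
package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// handleCallError branches on the typed errors registered in api_errors.go.
// ErrorIsIn matches err against each listed error type via errors.As, so it
// also works on errors decoded from JSON-RPC responses.
func handleCallError(err error) {
	switch {
	case err == nil:
		return
	case api.ErrorIsIn(err, []error{&api.ErrOutOfGas{}}):
		fmt.Println("call ran out of gas; consider retrying with a higher gas limit")
	case api.ErrorIsIn(err, []error{&api.ErrActorNotFound{}}):
		fmt.Println("actor not found at the given tipset")
	default:
		fmt.Println("unhandled error:", err)
	}
}

func main() {
	handleCallError(&api.ErrOutOfGas{})
}
```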

File diff suppressed because it is too large Load Diff

121
api/api_gateway.go Normal file
View File

@ -0,0 +1,121 @@
package api
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
"github.com/filecoin-project/go-state-types/dline"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
// MODIFYING THE API INTERFACE
//
// NOTE: This is the V1 (Unstable) API - to add methods to the V0 (Stable) API
// you'll have to add those methods to interfaces in `api/v0api`
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make clean && make deps && make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
type Gateway interface {
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainPutObj(context.Context, blocks.Block) error
ChainHead(ctx context.Context) (*types.TipSet, error)
ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error)
ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*HeadChange, error)
ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
ChainNotify(context.Context) (<-chan []*HeadChange, error)
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
ChainGetGenesis(context.Context) (*types.TipSet, error)
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error)
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*MsigTransaction, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MsigVesting, error)
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*InvocResult, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)
StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*MarketDeal, error)
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (MinerInfo, error)
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
StateNetworkName(context.Context) (dtypes.NetworkName, error)
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
WalletBalance(context.Context, address.Address) (types.BigInt, error)
Version(context.Context) (APIVersion, error)
Discover(context.Context) (apitypes.OpenRPCDocument, error)
EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error)
EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error)
EthGetBlockTransactionCountByNumber(ctx context.Context, blkNum ethtypes.EthUint64) (ethtypes.EthUint64, error)
EthGetBlockTransactionCountByHash(ctx context.Context, blkHash ethtypes.EthHash) (ethtypes.EthUint64, error)
EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error)
EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error)
EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error)
EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error)
EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error)
EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error)
EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error)
EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error)
EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error)
EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error)
EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error)
EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error)
EthChainId(ctx context.Context) (ethtypes.EthUint64, error)
EthSyncing(ctx context.Context) (ethtypes.EthSyncingResult, error)
NetVersion(ctx context.Context) (string, error)
NetListening(ctx context.Context) (bool, error)
EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error)
EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error)
EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error)
EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error)
EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error)
EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam string) (ethtypes.EthBytes, error)
EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error)
EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error)
EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error)
EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error)
EthNewFilter(ctx context.Context, filter *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error)
EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error)
EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error)
EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error)
EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error)
EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error)
Web3ClientVersion(ctx context.Context) (string, error)
}
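For orientation, a minimal stdlib-only sketch of calling one of the Gateway methods above over raw JSON-RPC; the gateway URL is a placeholder, and the `Filecoin.` method namespace is the convention lotus uses for its RPC methods:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder endpoint: substitute a gateway or node you operate/trust.
	url := "https://<gateway-host>/rpc/v1"
	req := []byte(`{"jsonrpc":"2.0","method":"Filecoin.ChainHead","params":[],"id":1}`)

	resp, err := http.Post(url, "application/json", bytes.NewReader(req))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // raw JSON: the current head tipset
}
```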

77
api/api_net.go Normal file
View File

@ -0,0 +1,77 @@
package api
import (
"context"
"time"
"github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
// MODIFYING THE API INTERFACE
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
type Net interface {
// MethodGroup: Net
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) //perm:read
NetPeers(context.Context) ([]peer.AddrInfo, error) //perm:read
NetPing(context.Context, peer.ID) (time.Duration, error) //perm:read
NetConnect(context.Context, peer.AddrInfo) error //perm:write
NetAddrsListen(context.Context) (peer.AddrInfo, error) //perm:read
NetDisconnect(context.Context, peer.ID) error //perm:write
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) //perm:read
NetPubsubScores(context.Context) ([]PubsubScore, error) //perm:read
NetAutoNatStatus(context.Context) (NatInfo, error) //perm:read
NetAgentVersion(ctx context.Context, p peer.ID) (string, error) //perm:read
NetPeerInfo(context.Context, peer.ID) (*ExtendedPeerInfo, error) //perm:read
// NetBandwidthStats returns statistics about the node's total bandwidth
// usage and current rate across all peers and protocols.
NetBandwidthStats(ctx context.Context) (metrics.Stats, error) //perm:read
// NetBandwidthStatsByPeer returns statistics about the node's bandwidth
// usage and current rate per peer.
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error) //perm:read
// NetBandwidthStatsByProtocol returns statistics about the node's bandwidth
// usage and current rate per protocol.
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error) //perm:read
// ConnectionGater API
NetBlockAdd(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin
NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read
NetProtectAdd(ctx context.Context, acl []peer.ID) error //perm:admin
NetProtectRemove(ctx context.Context, acl []peer.ID) error //perm:admin
NetProtectList(ctx context.Context) ([]peer.ID, error) //perm:read
// ResourceManager API
NetStat(ctx context.Context, scope string) (NetStat, error) //perm:read
NetLimit(ctx context.Context, scope string) (NetLimit, error) //perm:read
NetSetLimit(ctx context.Context, scope string, limit NetLimit) error //perm:admin
// ID returns peerID of libp2p node backing this API
ID(context.Context) (peer.ID, error) //perm:read
}
type CommonNet interface {
Common
Net
}
type NatInfo struct {
Reachability network.Reachability
PublicAddrs []string
}

View File

@ -3,62 +3,339 @@ package api
import (
"bytes"
"context"
"time"
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/piecestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin/v9/market"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
abinetwork "github.com/filecoin-project/go-state-types/network"
builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/storage/pipeline/sealiface"
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
// MODIFYING THE API INTERFACE
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
// StorageMiner is a low-level interface to the Filecoin network storage miner node
type StorageMiner interface {
Common
Net
ActorAddress(context.Context) (address.Address, error)
ActorAddress(context.Context) (address.Address, error) //perm:read
ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error)
ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) //perm:read
ActorAddressConfig(ctx context.Context) (AddressConfig, error) //perm:read
MiningBase(context.Context) (*types.TipSet, error)
// WithdrawBalance allows withdrawing balance from the miner actor to the owner address.
// Specify amount as "0" to withdraw the full balance. This method returns a message CID
// and does not wait for message execution.
ActorWithdrawBalance(ctx context.Context, amount abi.TokenAmount) (cid.Cid, error) //perm:admin
// BeneficiaryWithdrawBalance allows the beneficiary of a miner to withdraw balance from the miner actor.
// Specify amount as "0" to withdraw the full balance. This method returns a message CID
// and does not wait for message execution.
BeneficiaryWithdrawBalance(context.Context, abi.TokenAmount) (cid.Cid, error) //perm:admin
MiningBase(context.Context) (*types.TipSet, error) //perm:read
ComputeWindowPoSt(ctx context.Context, dlIdx uint64, tsk types.TipSetKey) ([]miner.SubmitWindowedPoStParams, error) //perm:admin
ComputeDataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (abi.PieceInfo, error) //perm:admin
// Temp api for testing
PledgeSector(context.Context) error
PledgeSector(context.Context) (abi.SectorID, error) //perm:write
// Get the status of a given sector by ID
SectorsStatus(context.Context, abi.SectorNumber) (SectorInfo, error)
SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error) //perm:read
// Add piece to an open sector. If no sectors with enough space are open,
// either a new sector will be created, or this call will block until more
// sectors can be created.
SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPieceSize, r storiface.Data, d PieceDealInfo) (SectorOffset, error) //perm:admin
SectorsUnsealPiece(ctx context.Context, sector storiface.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd *cid.Cid) error //perm:admin
// List all staged sectors
SectorsList(context.Context) ([]abi.SectorNumber, error)
SectorsList(context.Context) ([]abi.SectorNumber, error) //perm:read
SectorsRefs(context.Context) (map[string][]SealedRef, error)
// Get summary info of sectors
SectorsSummary(ctx context.Context) (map[SectorState]int, error) //perm:read
SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error
// List sectors in particular states
SectorsListInStates(context.Context, []SectorState) ([]abi.SectorNumber, error) //perm:read
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
StorageLocal(ctx context.Context) (map[stores.ID]string, error)
StorageStat(ctx context.Context, id stores.ID) (stores.FsStat, error)
SectorsRefs(context.Context) (map[string][]SealedRef, error) //perm:read
// SectorStartSealing can be called on sectors in Empty or WaitDeals states
// to trigger sealing early
SectorStartSealing(context.Context, abi.SectorNumber) error //perm:write
// SectorSetSealDelay sets the time that a newly-created sector
// waits for more deals before it starts sealing
SectorSetSealDelay(context.Context, time.Duration) error //perm:write
// SectorGetSealDelay gets the time that a newly-created sector
// waits for more deals before it starts sealing
SectorGetSealDelay(context.Context) (time.Duration, error) //perm:read
// SectorSetExpectedSealDuration sets the expected time for a sector to seal
SectorSetExpectedSealDuration(context.Context, time.Duration) error //perm:write
// SectorGetExpectedSealDuration gets the expected time for a sector to seal
SectorGetExpectedSealDuration(context.Context) (time.Duration, error) //perm:read
SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error //perm:admin
// SectorRemove removes the sector from storage. It doesn't terminate it on-chain, which can
// be done with SectorTerminate. Removing and not terminating live sectors will cause additional penalties.
SectorRemove(context.Context, abi.SectorNumber) error //perm:admin
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin
// SectorTerminate terminates the sector on-chain (adding it to a termination batch first), then
// automatically removes it from storage
SectorTerminate(context.Context, abi.SectorNumber) error //perm:admin
// SectorTerminateFlush immediately sends a terminate message with sectors batched for termination.
// Returns nil if the message wasn't sent
SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin
// SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message
SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
// SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit.
// Returns nil if the message wasn't sent
SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin
// SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message
SectorPreCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
// SectorCommitFlush immediately sends a Commit message with sectors aggregated for Commit.
// Returns nil if the message wasn't sent
SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin
// SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message
SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin
SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin
// SectorAbortUpgrade can be called on sectors that are in the process of being upgraded to abort the upgrade
SectorAbortUpgrade(context.Context, abi.SectorNumber) error //perm:admin
// SectorUnseal unseals the provided sector
SectorUnseal(ctx context.Context, number abi.SectorNumber) error //perm:admin
// SectorNumAssignerMeta returns sector number assigner metadata - reserved/allocated
SectorNumAssignerMeta(ctx context.Context) (NumAssignerMeta, error) //perm:read
// SectorNumReservations returns a list of sector number reservations
SectorNumReservations(ctx context.Context) (map[string]bitfield.BitField, error) //perm:read
// SectorNumReserve creates a new sector number reservation. Will fail if any other reservation has colliding
// numbers or name. Set force to true to override safety checks.
// Valid characters for name: a-z, A-Z, 0-9, _, -
SectorNumReserve(ctx context.Context, name string, sectors bitfield.BitField, force bool) error //perm:admin
// SectorNumReserveCount creates a new sector number reservation for `count` sector numbers.
// By default lotus will allocate the lowest available sector numbers to the reservation.
// For restrictions on `name` see SectorNumReserve
SectorNumReserveCount(ctx context.Context, name string, count uint64) (bitfield.BitField, error) //perm:admin
// SectorNumFree drops a sector reservation
SectorNumFree(ctx context.Context, name string) error //perm:admin
SectorReceive(ctx context.Context, meta RemoteSectorMeta) error //perm:admin
// WorkerConnect tells the node to connect to a worker's RPC endpoint
WorkerConnect(context.Context, string) error //perm:admin retry:true
WorkerStats(context.Context) (map[uuid.UUID]storiface.WorkerStats, error) //perm:admin
WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin
// storiface.WorkerReturn
ReturnDataCid(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storiface.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storiface.SectorCids, err *storiface.CallError) error //perm:admin retry:true
ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storiface.Commit1Out, err *storiface.CallError) error //perm:admin retry:true
ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storiface.Proof, err *storiface.CallError) error //perm:admin retry:true
ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storiface.ReplicaUpdateOut, err *storiface.CallError) error //perm:admin retry:true
ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storiface.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true
ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storiface.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true
ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnFinalizeReplicaUpdate(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true
ReturnDownloadSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true
// SealingSchedDiag dumps internal sealing scheduler state
SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin
SealingAbort(ctx context.Context, call storiface.CallID) error //perm:admin
// SealingRemoveRequest removes a request from the sealing pipeline
SealingRemoveRequest(ctx context.Context, schedId uuid.UUID) error //perm:admin
// paths.SectorIndex
StorageAttach(context.Context, storiface.StorageInfo, fsutil.FsStat) error //perm:admin
StorageDetach(ctx context.Context, id storiface.ID, url string) error //perm:admin
StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error) //perm:admin
StorageReportHealth(context.Context, storiface.ID, storiface.HealthReport) error //perm:admin
StorageDeclareSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error //perm:admin
StorageDropSector(ctx context.Context, storageID storiface.ID, s abi.SectorID, ft storiface.SectorFileType) error //perm:admin
// StorageFindSector returns a list of paths where the specified sector files exist.
//
// If allowFetch is set, a list of paths to which the sector can be fetched will also be returned.
// - Paths which have sector files locally (don't require fetching) will be listed first.
// - Paths which have sector files locally will not be filtered based on AllowTypes/DenyTypes.
// - Paths which require fetching will be filtered based on AllowTypes/DenyTypes. If multiple
// file types are specified, each type will be considered individually, and a union of all paths
// which can accommodate each file type will be returned.
StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error) //perm:admin
// StorageBestAlloc returns a list of paths where sector files of the specified type can be allocated, ordered by preference.
// Paths with more weight and a higher percentage of free space are preferred.
// Note: This method doesn't filter paths based on AllowTypes/DenyTypes.
StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]storiface.StorageInfo, error) //perm:admin
StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin
StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) //perm:admin
StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) //perm:admin
StorageAuthVerify(ctx context.Context, token string) ([]auth.Permission, error) //perm:read
StorageAddLocal(ctx context.Context, path string) error //perm:admin
StorageDetachLocal(ctx context.Context, path string) error //perm:admin
StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read
// MarketListRetrievalDeals is deprecated and returns an empty list
MarketListRetrievalDeals(ctx context.Context) ([]struct{}, error) //perm:read
MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read
MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin
MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) //perm:read
MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error //perm:admin
MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) //perm:read
MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
// MarketDataTransferDiagnostics generates debugging information about current data transfers over graphsync
MarketDataTransferDiagnostics(ctx context.Context, p peer.ID) (*TransferDiagnostics, error) //perm:write
// MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write
MarketPublishPendingDeals(ctx context.Context) error //perm:admin
MarketRetryPublishDeal(ctx context.Context, propcid cid.Cid) error //perm:admin
// DagstoreListShards returns information about all shards known to the
// DAG store. Only available on nodes running the markets subsystem.
DagstoreListShards(ctx context.Context) ([]DagstoreShardInfo, error) //perm:read
// DagstoreInitializeShard initializes an uninitialized shard.
//
// Initialization consists of fetching the shard's data (deal payload) from
// the storage subsystem, generating an index, and persisting the index
// to facilitate later retrievals, and/or to publish to external sources.
//
// This operation is intended to complement the initial migration. The
// migration registers a shard for every unique piece CID, with lazy
// initialization. Thus, shards are not initialized immediately to avoid
// IO activity competing with proving. Instead, shards are initialized
// when first accessed. This method forces the initialization of a shard by
// accessing it and immediately releasing it. This is useful to warm up the
// cache to facilitate subsequent retrievals, and to generate the indexes
// to publish them externally.
//
// This operation fails if the shard is not in ShardStateNew state.
// It blocks until initialization finishes.
DagstoreInitializeShard(ctx context.Context, key string) error //perm:write
// DagstoreRecoverShard attempts to recover a failed shard.
//
// This operation fails if the shard is not in ShardStateErrored state.
// It blocks until recovery finishes. If recovery failed, it returns the
// error.
DagstoreRecoverShard(ctx context.Context, key string) error //perm:write
// DagstoreInitializeAll initializes all uninitialized shards in bulk,
// according to the policy passed in the parameters.
//
// It is recommended to set a maximum concurrency to avoid extreme
// IO pressure if the storage subsystem has a large number of deals.
//
// It returns a stream of events to report progress.
DagstoreInitializeAll(ctx context.Context, params DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) //perm:write
// DagstoreGC runs garbage collection on the DAG store.
DagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin
// DagstoreRegisterShard manually registers a shard with the dagstore for the given pieceCID
DagstoreRegisterShard(ctx context.Context, key string) error //perm:admin
// IndexerAnnounceDeal informs indexer nodes that a new deal was received,
// so they can download its index
IndexerAnnounceDeal(ctx context.Context, proposalCid cid.Cid) error //perm:admin
// IndexerAnnounceAllDeals informs the indexer nodes about all active deals.
IndexerAnnounceAllDeals(ctx context.Context) error //perm:admin
// DagstoreLookupPieces returns information about shards that contain the given CID.
DagstoreLookupPieces(ctx context.Context, cid cid.Cid) ([]DagstoreShardInfo, error) //perm:admin
// RuntimeSubsystems returns the subsystems that are enabled
// in this instance.
RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin
DealsList(ctx context.Context) ([]*MarketDeal, error) //perm:admin
DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOnlineStorageDeals(context.Context, bool) error //perm:admin
DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error //perm:admin
DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) //perm:admin
DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error //perm:admin
DealsConsiderOfflineStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOfflineStorageDeals(context.Context, bool) error //perm:admin
DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error //perm:admin
DealsConsiderVerifiedStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error //perm:admin
DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error) //perm:admin
DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error //perm:admin
PiecesListPieces(ctx context.Context) ([]cid.Cid, error) //perm:read
PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) //perm:read
PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read
PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) //perm:read
// CreateBackup creates a node backup under the specified file name. The
// method requires that the lotus-miner is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
// the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error //perm:admin
CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storiface.SectorRef) (map[abi.SectorNumber]string, error) //perm:admin
ComputeProof(ctx context.Context, ssi []builtinactors.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtinactors.PoStProof, error) //perm:read
// RecoverFault can be used to declare recoveries manually. It sends messages
// to the miner actor with details of recovered sectors and returns the CIDs of the messages. It honors the
// maxPartitionsPerRecoveryMessage from the config
RecoverFault(ctx context.Context, sectors []abi.SectorNumber) ([]cid.Cid, error) //perm:admin
}
var _ storiface.WorkerReturn = *new(StorageMiner)
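The //perm: tags above are enforced per auth token when this interface is served over JSON-RPC. A minimal client sketch, assuming a locally running miner API (the endpoint URL and token are placeholders, and only SectorsList is wired up for brevity):

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/go-jsonrpc"
)

// Subset of the StorageMiner API; abi.SectorNumber decodes as uint64 on the wire.
type minerAPI struct {
	SectorsList func(context.Context) ([]uint64, error)
}

func main() {
	ctx := context.Background()
	// Placeholder endpoint/token; real values come from the miner repo.
	headers := http.Header{"Authorization": []string{"Bearer <token>"}}

	var m minerAPI
	closer, err := jsonrpc.NewMergeClient(ctx, "ws://127.0.0.1:2345/rpc/v0", "Filecoin",
		[]interface{}{&m}, headers)
	if err != nil {
		panic(err)
	}
	defer closer()

	sectors, err := m.SectorsList(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("sectors:", sectors)
}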
type SealRes struct {
Err string
GoErr error `json:"-"`
@ -75,25 +352,48 @@ type SectorLog struct {
Message string
}
type SectorPiece struct {
Piece abi.PieceInfo
DealInfo *PieceDealInfo // nil for pieces which do not appear in deals (e.g. filler pieces)
}
type SectorInfo struct {
SectorID abi.SectorNumber
State SectorState
CommD *cid.Cid
CommR *cid.Cid
Proof []byte
Deals []abi.DealID
Pieces []SectorPiece
Ticket SealTicket
Seed SealSeed
PreCommitMsg *cid.Cid
CommitMsg *cid.Cid
Retries uint64
ToUpgrade bool
ReplicaUpdateMessage *cid.Cid
LastErr string
Log []SectorLog
// On Chain Info
SealProof abi.RegisteredSealProof // The seal proof type implies the PoSt proof/s
Activation abi.ChainEpoch // Epoch during which the sector proof was accepted
Expiration abi.ChainEpoch // Epoch during which the sector expires
DealWeight abi.DealWeight // Integral of active deals over sector lifetime
VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime
InitialPledge abi.TokenAmount // Pledge collected to commit this sector
// Expiration Info
OnTime abi.ChainEpoch
// non-zero if sector is faulty, epoch at which it will be permanently
// removed if it doesn't recover
Early abi.ChainEpoch
}
type SealedRef struct {
SectorID abi.SectorNumber
Offset abi.PaddedPieceSize
Size abi.UnpaddedPieceSize
}
@ -120,3 +420,205 @@ func (st *SealSeed) Equals(ost *SealSeed) bool {
}
type SectorState string
func (s *SectorState) String() string {
return string(*s)
}
type AddrUse int
const (
PreCommitAddr AddrUse = iota
CommitAddr
DealPublishAddr
PoStAddr
TerminateSectorsAddr
)
type AddressConfig struct {
PreCommitControl []address.Address
CommitControl []address.Address
TerminateControl []address.Address
DealPublishControl []address.Address
DisableOwnerFallback bool
DisableWorkerFallback bool
}
// PendingDealInfo has info about pending deals and when they are due to be
// published
type PendingDealInfo struct {
Deals []market.ClientDealProposal
PublishPeriodStart time.Time
PublishPeriod time.Duration
}
type SectorOffset struct {
Sector abi.SectorNumber
Offset abi.PaddedPieceSize
}
// PieceDealInfo is a tuple of deal identity and its schedule
type PieceDealInfo struct {
PublishCid *cid.Cid
DealID abi.DealID
DealProposal *market.DealProposal
DealSchedule DealSchedule
KeepUnsealed bool
}
// DealSchedule communicates the time interval of a storage deal. The deal must
// appear in a sealed (proven) sector no later than StartEpoch, otherwise it
// is invalid.
type DealSchedule struct {
StartEpoch abi.ChainEpoch
EndEpoch abi.ChainEpoch
}
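A hypothetical helper (not part of the API) making the DealSchedule invariant above explicit:

// scheduleOK reports whether a deal sealed by epoch `sealedBy` satisfies the schedule:
// the deal must be proven no later than StartEpoch, and the interval must be valid.
func scheduleOK(s DealSchedule, sealedBy abi.ChainEpoch) bool {
	return sealedBy <= s.StartEpoch && s.StartEpoch < s.EndEpoch
}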
// DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that
// we expose through JSON-RPC to avoid clients having to depend on the
// dagstore lib.
type DagstoreShardInfo struct {
Key string
State string
Error string
}
// DagstoreShardResult enumerates results per shard.
type DagstoreShardResult struct {
Key string
Success bool
Error string
}
type DagstoreInitializeAllParams struct {
MaxConcurrency int
IncludeSealed bool
}
// DagstoreInitializeAllEvent represents an initialization event.
type DagstoreInitializeAllEvent struct {
Key string
Event string // "start", "end"
Success bool
Error string
Total int
Current int
}
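An editor's sketch of consuming this event stream from a StorageMiner handle (the concurrency value is illustrative):

func initAllShards(ctx context.Context, api StorageMiner) error {
	events, err := api.DagstoreInitializeAll(ctx, DagstoreInitializeAllParams{
		MaxConcurrency: 4, // bound IO pressure, per the DagstoreInitializeAll doc comment
		IncludeSealed:  false,
	})
	if err != nil {
		return err
	}
	for ev := range events {
		fmt.Printf("[%d/%d] %s %s success=%v %s\n",
			ev.Current, ev.Total, ev.Key, ev.Event, ev.Success, ev.Error)
	}
	return nil
}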
type NumAssignerMeta struct {
Reserved bitfield.BitField
Allocated bitfield.BitField
// ChainAllocated+Reserved+Allocated
InUse bitfield.BitField
Next abi.SectorNumber
}
type RemoteSectorMeta struct {
////////
// BASIC SECTOR INFORMATION
// State specifies the first state the sector will enter after being imported
// Must be one of the following states:
// * Packing
// * GetTicket
// * PreCommitting
// * SubmitCommit
// * Proving/Available
State SectorState
Sector abi.SectorID
Type abi.RegisteredSealProof
////////
// SEALING METADATA
// (allows lotus to continue the sealing process)
// Required in Packing and later
Pieces []SectorPiece // todo better type?
// Required in PreCommitting and later
TicketValue abi.SealRandomness
TicketEpoch abi.ChainEpoch
PreCommit1Out storiface.PreCommit1Out // todo specify better
CommD *cid.Cid
CommR *cid.Cid // SectorKey
// Required in SubmitCommit and later
PreCommitInfo *miner.SectorPreCommitInfo
PreCommitDeposit *big.Int
PreCommitMessage *cid.Cid
PreCommitTipSet types.TipSetKey
SeedValue abi.InteractiveSealRandomness
SeedEpoch abi.ChainEpoch
CommitProof []byte
// Required in Proving/Available
CommitMessage *cid.Cid
// Optional sector metadata to import
Log []SectorLog
////////
// SECTOR DATA SOURCE
// Sector URLs - lotus will use these for fetching files into local storage
// Required in all states
DataUnsealed *storiface.SectorLocation
// Required in PreCommitting and later
DataSealed *storiface.SectorLocation
DataCache *storiface.SectorLocation
////////
// SEALING SERVICE HOOKS
// URL
// RemoteCommit1Endpoint is a URL of a POST endpoint which lotus will call to request Commit1 (seal_commit_phase1);
// the request body will be a json-serialized RemoteCommit1Params struct
RemoteCommit1Endpoint string
// RemoteCommit2Endpoint is a URL of a POST endpoint which lotus will call to request Commit2 (seal_commit_phase2);
// the request body will be a json-serialized RemoteCommit2Params struct
RemoteCommit2Endpoint string
// RemoteSealingDoneEndpoint is called after the sector exits the sealing pipeline;
// the request body will be a json-serialized RemoteSealingDoneParams struct
RemoteSealingDoneEndpoint string
}
type RemoteCommit1Params struct {
Ticket, Seed []byte
Unsealed cid.Cid
Sealed cid.Cid
ProofType abi.RegisteredSealProof
}
type RemoteCommit2Params struct {
Sector abi.SectorID
ProofType abi.RegisteredSealProof
// todo spec better
Commit1Out storiface.Commit1Out
}
type RemoteSealingDoneParams struct {
// Successful is true if the sector has entered a state considered "successfully sealed"
Successful bool
// State is the state the sector has entered
// For example "Proving" / "Removing"
State string
// Optional commit message CID
CommitMessage *cid.Cid
}
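A sketch of a RemoteSealingDoneEndpoint receiver (the route and log format are illustrative); lotus POSTs the json-serialized params as described above:

http.HandleFunc("/sealing-done", func(w http.ResponseWriter, r *http.Request) {
	var params RemoteSealingDoneParams
	if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	log.Printf("sector done: successful=%v state=%s msg=%v",
		params.Successful, params.State, params.CommitMessage)
	w.WriteHeader(http.StatusOK)
})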

View File

@ -1,12 +1,20 @@
// stm: #unit
package api
import (
"encoding/json"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-jsonrpc"
)
func goCmd() string {
@ -22,6 +30,7 @@ func goCmd() string {
}
func TestDoesntDependOnFFI(t *testing.T) {
//stm: @OTHER_IMPLEMENTATION_FFI_DEPENDENCE_001
deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output()
if err != nil {
t.Fatal(err)
@ -32,3 +41,104 @@ func TestDoesntDependOnFFI(t *testing.T) {
}
}
}
func TestDoesntDependOnBuild(t *testing.T) {
//stm: @OTHER_IMPLEMENTATION_FFI_DEPENDENCE_002
deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output()
if err != nil {
t.Fatal(err)
}
for _, pkg := range strings.Fields(string(deps)) {
if pkg == "github.com/filecoin-project/build" {
t.Fatal("api depends on filecoin-ffi")
}
}
}
func TestReturnTypes(t *testing.T) {
//stm: @OTHER_IMPLEMENTATION_001
errType := reflect.TypeOf(new(error)).Elem()
bareIface := reflect.TypeOf(new(interface{})).Elem()
jmarsh := reflect.TypeOf(new(json.Marshaler)).Elem()
tst := func(api interface{}) func(t *testing.T) {
return func(t *testing.T) {
ra := reflect.TypeOf(api).Elem()
for i := 0; i < ra.NumMethod(); i++ {
m := ra.Method(i)
switch m.Type.NumOut() {
case 1: // if 1 return value, it must be an error
require.Equal(t, errType, m.Type.Out(0), m.Name)
case 2: // if 2 return values, first can't be an interface/function, second must be an error
seen := map[reflect.Type]struct{}{}
todo := []reflect.Type{m.Type.Out(0)}
for len(todo) > 0 {
typ := todo[len(todo)-1]
todo = todo[:len(todo)-1]
if _, ok := seen[typ]; ok {
continue
}
seen[typ] = struct{}{}
if typ.Kind() == reflect.Interface && typ != bareIface && !typ.Implements(jmarsh) {
t.Error("methods can't return interfaces or struct types not implementing json.Marshaller", m.Name)
}
switch typ.Kind() {
case reflect.Ptr:
fallthrough
case reflect.Array:
fallthrough
case reflect.Slice:
fallthrough
case reflect.Chan:
todo = append(todo, typ.Elem())
case reflect.Map:
todo = append(todo, typ.Elem())
todo = append(todo, typ.Key())
case reflect.Struct:
for i := 0; i < typ.NumField(); i++ {
todo = append(todo, typ.Field(i).Type)
}
}
}
require.NotEqual(t, reflect.Func.String(), m.Type.Out(0).Kind().String(), m.Name)
require.Equal(t, errType, m.Type.Out(1), m.Name)
default:
t.Error("methods can only have 1 or 2 return values", m.Name)
}
}
}
}
t.Run("common", tst(new(Common)))
t.Run("full", tst(new(FullNode)))
t.Run("miner", tst(new(StorageMiner)))
t.Run("worker", tst(new(Worker)))
}
func TestPermTags(t *testing.T) {
//stm: @OTHER_IMPLEMENTATION_PERM_TAGS_001
_ = PermissionedFullAPI(&FullNodeStruct{})
_ = PermissionedStorMinerAPI(&StorageMinerStruct{})
_ = PermissionedWorkerAPI(&WorkerStruct{})
}
func TestRetryErrorIsInTrue(t *testing.T) {
errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}
require.True(t, ErrorIsIn(&jsonrpc.RPCConnectionError{}, errorsToRetry))
}
func TestRetryErrorIsInFalse(t *testing.T) {
errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}
require.False(t, ErrorIsIn(xerrors.Errorf("random error"), errorsToRetry))
}
func TestRetryWrappedErrorIsInTrue(t *testing.T) {
errorsToRetry := []error{&jsonrpc.RPCConnectionError{}}
require.True(t, ErrorIsIn(xerrors.Errorf("wrapped: %w", &jsonrpc.RPCConnectionError{}), errorsToRetry))
}

47
api/api_wallet.go Normal file
View File

@ -0,0 +1,47 @@
package api
import (
"context"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/chain/types"
)
type MsgType string
const (
MTUnknown = "unknown"
// Signing message CID. MsgMeta.Extra contains raw cbor message bytes
MTChainMsg = "message"
// Signing a block header. Signs raw cbor block bytes (MsgMeta.Extra is empty)
MTBlock = "block"
// Signing a deal proposal. Signs raw cbor proposal bytes (MsgMeta.Extra is empty)
MTDealProposal = "dealproposal"
// TODO: Deals, Vouchers, VRF
)
type MsgMeta struct {
Type MsgType
// Additional data related to what is signed. Should be verifiable with the
// signed bytes (e.g. CID(Extra).Bytes() == toSign)
Extra []byte
}
type Wallet interface {
WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:admin
WalletHas(context.Context, address.Address) (bool, error) //perm:admin
WalletList(context.Context) ([]address.Address, error) //perm:admin
WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error) //perm:admin
WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
WalletDelete(context.Context, address.Address) error //perm:admin
}
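Per the MsgMeta convention above (toSign is verifiable against Extra, e.g. CID(Extra).Bytes() == toSign), signing a chain message looks roughly like this sketch, assuming the types.Message ToStorageBlock helper:

func signChainMsg(ctx context.Context, w Wallet, signer address.Address, msg *types.Message) (*crypto.Signature, error) {
	mb, err := msg.ToStorageBlock()
	if err != nil {
		return nil, err
	}
	// toSign is the message CID's bytes; Extra carries the raw cbor message.
	return w.WalletSign(ctx, signer, mb.Cid().Bytes(), MsgMeta{
		Type:  MTChainMsg,
		Extra: mb.RawData(),
	})
}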

View File

@ -3,25 +3,88 @@ package api
import (
"context"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-storage/storage"
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/proof"
"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
// MODIFYING THE API INTERFACE
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
type Worker interface {
Version(context.Context) (Version, error) //perm:admin
// TaskType -> Weight
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) //perm:admin
Paths(context.Context) ([]storiface.StoragePath, error) //perm:admin
Info(context.Context) (storiface.WorkerInfo, error) //perm:admin
// storiface.WorkerCalls
DataCid(ctx context.Context, pieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (storiface.CallID, error) //perm:admin
AddPiece(ctx context.Context, sector storiface.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storiface.Data) (storiface.CallID, error) //perm:admin
SealPreCommit1(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
SealPreCommit2(ctx context.Context, sector storiface.SectorRef, pc1o storiface.PreCommit1Out) (storiface.CallID, error) //perm:admin
SealCommit1(ctx context.Context, sector storiface.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storiface.SectorCids) (storiface.CallID, error) //perm:admin
SealCommit2(ctx context.Context, sector storiface.SectorRef, c1o storiface.Commit1Out) (storiface.CallID, error) //perm:admin
FinalizeSector(ctx context.Context, sector storiface.SectorRef) (storiface.CallID, error) //perm:admin
FinalizeReplicaUpdate(ctx context.Context, sector storiface.SectorRef) (storiface.CallID, error) //perm:admin
ReplicaUpdate(ctx context.Context, sector storiface.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin
ProveReplicaUpdate1(ctx context.Context, sector storiface.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin
ProveReplicaUpdate2(ctx context.Context, sector storiface.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storiface.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin
GenerateSectorKeyFromData(ctx context.Context, sector storiface.SectorRef, commD cid.Cid) (storiface.CallID, error) //perm:admin
ReleaseUnsealed(ctx context.Context, sector storiface.SectorRef, keepUnsealed []storiface.Range) (storiface.CallID, error) //perm:admin
MoveStorage(ctx context.Context, sector storiface.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin
UnsealPiece(context.Context, storiface.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin
Fetch(context.Context, storiface.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin
DownloadSectorData(ctx context.Context, sector storiface.SectorRef, finalized bool, src map[storiface.SectorFileType]storiface.SectorLocation) (storiface.CallID, error) //perm:admin
GenerateWinningPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, randomness abi.PoStRandomness) ([]proof.PoStProof, error) //perm:admin
GenerateWindowPoSt(ctx context.Context, ppt abi.RegisteredPoStProof, mid abi.ActorID, sectors []storiface.PostSectorChallenge, partitionIdx int, randomness abi.PoStRandomness) (storiface.WindowPoStResult, error) //perm:admin
TaskDisable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
TaskEnable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin
// Storage / Other
Remove(ctx context.Context, sector abi.SectorID) error //perm:admin
StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
StorageAddLocal(ctx context.Context, path string) error //perm:admin
StorageDetachLocal(ctx context.Context, path string) error //perm:admin
StorageDetachAll(ctx context.Context) error //perm:admin
StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin
// SetEnabled marks the worker as enabled/disabled. Note that this setting
// may take a few seconds to propagate to the task scheduler
SetEnabled(ctx context.Context, enabled bool) error //perm:admin
Enabled(ctx context.Context) (bool, error) //perm:admin
// WaitQuiet blocks until there are no tasks running
WaitQuiet(ctx context.Context) error //perm:admin
// ProcessSession returns a random UUID of the worker session, generated
// when the worker process starts
ProcessSession(context.Context) (uuid.UUID, error) //perm:admin
// Like ProcessSession, but returns an error when the worker is disabled
Session(context.Context) (uuid.UUID, error) //perm:admin
// Trigger shutdown
Shutdown(context.Context) error //perm:admin
}
var _ storiface.WorkerCalls = *new(Worker)
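SetEnabled and WaitQuiet compose into a graceful drain; an editor's sketch:

// drainWorker takes a worker out of rotation before maintenance.
func drainWorker(ctx context.Context, w Worker) error {
	// Stop accepting new tasks; propagation to the scheduler may take a few seconds.
	if err := w.SetEnabled(ctx, false); err != nil {
		return err
	}
	// Block until all running tasks have finished.
	return w.WaitQuiet(ctx)
}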

View File

@ -1,67 +0,0 @@
package apibstore
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
"golang.org/x/xerrors"
)
type ChainIO interface {
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
ChainHasObj(context.Context, cid.Cid) (bool, error)
}
type apiBStore struct {
api ChainIO
}
func NewAPIBlockstore(cio ChainIO) blockstore.Blockstore {
return &apiBStore{
api: cio,
}
}
func (a *apiBStore) DeleteBlock(cid.Cid) error {
return xerrors.New("not supported")
}
func (a *apiBStore) Has(c cid.Cid) (bool, error) {
return a.api.ChainHasObj(context.TODO(), c)
}
func (a *apiBStore) Get(c cid.Cid) (blocks.Block, error) {
bb, err := a.api.ChainReadObj(context.TODO(), c)
if err != nil {
return nil, err
}
return blocks.NewBlockWithCid(bb, c)
}
func (a *apiBStore) GetSize(c cid.Cid) (int, error) {
bb, err := a.api.ChainReadObj(context.TODO(), c)
if err != nil {
return 0, err
}
return len(bb), nil
}
func (a *apiBStore) Put(blocks.Block) error {
return xerrors.New("not supported")
}
func (a *apiBStore) PutMany([]blocks.Block) error {
return xerrors.New("not supported")
}
func (a *apiBStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, xerrors.New("not supported")
}
func (a *apiBStore) HashOnRead(enabled bool) {
return
}
var _ blockstore.Blockstore = &apiBStore{}

View File

@ -1,38 +0,0 @@
package apistruct
import (
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
)
const (
// When changing these, update docs/API.md too
PermRead auth.Permission = "read" // default
PermWrite auth.Permission = "write"
PermSign auth.Permission = "sign" // Use wallet keys for signing
PermAdmin auth.Permission = "admin" // Manage permissions
)
var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin}
var DefaultPerms = []auth.Permission{PermRead}
func PermissionedStorMinerAPI(a api.StorageMiner) api.StorageMiner {
var out StorageMinerStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
return &out
}
func PermissionedFullAPI(a api.FullNode) api.FullNode {
var out FullNodeStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
return &out
}
func PermissionedWorkerAPI(a api.WorkerApi) api.WorkerApi {
var out WorkerStruct
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
return &out
}
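These wrappers gate each Internal field by its perm tag. A minimal sketch of serving a wrapped API with go-jsonrpc (the listen address is illustrative, and minerImpl is a hypothetical api.StorageMiner implementation):

func serveMinerAPI(minerImpl api.StorageMiner) error {
	rpcServer := jsonrpc.NewServer() // github.com/filecoin-project/go-jsonrpc
	rpcServer.Register("Filecoin", PermissionedStorMinerAPI(minerImpl))
	// RPCServer implements http.Handler, so it can be served directly.
	return http.ListenAndServe("127.0.0.1:2345", rpcServer)
}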

View File

@ -1,879 +0,0 @@
package apistruct
import (
"context"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/sector-storage/sealtasks"
"github.com/filecoin-project/sector-storage/stores"
"github.com/filecoin-project/sector-storage/storiface"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
"github.com/filecoin-project/specs-actors/actors/builtin/paych"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-storage/storage"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
// All permissions are listed in permissioned.go
var _ = AllPermissions
type CommonStruct struct {
Internal struct {
AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"read"`
AuthNew func(ctx context.Context, perms []auth.Permission) ([]byte, error) `perm:"admin"`
NetConnectedness func(context.Context, peer.ID) (network.Connectedness, error) `perm:"read"`
NetPeers func(context.Context) ([]peer.AddrInfo, error) `perm:"read"`
NetConnect func(context.Context, peer.AddrInfo) error `perm:"write"`
NetAddrsListen func(context.Context) (peer.AddrInfo, error) `perm:"read"`
NetDisconnect func(context.Context, peer.ID) error `perm:"write"`
NetFindPeer func(context.Context, peer.ID) (peer.AddrInfo, error) `perm:"read"`
ID func(context.Context) (peer.ID, error) `perm:"read"`
Version func(context.Context) (api.Version, error) `perm:"read"`
LogList func(context.Context) ([]string, error) `perm:"write"`
LogSetLevel func(context.Context, string, string) error `perm:"write"`
Shutdown func(context.Context) error `perm:"admin"`
}
}
// FullNodeStruct implements API passing calls to user-provided function values.
type FullNodeStruct struct {
CommonStruct
Internal struct {
ChainNotify func(context.Context) (<-chan []*api.HeadChange, error) `perm:"read"`
ChainHead func(context.Context) (*types.TipSet, error) `perm:"read"`
ChainGetRandomness func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"`
ChainGetBlock func(context.Context, cid.Cid) (*types.BlockHeader, error) `perm:"read"`
ChainGetTipSet func(context.Context, types.TipSetKey) (*types.TipSet, error) `perm:"read"`
ChainGetBlockMessages func(context.Context, cid.Cid) (*api.BlockMessages, error) `perm:"read"`
ChainGetParentReceipts func(context.Context, cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"`
ChainGetParentMessages func(context.Context, cid.Cid) ([]api.Message, error) `perm:"read"`
ChainGetTipSetByHeight func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) `perm:"read"`
ChainReadObj func(context.Context, cid.Cid) ([]byte, error) `perm:"read"`
ChainHasObj func(context.Context, cid.Cid) (bool, error) `perm:"read"`
ChainStatObj func(context.Context, cid.Cid, cid.Cid) (api.ObjStat, error) `perm:"read"`
ChainSetHead func(context.Context, types.TipSetKey) error `perm:"admin"`
ChainGetGenesis func(context.Context) (*types.TipSet, error) `perm:"read"`
ChainTipSetWeight func(context.Context, types.TipSetKey) (types.BigInt, error) `perm:"read"`
ChainGetNode func(ctx context.Context, p string) (*api.IpldObject, error) `perm:"read"`
ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"`
ChainGetPath func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"`
ChainExport func(context.Context, types.TipSetKey) (<-chan []byte, error) `perm:"read"`
SyncState func(context.Context) (*api.SyncState, error) `perm:"read"`
SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"`
SyncIncomingBlocks func(ctx context.Context) (<-chan *types.BlockHeader, error) `perm:"read"`
SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"`
SyncCheckBad func(ctx context.Context, bcid cid.Cid) (string, error) `perm:"read"`
MpoolPending func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"`
MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"`
MpoolPushMessage func(context.Context, *types.Message) (*types.SignedMessage, error) `perm:"sign"`
MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"`
MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"`
MpoolEstimateGasPrice func(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"`
MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"`
MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"`
WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"`
WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"`
WalletList func(context.Context) ([]address.Address, error) `perm:"write"`
WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"`
WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"`
WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"`
WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) bool `perm:"read"`
WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"`
WalletSetDefault func(context.Context, address.Address) error `perm:"admin"`
WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"`
WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"`
ClientImport func(ctx context.Context, ref api.FileRef) (cid.Cid, error) `perm:"admin"`
ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"`
ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"`
ClientFindData func(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"`
ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"`
ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"`
ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) `perm:"read"`
ClientCalcCommP func(ctx context.Context, inpath string, miner address.Address) (*api.CommPRet, error) `perm:"read"`
ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"`
StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"`
StateMinerSectors func(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
StateMinerProvingSet func(context.Context, address.Address, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"`
StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error) `perm:"read"`
StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) `perm:"read"`
StateMinerInfo func(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) `perm:"read"`
StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) (*miner.Deadlines, error) `perm:"read"`
StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) `perm:"read"`
StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"`
StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) `perm:"read"`
StateMinerInitialPledgeCollateral func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateSectorPreCommitInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"`
StateCall func(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) `perm:"read"`
StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"`
StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"`
StateReadState func(context.Context, *types.Actor, types.TipSetKey) (*api.ActorState, error) `perm:"read"`
StatePledgeCollateral func(context.Context, types.TipSetKey) (types.BigInt, error) `perm:"read"`
StateWaitMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"`
StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"`
StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
StateListActors func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"`
StateMarketBalance func(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) `perm:"read"`
StateMarketParticipants func(context.Context, types.TipSetKey) (map[string]api.MarketBalance, error) `perm:"read"`
StateMarketDeals func(context.Context, types.TipSetKey) (map[string]api.MarketDeal, error) `perm:"read"`
StateMarketStorageDeal func(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) `perm:"read"`
StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"`
StateAccountKey func(context.Context, address.Address, types.TipSetKey) (address.Address, error) `perm:"read"`
StateChangedActors func(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) `perm:"read"`
StateGetReceipt func(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"`
StateMinerSectorCount func(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) `perm:"read"`
StateListMessages func(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"`
StateCompute func(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"`
MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"`
MsigCreate func(context.Context, int64, []address.Address, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
MsigApprove func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
MsigCancel func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`
PaychGet func(ctx context.Context, from, to address.Address, ensureFunds types.BigInt) (*api.ChannelInfo, error) `perm:"sign"`
PaychList func(context.Context) ([]address.Address, error) `perm:"read"`
PaychStatus func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"`
PaychClose func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"`
PaychAllocateLane func(context.Context, address.Address) (uint64, error) `perm:"sign"`
PaychNewPayment func(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"`
PaychVoucherCheck func(context.Context, *paych.SignedVoucher) error `perm:"read"`
PaychVoucherCheckValid func(context.Context, address.Address, *paych.SignedVoucher) error `perm:"read"`
PaychVoucherCheckSpendable func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) `perm:"read"`
PaychVoucherAdd func(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"`
PaychVoucherCreate func(context.Context, address.Address, big.Int, uint64) (*paych.SignedVoucher, error) `perm:"sign"`
PaychVoucherList func(context.Context, address.Address) ([]*paych.SignedVoucher, error) `perm:"write"`
PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher) (cid.Cid, error) `perm:"sign"`
}
}
func (c *FullNodeStruct) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MinerSectors, error) {
return c.Internal.StateMinerSectorCount(ctx, addr, tsk)
}
type StorageMinerStruct struct {
CommonStruct
Internal struct {
ActorAddress func(context.Context) (address.Address, error) `perm:"read"`
ActorSectorSize func(context.Context, address.Address) (abi.SectorSize, error) `perm:"read"`
MiningBase func(context.Context) (*types.TipSet, error) `perm:"read"`
MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"`
MarketListDeals func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"`
MarketSetPrice func(context.Context, types.BigInt) error `perm:"admin"`
PledgeSector func(context.Context) error `perm:"write"`
SectorsStatus func(context.Context, abi.SectorNumber) (api.SectorInfo, error) `perm:"read"`
SectorsList func(context.Context) ([]abi.SectorNumber, error) `perm:"read"`
SectorsRefs func(context.Context) (map[string][]api.SealedRef, error) `perm:"read"`
SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error `perm:"write"`
WorkerConnect func(context.Context, string) error `perm:"admin"` // TODO: worker perm
WorkerStats func(context.Context) (map[uint64]storiface.WorkerStats, error) `perm:"admin"`
StorageList func(context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"`
StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"`
StorageStat func(context.Context, stores.ID) (stores.FsStat, error) `perm:"admin"`
StorageAttach func(context.Context, stores.StorageInfo, stores.FsStat) error `perm:"admin"`
StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType) error `perm:"admin"`
StorageDropSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType) error `perm:"admin"`
StorageFindSector func(context.Context, abi.SectorID, stores.SectorFileType, bool) ([]stores.StorageInfo, error) `perm:"admin"`
StorageInfo func(context.Context, stores.ID) (stores.StorageInfo, error) `perm:"admin"`
StorageBestAlloc func(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredProof, sealing bool) ([]stores.StorageInfo, error) `perm:"admin"`
StorageReportHealth func(ctx context.Context, id stores.ID, report stores.HealthReport) error `perm:"admin"`
DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"`
DealsList func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"`
StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"`
}
}
type WorkerStruct struct {
Internal struct {
// TODO: lower perms
Version func(context.Context) (build.Version, error) `perm:"admin"`
TaskTypes func(context.Context) (map[sealtasks.TaskType]struct{}, error) `perm:"admin"`
Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"`
Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"`
SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) `perm:"admin"`
SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"`
SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"`
SealCommit2 func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"`
FinalizeSector func(context.Context, abi.SectorID) error `perm:"admin"`
Fetch func(context.Context, abi.SectorID, stores.SectorFileType, bool) error `perm:"admin"`
Closing func(context.Context) (<-chan struct{}, error) `perm:"admin"`
}
}
// CommonStruct
func (c *CommonStruct) AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) {
return c.Internal.AuthVerify(ctx, token)
}
func (c *CommonStruct) AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) {
return c.Internal.AuthNew(ctx, perms)
}
func (c *CommonStruct) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) {
return c.Internal.NetConnectedness(ctx, pid)
}
func (c *CommonStruct) NetPeers(ctx context.Context) ([]peer.AddrInfo, error) {
return c.Internal.NetPeers(ctx)
}
func (c *CommonStruct) NetConnect(ctx context.Context, p peer.AddrInfo) error {
return c.Internal.NetConnect(ctx, p)
}
func (c *CommonStruct) NetAddrsListen(ctx context.Context) (peer.AddrInfo, error) {
return c.Internal.NetAddrsListen(ctx)
}
func (c *CommonStruct) NetDisconnect(ctx context.Context, p peer.ID) error {
return c.Internal.NetDisconnect(ctx, p)
}
func (c *CommonStruct) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) {
return c.Internal.NetFindPeer(ctx, p)
}
// ID implements API.ID
func (c *CommonStruct) ID(ctx context.Context) (peer.ID, error) {
return c.Internal.ID(ctx)
}
// Version implements API.Version
func (c *CommonStruct) Version(ctx context.Context) (api.Version, error) {
return c.Internal.Version(ctx)
}
func (c *CommonStruct) LogList(ctx context.Context) ([]string, error) {
return c.Internal.LogList(ctx)
}
func (c *CommonStruct) LogSetLevel(ctx context.Context, group, level string) error {
return c.Internal.LogSetLevel(ctx, group, level)
}
func (c *CommonStruct) Shutdown(ctx context.Context) error {
return c.Internal.Shutdown(ctx)
}
// FullNodeStruct
func (c *FullNodeStruct) ClientListImports(ctx context.Context) ([]api.Import, error) {
return c.Internal.ClientListImports(ctx)
}
func (c *FullNodeStruct) ClientImport(ctx context.Context, ref api.FileRef) (cid.Cid, error) {
return c.Internal.ClientImport(ctx, ref)
}
func (c *FullNodeStruct) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) {
return c.Internal.ClientHasLocal(ctx, root)
}
func (c *FullNodeStruct) ClientFindData(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) {
return c.Internal.ClientFindData(ctx, root)
}
func (c *FullNodeStruct) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) {
return c.Internal.ClientStartDeal(ctx, params)
}
func (c *FullNodeStruct) ClientGetDealInfo(ctx context.Context, deal cid.Cid) (*api.DealInfo, error) {
return c.Internal.ClientGetDealInfo(ctx, deal)
}
func (c *FullNodeStruct) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) {
return c.Internal.ClientListDeals(ctx)
}
func (c *FullNodeStruct) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error {
return c.Internal.ClientRetrieve(ctx, order, ref)
}
func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) {
return c.Internal.ClientQueryAsk(ctx, p, miner)
}
func (c *FullNodeStruct) ClientCalcCommP(ctx context.Context, inpath string, miner address.Address) (*api.CommPRet, error) {
return c.Internal.ClientCalcCommP(ctx, inpath, miner)
}
func (c *FullNodeStruct) ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error {
return c.Internal.ClientGenCar(ctx, ref, outpath)
}
func (c *FullNodeStruct) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) {
return c.Internal.MpoolPending(ctx, tsk)
}
func (c *FullNodeStruct) MpoolPush(ctx context.Context, smsg *types.SignedMessage) (cid.Cid, error) {
return c.Internal.MpoolPush(ctx, smsg)
}
func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Message) (*types.SignedMessage, error) {
return c.Internal.MpoolPushMessage(ctx, msg)
}
func (c *FullNodeStruct) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) {
return c.Internal.MpoolSub(ctx)
}
func (c *FullNodeStruct) MpoolEstimateGasPrice(ctx context.Context, nblocksincl uint64, sender address.Address, limit int64, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.MpoolEstimateGasPrice(ctx, nblocksincl, sender, limit, tsk)
}
func (c *FullNodeStruct) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) {
return c.Internal.MinerGetBaseInfo(ctx, maddr, epoch, tsk)
}
func (c *FullNodeStruct) MinerCreateBlock(ctx context.Context, bt *api.BlockTemplate) (*types.BlockMsg, error) {
return c.Internal.MinerCreateBlock(ctx, bt)
}
func (c *FullNodeStruct) ChainHead(ctx context.Context) (*types.TipSet, error) {
return c.Internal.ChainHead(ctx)
}
func (c *FullNodeStruct) ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
return c.Internal.ChainGetRandomness(ctx, tsk, personalization, randEpoch, entropy)
}
func (c *FullNodeStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) {
return c.Internal.ChainGetTipSetByHeight(ctx, h, tsk)
}
func (c *FullNodeStruct) WalletNew(ctx context.Context, typ crypto.SigType) (address.Address, error) {
return c.Internal.WalletNew(ctx, typ)
}
func (c *FullNodeStruct) WalletHas(ctx context.Context, addr address.Address) (bool, error) {
return c.Internal.WalletHas(ctx, addr)
}
func (c *FullNodeStruct) WalletList(ctx context.Context) ([]address.Address, error) {
return c.Internal.WalletList(ctx)
}
func (c *FullNodeStruct) WalletBalance(ctx context.Context, a address.Address) (types.BigInt, error) {
return c.Internal.WalletBalance(ctx, a)
}
func (c *FullNodeStruct) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) {
return c.Internal.WalletSign(ctx, k, msg)
}
func (c *FullNodeStruct) WalletSignMessage(ctx context.Context, k address.Address, msg *types.Message) (*types.SignedMessage, error) {
return c.Internal.WalletSignMessage(ctx, k, msg)
}
func (c *FullNodeStruct) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) bool {
return c.Internal.WalletVerify(ctx, k, msg, sig)
}
func (c *FullNodeStruct) WalletDefaultAddress(ctx context.Context) (address.Address, error) {
return c.Internal.WalletDefaultAddress(ctx)
}
func (c *FullNodeStruct) WalletSetDefault(ctx context.Context, a address.Address) error {
return c.Internal.WalletSetDefault(ctx, a)
}
func (c *FullNodeStruct) WalletExport(ctx context.Context, a address.Address) (*types.KeyInfo, error) {
return c.Internal.WalletExport(ctx, a)
}
func (c *FullNodeStruct) WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) {
return c.Internal.WalletImport(ctx, ki)
}
func (c *FullNodeStruct) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) {
return c.Internal.MpoolGetNonce(ctx, addr)
}
func (c *FullNodeStruct) ChainGetBlock(ctx context.Context, b cid.Cid) (*types.BlockHeader, error) {
return c.Internal.ChainGetBlock(ctx, b)
}
func (c *FullNodeStruct) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) {
return c.Internal.ChainGetTipSet(ctx, key)
}
func (c *FullNodeStruct) ChainGetBlockMessages(ctx context.Context, b cid.Cid) (*api.BlockMessages, error) {
return c.Internal.ChainGetBlockMessages(ctx, b)
}
func (c *FullNodeStruct) ChainGetParentReceipts(ctx context.Context, b cid.Cid) ([]*types.MessageReceipt, error) {
return c.Internal.ChainGetParentReceipts(ctx, b)
}
func (c *FullNodeStruct) ChainGetParentMessages(ctx context.Context, b cid.Cid) ([]api.Message, error) {
return c.Internal.ChainGetParentMessages(ctx, b)
}
func (c *FullNodeStruct) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) {
return c.Internal.ChainNotify(ctx)
}
func (c *FullNodeStruct) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) {
return c.Internal.ChainReadObj(ctx, obj)
}
func (c *FullNodeStruct) ChainHasObj(ctx context.Context, o cid.Cid) (bool, error) {
return c.Internal.ChainHasObj(ctx, o)
}
func (c *FullNodeStruct) ChainStatObj(ctx context.Context, obj, base cid.Cid) (api.ObjStat, error) {
return c.Internal.ChainStatObj(ctx, obj, base)
}
func (c *FullNodeStruct) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error {
return c.Internal.ChainSetHead(ctx, tsk)
}
func (c *FullNodeStruct) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) {
return c.Internal.ChainGetGenesis(ctx)
}
func (c *FullNodeStruct) ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.ChainTipSetWeight(ctx, tsk)
}
func (c *FullNodeStruct) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) {
return c.Internal.ChainGetNode(ctx, p)
}
func (c *FullNodeStruct) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) {
return c.Internal.ChainGetMessage(ctx, mc)
}
func (c *FullNodeStruct) ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) {
return c.Internal.ChainGetPath(ctx, from, to)
}
func (c *FullNodeStruct) ChainExport(ctx context.Context, tsk types.TipSetKey) (<-chan []byte, error) {
return c.Internal.ChainExport(ctx, tsk)
}
func (c *FullNodeStruct) SyncState(ctx context.Context) (*api.SyncState, error) {
return c.Internal.SyncState(ctx)
}
func (c *FullNodeStruct) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error {
return c.Internal.SyncSubmitBlock(ctx, blk)
}
func (c *FullNodeStruct) SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) {
return c.Internal.SyncIncomingBlocks(ctx)
}
func (c *FullNodeStruct) SyncMarkBad(ctx context.Context, bcid cid.Cid) error {
return c.Internal.SyncMarkBad(ctx, bcid)
}
func (c *FullNodeStruct) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) {
return c.Internal.SyncCheckBad(ctx, bcid)
}
func (c *FullNodeStruct) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) {
return c.Internal.StateNetworkName(ctx)
}
func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Address, filter *abi.BitField, filterOut bool, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
return c.Internal.StateMinerSectors(ctx, addr, filter, filterOut, tsk)
}
func (c *FullNodeStruct) StateMinerProvingSet(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) {
return c.Internal.StateMinerProvingSet(ctx, addr, tsk)
}
func (c *FullNodeStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*miner.DeadlineInfo, error) {
return c.Internal.StateMinerProvingDeadline(ctx, addr, tsk)
}
func (c *FullNodeStruct) StateMinerPower(ctx context.Context, a address.Address, tsk types.TipSetKey) (*api.MinerPower, error) {
return c.Internal.StateMinerPower(ctx, a, tsk)
}
func (c *FullNodeStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) {
return c.Internal.StateMinerInfo(ctx, actor, tsk)
}
func (c *FullNodeStruct) StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) (*miner.Deadlines, error) {
return c.Internal.StateMinerDeadlines(ctx, m, tsk)
}
func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*abi.BitField, error) {
return c.Internal.StateMinerFaults(ctx, actor, tsk)
}
func (c *FullNodeStruct) StateAllMinerFaults(ctx context.Context, cutoff abi.ChainEpoch, endTsk types.TipSetKey) ([]*api.Fault, error) {
return c.Internal.StateAllMinerFaults(ctx, cutoff, endTsk)
}
func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*abi.BitField, error) {
return c.Internal.StateMinerRecoveries(ctx, actor, tsk)
}
func (c *FullNodeStruct) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, snum abi.SectorNumber, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.StateMinerInitialPledgeCollateral(ctx, maddr, snum, tsk)
}
func (c *FullNodeStruct) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.StateMinerAvailableBalance(ctx, maddr, tsk)
}
func (c *FullNodeStruct) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
return c.Internal.StateSectorPreCommitInfo(ctx, maddr, n, tsk)
}
func (c *FullNodeStruct) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) {
return c.Internal.StateCall(ctx, msg, tsk)
}
func (c *FullNodeStruct) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid.Cid) (*api.InvocResult, error) {
return c.Internal.StateReplay(ctx, tsk, mc)
}
func (c *FullNodeStruct) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) {
return c.Internal.StateGetActor(ctx, actor, tsk)
}
func (c *FullNodeStruct) StateReadState(ctx context.Context, act *types.Actor, tsk types.TipSetKey) (*api.ActorState, error) {
return c.Internal.StateReadState(ctx, act, tsk)
}
func (c *FullNodeStruct) StatePledgeCollateral(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.StatePledgeCollateral(ctx, tsk)
}
func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid) (*api.MsgLookup, error) {
return c.Internal.StateWaitMsg(ctx, msgc)
}
func (c *FullNodeStruct) StateSearchMsg(ctx context.Context, msgc cid.Cid) (*api.MsgLookup, error) {
return c.Internal.StateSearchMsg(ctx, msgc)
}
func (c *FullNodeStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) {
return c.Internal.StateListMiners(ctx, tsk)
}
func (c *FullNodeStruct) StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) {
return c.Internal.StateListActors(ctx, tsk)
}
func (c *FullNodeStruct) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) {
return c.Internal.StateMarketBalance(ctx, addr, tsk)
}
func (c *FullNodeStruct) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketBalance, error) {
return c.Internal.StateMarketParticipants(ctx, tsk)
}
func (c *FullNodeStruct) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketDeal, error) {
return c.Internal.StateMarketDeals(ctx, tsk)
}
func (c *FullNodeStruct) StateMarketStorageDeal(ctx context.Context, dealid abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) {
return c.Internal.StateMarketStorageDeal(ctx, dealid, tsk)
}
func (c *FullNodeStruct) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) {
return c.Internal.StateLookupID(ctx, addr, tsk)
}
func (c *FullNodeStruct) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) {
return c.Internal.StateAccountKey(ctx, addr, tsk)
}
func (c *FullNodeStruct) StateChangedActors(ctx context.Context, oldstate cid.Cid, newstate cid.Cid) (map[string]types.Actor, error) {
return c.Internal.StateChangedActors(ctx, oldstate, newstate)
}
func (c *FullNodeStruct) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) {
return c.Internal.StateGetReceipt(ctx, msg, tsk)
}
func (c *FullNodeStruct) StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) {
return c.Internal.StateListMessages(ctx, match, tsk, toht)
}
func (c *FullNodeStruct) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs []*types.Message, tsk types.TipSetKey) (*api.ComputeStateOutput, error) {
return c.Internal.StateCompute(ctx, height, msgs, tsk)
}
func (c *FullNodeStruct) MsigGetAvailableBalance(ctx context.Context, a address.Address, tsk types.TipSetKey) (types.BigInt, error) {
return c.Internal.MsigGetAvailableBalance(ctx, a, tsk)
}
func (c *FullNodeStruct) MsigCreate(ctx context.Context, req int64, addrs []address.Address, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
return c.Internal.MsigCreate(ctx, req, addrs, val, src, gp)
}
func (c *FullNodeStruct) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
return c.Internal.MsigPropose(ctx, msig, to, amt, src, method, params)
}
func (c *FullNodeStruct) MsigApprove(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
return c.Internal.MsigApprove(ctx, msig, txID, proposer, to, amt, src, method, params)
}
func (c *FullNodeStruct) MsigCancel(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
return c.Internal.MsigCancel(ctx, msig, txID, proposer, to, amt, src, method, params)
}
func (c *FullNodeStruct) MarketEnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) {
return c.Internal.MarketEnsureAvailable(ctx, addr, wallet, amt)
}
func (c *FullNodeStruct) PaychGet(ctx context.Context, from, to address.Address, ensureFunds types.BigInt) (*api.ChannelInfo, error) {
return c.Internal.PaychGet(ctx, from, to, ensureFunds)
}
func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) {
return c.Internal.PaychList(ctx)
}
func (c *FullNodeStruct) PaychStatus(ctx context.Context, pch address.Address) (*api.PaychStatus, error) {
return c.Internal.PaychStatus(ctx, pch)
}
func (c *FullNodeStruct) PaychVoucherCheckValid(ctx context.Context, addr address.Address, sv *paych.SignedVoucher) error {
return c.Internal.PaychVoucherCheckValid(ctx, addr, sv)
}
func (c *FullNodeStruct) PaychVoucherCheckSpendable(ctx context.Context, addr address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (bool, error) {
return c.Internal.PaychVoucherCheckSpendable(ctx, addr, sv, secret, proof)
}
func (c *FullNodeStruct) PaychVoucherAdd(ctx context.Context, addr address.Address, sv *paych.SignedVoucher, proof []byte, minDelta types.BigInt) (types.BigInt, error) {
return c.Internal.PaychVoucherAdd(ctx, addr, sv, proof, minDelta)
}
func (c *FullNodeStruct) PaychVoucherCreate(ctx context.Context, pch address.Address, amt types.BigInt, lane uint64) (*paych.SignedVoucher, error) {
return c.Internal.PaychVoucherCreate(ctx, pch, amt, lane)
}
func (c *FullNodeStruct) PaychVoucherList(ctx context.Context, pch address.Address) ([]*paych.SignedVoucher, error) {
return c.Internal.PaychVoucherList(ctx, pch)
}
func (c *FullNodeStruct) PaychClose(ctx context.Context, a address.Address) (cid.Cid, error) {
return c.Internal.PaychClose(ctx, a)
}
func (c *FullNodeStruct) PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) {
return c.Internal.PaychAllocateLane(ctx, ch)
}
func (c *FullNodeStruct) PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) {
return c.Internal.PaychNewPayment(ctx, from, to, vouchers)
}
func (c *FullNodeStruct) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (cid.Cid, error) {
return c.Internal.PaychVoucherSubmit(ctx, ch, sv)
}
// StorageMinerStruct
func (c *StorageMinerStruct) ActorAddress(ctx context.Context) (address.Address, error) {
return c.Internal.ActorAddress(ctx)
}
func (c *StorageMinerStruct) MiningBase(ctx context.Context) (*types.TipSet, error) {
return c.Internal.MiningBase(ctx)
}
func (c *StorageMinerStruct) ActorSectorSize(ctx context.Context, addr address.Address) (abi.SectorSize, error) {
return c.Internal.ActorSectorSize(ctx, addr)
}
func (c *StorageMinerStruct) PledgeSector(ctx context.Context) error {
return c.Internal.PledgeSector(ctx)
}
// Get the status of a given sector by ID
func (c *StorageMinerStruct) SectorsStatus(ctx context.Context, sid abi.SectorNumber) (api.SectorInfo, error) {
return c.Internal.SectorsStatus(ctx, sid)
}
// List all staged sectors
func (c *StorageMinerStruct) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) {
return c.Internal.SectorsList(ctx)
}
func (c *StorageMinerStruct) SectorsRefs(ctx context.Context) (map[string][]api.SealedRef, error) {
return c.Internal.SectorsRefs(ctx)
}
func (c *StorageMinerStruct) SectorsUpdate(ctx context.Context, id abi.SectorNumber, state api.SectorState) error {
return c.Internal.SectorsUpdate(ctx, id, state)
}
func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) error {
return c.Internal.WorkerConnect(ctx, url)
}
func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (map[uint64]storiface.WorkerStats, error) {
return c.Internal.WorkerStats(ctx)
}
func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.StorageInfo, st stores.FsStat) error {
return c.Internal.StorageAttach(ctx, si, st)
}
func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType) error {
return c.Internal.StorageDeclareSector(ctx, storageId, s, ft)
}
func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType) error {
return c.Internal.StorageDropSector(ctx, storageId, s, ft)
}
func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types stores.SectorFileType, allowFetch bool) ([]stores.StorageInfo, error) {
return c.Internal.StorageFindSector(ctx, si, types, allowFetch)
}
func (c *StorageMinerStruct) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) {
return c.Internal.StorageList(ctx)
}
func (c *StorageMinerStruct) StorageLocal(ctx context.Context) (map[stores.ID]string, error) {
return c.Internal.StorageLocal(ctx)
}
func (c *StorageMinerStruct) StorageStat(ctx context.Context, id stores.ID) (stores.FsStat, error) {
return c.Internal.StorageStat(ctx, id)
}
func (c *StorageMinerStruct) StorageInfo(ctx context.Context, id stores.ID) (stores.StorageInfo, error) {
return c.Internal.StorageInfo(ctx, id)
}
func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredProof, sealing bool) ([]stores.StorageInfo, error) {
return c.Internal.StorageBestAlloc(ctx, allocate, spt, sealing)
}
func (c *StorageMinerStruct) StorageReportHealth(ctx context.Context, id stores.ID, report stores.HealthReport) error {
return c.Internal.StorageReportHealth(ctx, id, report)
}
func (c *StorageMinerStruct) MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error {
return c.Internal.MarketImportDealData(ctx, propcid, path)
}
func (c *StorageMinerStruct) MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error) {
return c.Internal.MarketListDeals(ctx)
}
func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) {
return c.Internal.MarketListIncompleteDeals(ctx)
}
func (c *StorageMinerStruct) MarketSetPrice(ctx context.Context, p types.BigInt) error {
return c.Internal.MarketSetPrice(ctx, p)
}
func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error {
return c.Internal.DealsImportData(ctx, dealPropCid, file)
}
func (c *StorageMinerStruct) DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error) {
return c.Internal.DealsList(ctx)
}
func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) error {
return c.Internal.StorageAddLocal(ctx, path)
}
// WorkerStruct
func (w *WorkerStruct) Version(ctx context.Context) (build.Version, error) {
return w.Internal.Version(ctx)
}
func (w *WorkerStruct) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) {
return w.Internal.TaskTypes(ctx)
}
func (w *WorkerStruct) Paths(ctx context.Context) ([]stores.StoragePath, error) {
return w.Internal.Paths(ctx)
}
func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) {
return w.Internal.Info(ctx)
}
func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) {
return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces)
}
func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, p1o storage.PreCommit1Out) (storage.SectorCids, error) {
return w.Internal.SealPreCommit2(ctx, sector, p1o)
}
func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) {
return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids)
}
func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) {
return w.Internal.SealCommit2(ctx, sector, c1o)
}
func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID) error {
return w.Internal.FinalizeSector(ctx, sector)
}
func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, b bool) error {
return w.Internal.Fetch(ctx, id, fileType, b)
}
func (w *WorkerStruct) Closing(ctx context.Context) (<-chan struct{}, error) {
return w.Internal.Closing(ctx)
}
var _ api.Common = &CommonStruct{}
var _ api.FullNode = &FullNodeStruct{}
var _ api.StorageMiner = &StorageMinerStruct{}
var _ api.WorkerApi = &WorkerStruct{}

View File

@ -1,9 +0,0 @@
package apistruct
import "testing"
func TestPermTags(t *testing.T) {
_ = PermissionedFullAPI(&FullNodeStruct{})
_ = PermissionedStorMinerAPI(&StorageMinerStruct{})
_ = PermissionedWorkerAPI(&WorkerStruct{})
}
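The deleted test above only asserted that each struct is accepted by its permissioned wrapper. The diff doesn't show how the perm:"admin" tags are consumed; one plausible sketch (a hypothetical helper, not the actual go-jsonrpc/auth code) reads the tags off the Internal fields via reflection:

package main

import (
    "fmt"
    "reflect"
)

type demoStruct struct {
    Internal struct {
        Shutdown func() error `perm:"admin"`
        Version  func() error `perm:"read"`
    }
}

// permsOf collects the perm tag of every field in a struct's Internal
// block. Hypothetical helper for illustration only; real enforcement
// would wrap each func to check the caller's token first.
func permsOf(v interface{}) map[string]string {
    out := map[string]string{}
    internal := reflect.ValueOf(v).Elem().FieldByName("Internal").Type()
    for i := 0; i < internal.NumField(); i++ {
        f := internal.Field(i)
        out[f.Name] = f.Tag.Get("perm")
    }
    return out
}

func main() {
    fmt.Println(permsOf(&demoStruct{})) // map[Shutdown:admin Version:read]
}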

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,35 @@
// Code generated by "stringer -type=CheckStatusCode -trimprefix=CheckStatus"; DO NOT EDIT.
package api
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[CheckStatusMessageSerialize-1]
_ = x[CheckStatusMessageSize-2]
_ = x[CheckStatusMessageValidity-3]
_ = x[CheckStatusMessageMinGas-4]
_ = x[CheckStatusMessageMinBaseFee-5]
_ = x[CheckStatusMessageBaseFee-6]
_ = x[CheckStatusMessageBaseFeeLowerBound-7]
_ = x[CheckStatusMessageBaseFeeUpperBound-8]
_ = x[CheckStatusMessageGetStateNonce-9]
_ = x[CheckStatusMessageNonce-10]
_ = x[CheckStatusMessageGetStateBalance-11]
_ = x[CheckStatusMessageBalance-12]
}
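The _() function above is stringer's compile-time guard: each x[Const-N] index only compiles while the constant still equals N, so renumbering the enum without regenerating this file breaks the build. The same trick in isolation:

package main

type Color int

const (
    Red Color = iota + 1
    Green
)

// If Red or Green is ever renumbered, one of these indexes becomes
// out of range for the 1-element array and the compiler rejects the file.
func _() {
    var x [1]struct{}
    _ = x[Red-1]
    _ = x[Green-2]
}

func main() {}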
const _CheckStatusCode_name = "MessageSerializeMessageSizeMessageValidityMessageMinGasMessageMinBaseFeeMessageBaseFeeMessageBaseFeeLowerBoundMessageBaseFeeUpperBoundMessageGetStateNonceMessageNonceMessageGetStateBalanceMessageBalance"
var _CheckStatusCode_index = [...]uint8{0, 16, 27, 42, 55, 72, 86, 110, 134, 154, 166, 188, 202}
func (i CheckStatusCode) String() string {
i -= 1
if i < 0 || i >= CheckStatusCode(len(_CheckStatusCode_index)-1) {
return "CheckStatusCode(" + strconv.FormatInt(int64(i+1), 10) + ")"
}
return _CheckStatusCode_name[_CheckStatusCode_index[i]:_CheckStatusCode_index[i+1]]
}
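String() slices a single concatenated name string using an index table: CheckStatusMessageSize is 2, so after i -= 1 it returns _CheckStatusCode_name[16:27], i.e. "MessageSize". A reduced version of the same lookup:

package main

import "fmt"

const names = "MessageSerializeMessageSize"

var index = [...]uint8{0, 16, 27}

func name(i int) string {
    i-- // constants start at 1, the table at 0
    if i < 0 || i >= len(index)-1 {
        return fmt.Sprintf("CheckStatusCode(%d)", i+1)
    }
    return names[index[i]:index[i+1]]
}

func main() {
    fmt.Println(name(2)) // MessageSize
}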

View File

@ -1,60 +1,132 @@
package client
import (
"context"
"net/http"
"net/url"
"path"
"time"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/apistruct"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/lib/rpcenc"
)
// NewCommonRPCV0 creates a new http jsonrpc client.
func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
var res v0api.CommonNetStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
return &res, closer, err
}
// NewFullNodeRPCV0 creates a new http jsonrpc client.
func NewFullNodeRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.FullNode, jsonrpc.ClientCloser, error) {
var res v0api.FullNodeStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
return &res, closer, err
}
// NewFullNodeRPCV1 creates a new http jsonrpc client.
func NewFullNodeRPCV1(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.FullNode, jsonrpc.ClientCloser, error) {
var res v1api.FullNodeStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res), requestHeader, append([]jsonrpc.Option{jsonrpc.WithErrors(api.RPCErrors)}, opts...)...)
return &res, closer, err
}
func getPushUrl(addr string) (string, error) {
pushUrl, err := url.Parse(addr)
if err != nil {
return "", err
}
switch pushUrl.Scheme {
case "ws":
pushUrl.Scheme = "http"
case "wss":
pushUrl.Scheme = "https"
}
// /rpc/v0 -> /rpc/streams/v0/push
pushUrl.Path = path.Join(pushUrl.Path, "../streams/v0/push")
return pushUrl.String(), nil
}
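getPushUrl turns a websocket RPC endpoint into the HTTP endpoint used for streaming request payloads; path.Join resolves the ".." so /rpc/v0 becomes /rpc/streams/v0/push. A quick standalone check of that rewrite (example address only):

package main

import (
    "fmt"
    "net/url"
    "path"
)

func main() {
    u, _ := url.Parse("ws://1.2.3.4:1234/rpc/v0")
    u.Scheme = "http"
    u.Path = path.Join(u.Path, "../streams/v0/push")
    fmt.Println(u.String()) // http://1.2.3.4:1234/rpc/streams/v0/push
}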
// NewStorageMinerRPCV0 creates a new http jsonrpc client for miner
func NewStorageMinerRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.StorageMiner, jsonrpc.ClientCloser, error) {
pushUrl, err := getPushUrl(addr)
if err != nil {
return nil, nil, err
}
var res v0api.StorageMinerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res), requestHeader,
append([]jsonrpc.Option{
rpcenc.ReaderParamEncoder(pushUrl),
jsonrpc.WithErrors(api.RPCErrors),
}, opts...)...)
return &res, closer, err
}
func NewWorkerRPCV0(ctx context.Context, addr string, requestHeader http.Header) (v0api.Worker, jsonrpc.ClientCloser, error) {
pushUrl, err := getPushUrl(addr)
if err != nil {
return nil, nil, err
}
var res api.WorkerStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res),
requestHeader,
rpcenc.ReaderParamEncoder(pushUrl),
jsonrpc.WithNoReconnect(),
jsonrpc.WithTimeout(30*time.Second),
jsonrpc.WithErrors(api.RPCErrors),
)
return &res, closer, err
}
// NewGatewayRPCV1 creates a new http jsonrpc client for a gateway node.
func NewGatewayRPCV1(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.Gateway, jsonrpc.ClientCloser, error) {
var res api.GatewayStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res),
requestHeader,
append(opts, jsonrpc.WithErrors(api.RPCErrors))...,
)
return &res, closer, err
}
// NewGatewayRPCV0 creates a new http jsonrpc client for a gateway node.
func NewGatewayRPCV0(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (v0api.Gateway, jsonrpc.ClientCloser, error) {
var res v0api.GatewayStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res),
requestHeader,
append(opts, jsonrpc.WithErrors(api.RPCErrors))...,
)
return &res, closer, err
}
func NewWalletRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Wallet, jsonrpc.ClientCloser, error) {
var res api.WalletStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res),
requestHeader,
jsonrpc.WithErrors(api.RPCErrors),
)
return &res, closer, err
}
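Typical use of these constructors, sketched here with a placeholder endpoint and token (in practice both come from the node's api and token files): dial, defer the closer, then call methods on the returned interface.

package main

import (
    "context"
    "fmt"
    "net/http"

    "github.com/filecoin-project/lotus/api/client"
)

func main() {
    ctx := context.Background()
    // Placeholder values; real ones live in ~/.lotus/api and ~/.lotus/token.
    header := http.Header{"Authorization": []string{"Bearer <token>"}}
    full, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1", header)
    if err != nil {
        panic(err)
    }
    defer closer()

    head, err := full.ChainHead(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println("head at height", head.Height())
}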

View File

@ -0,0 +1,73 @@
package main
import (
"compress/gzip"
"encoding/json"
"io"
"log"
"os"
"github.com/filecoin-project/lotus/api/docgen"
docgen_openrpc "github.com/filecoin-project/lotus/api/docgen-openrpc"
)
/*
main defines a small program that writes an OpenRPC document describing
a Lotus API to stdout.
If the first argument is "miner", the document will describe the StorageMiner API.
If not (no, or any other args), the document will describe the Full API.
Use:
go run ./api/openrpc/cmd ["api/api_full.go"|"api/api_storage.go"|"api/api_worker.go"] ["FullNode"|"StorageMiner"|"Worker"]
With gzip compression: a '-gzip' flag is made available as an optional third argument. Note that position matters.
go run ./api/openrpc/cmd ["api/api_full.go"|"api/api_storage.go"|"api/api_worker.go"] ["FullNode"|"StorageMiner"|"Worker"] -gzip
*/
func main() {
Comments, GroupDocs := docgen.ParseApiASTInfo(os.Args[1], os.Args[2], os.Args[3], os.Args[4])
doc := docgen_openrpc.NewLotusOpenRPCDocument(Comments, GroupDocs)
i, _, _ := docgen.GetAPIType(os.Args[2], os.Args[3])
doc.RegisterReceiverName("Filecoin", i)
out, err := doc.Discover()
if err != nil {
log.Fatalln(err)
}
var jsonOut []byte
var writer io.WriteCloser
// Use os.Args to handle a somewhat hacky flag for the gzip option.
// The flags package would handle this more cleanly, but that requires changes elsewhere,
// the scope of which isn't warranted by this one use case, which will usually be run
// programmatically anyway.
if len(os.Args) > 5 && os.Args[5] == "-gzip" {
jsonOut, err = json.Marshal(out)
if err != nil {
log.Fatalln(err)
}
writer = gzip.NewWriter(os.Stdout)
} else {
jsonOut, err = json.MarshalIndent(out, "", " ")
if err != nil {
log.Fatalln(err)
}
writer = os.Stdout
}
_, err = writer.Write(jsonOut)
if err != nil {
log.Fatalln(err)
}
err = writer.Close()
if err != nil {
log.Fatalln(err)
}
}
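With -gzip, consumers must decompress before decoding. A minimal stdlib-only reader for the emitted document (the file name is assumed, wherever stdout was redirected):

package main

import (
    "compress/gzip"
    "encoding/json"
    "log"
    "os"
)

func main() {
    f, err := os.Open("openrpc.json.gz")
    if err != nil {
        log.Fatalln(err)
    }
    defer f.Close()

    zr, err := gzip.NewReader(f)
    if err != nil {
        log.Fatalln(err)
    }
    defer zr.Close()

    var doc map[string]interface{}
    if err := json.NewDecoder(zr).Decode(&doc); err != nil {
        log.Fatalln(err)
    }
    log.Println("openrpc version:", doc["openrpc"])
}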

View File

@ -0,0 +1,162 @@
package docgenopenrpc
import (
"encoding/json"
"go/ast"
"net"
"reflect"
"github.com/alecthomas/jsonschema"
go_openrpc_reflect "github.com/etclabscore/go-openrpc-reflect"
"github.com/ipfs/go-cid"
meta_schema "github.com/open-rpc/meta-schema"
"github.com/filecoin-project/lotus/api/docgen"
"github.com/filecoin-project/lotus/build"
)
// schemaDictEntry represents a type association passed to the jsonschema reflector.
type schemaDictEntry struct {
example interface{}
rawJson string
}
const integerD = `{
"title": "number",
"type": "number",
"description": "Number is a number"
}`
const cidCidD = `{"title": "Content Identifier", "type": "string", "description": "Cid represents a self-describing content addressed identifier. It is formed by a Version, a Codec (which indicates a multicodec-packed content type) and a Multihash."}`
func OpenRPCSchemaTypeMapper(ty reflect.Type) *jsonschema.Type {
unmarshalJSONToJSONSchemaType := func(input string) *jsonschema.Type {
var js jsonschema.Type
err := json.Unmarshal([]byte(input), &js)
if err != nil {
panic(err)
}
return &js
}
if ty.Kind() == reflect.Ptr {
ty = ty.Elem()
}
if ty == reflect.TypeOf((*interface{})(nil)).Elem() {
return &jsonschema.Type{Type: "object", AdditionalProperties: []byte("true")}
}
// Second, handle other types.
// Use a slice instead of a map because it preserves order, as a logic safeguard/fallback.
dict := []schemaDictEntry{
{cid.Cid{}, cidCidD},
}
for _, d := range dict {
if reflect.TypeOf(d.example) == ty {
tt := unmarshalJSONToJSONSchemaType(d.rawJson)
return tt
}
}
// Handle primitive types in case there are generic cases
// specific to our services.
switch ty.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
// Return all integer types as the hex representation integer schema.
ret := unmarshalJSONToJSONSchemaType(integerD)
return ret
case reflect.Uintptr:
return &jsonschema.Type{Type: "number", Title: "uintptr-title"}
case reflect.Struct:
case reflect.Map:
case reflect.Slice, reflect.Array:
case reflect.Float32, reflect.Float64:
case reflect.Bool:
case reflect.String:
case reflect.Ptr, reflect.Interface:
default:
}
return nil
}
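The net effect of the mapper above: every integer kind maps to the "number" schema, cid.Cid maps to a string schema, and everything else returns nil to fall back to default schema reflection. Exercising it directly:

package main

import (
    "fmt"
    "reflect"

    "github.com/ipfs/go-cid"

    docgen_openrpc "github.com/filecoin-project/lotus/api/docgen-openrpc"
)

func main() {
    if t := docgen_openrpc.OpenRPCSchemaTypeMapper(reflect.TypeOf(uint64(0))); t != nil {
        fmt.Println(t.Type) // number
    }
    if t := docgen_openrpc.OpenRPCSchemaTypeMapper(reflect.TypeOf(cid.Cid{})); t != nil {
        fmt.Println(t.Title) // Content Identifier
    }
    // Structs fall through to nil, i.e. default schema reflection.
    fmt.Println(docgen_openrpc.OpenRPCSchemaTypeMapper(reflect.TypeOf(struct{}{})) == nil) // true
}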
// NewLotusOpenRPCDocument defines application-specific documentation and configuration for its OpenRPC document.
func NewLotusOpenRPCDocument(Comments, GroupDocs map[string]string) *go_openrpc_reflect.Document {
d := &go_openrpc_reflect.Document{}
// Register "Meta" document fields.
// These include getters for
// - Servers object
// - Info object
// - ExternalDocs object
//
// These objects represent server-specific data that cannot be
// reflected.
d.WithMeta(&go_openrpc_reflect.MetaT{
GetServersFn: func() func(listeners []net.Listener) (*meta_schema.Servers, error) {
return func(listeners []net.Listener) (*meta_schema.Servers, error) {
return nil, nil
}
},
GetInfoFn: func() (info *meta_schema.InfoObject) {
info = &meta_schema.InfoObject{}
title := "Lotus RPC API"
info.Title = (*meta_schema.InfoObjectProperties)(&title)
version := build.BuildVersion
info.Version = (*meta_schema.InfoObjectVersion)(&version)
return info
},
GetExternalDocsFn: func() (exdocs *meta_schema.ExternalDocumentationObject) {
return nil // FIXME
},
})
// Use a provided Ethereum default configuration as a base.
appReflector := &go_openrpc_reflect.EthereumReflectorT{}
// Install overrides for the json schema->type map fn used by the jsonschema reflect package.
appReflector.FnSchemaTypeMap = func() func(ty reflect.Type) *jsonschema.Type {
return OpenRPCSchemaTypeMapper
}
appReflector.FnIsMethodEligible = func(m reflect.Method) bool {
for i := 0; i < m.Func.Type().NumOut(); i++ {
if m.Func.Type().Out(i).Kind() == reflect.Chan {
return false
}
}
return go_openrpc_reflect.EthereumReflector.IsMethodEligible(m)
}
appReflector.FnGetMethodName = func(moduleName string, r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) {
if m.Name == "ID" {
return moduleName + "_ID", nil
}
if moduleName == "rpc" && m.Name == "Discover" {
return "rpc.discover", nil
}
return moduleName + "." + m.Name, nil
}
appReflector.FnGetMethodSummary = func(r reflect.Value, m reflect.Method, funcDecl *ast.FuncDecl) (string, error) {
if v, ok := Comments[m.Name]; ok {
return v, nil
}
return "", nil // noComment
}
appReflector.FnSchemaExamples = func(ty reflect.Type) (examples *meta_schema.Examples, err error) {
v := docgen.ExampleValue("unknown", ty, ty) // This isn't ideal, but seems to work well enough.
return &meta_schema.Examples{
meta_schema.AlwaysTrue(v),
}, nil
}
// Finally, register the configured reflector to the document.
d.WithReflector(appReflector)
return d
}

121
api/docgen/cmd/docgen.go Normal file
View File

@ -0,0 +1,121 @@
package main
import (
"encoding/json"
"fmt"
"os"
"reflect"
"sort"
"strings"
"github.com/filecoin-project/lotus/api/docgen"
)
func main() {
comments, groupComments := docgen.ParseApiASTInfo(os.Args[1], os.Args[2], os.Args[3], os.Args[4])
groups := make(map[string]*docgen.MethodGroup)
_, t, permStruct := docgen.GetAPIType(os.Args[2], os.Args[3])
for i := 0; i < t.NumMethod(); i++ {
m := t.Method(i)
groupName := docgen.MethodGroupFromName(m.Name)
g, ok := groups[groupName]
if !ok {
g = new(docgen.MethodGroup)
g.Header = groupComments[groupName]
g.GroupName = groupName
groups[groupName] = g
}
var args []interface{}
ft := m.Func.Type()
for j := 2; j < ft.NumIn(); j++ {
inp := ft.In(j)
args = append(args, docgen.ExampleValue(m.Name, inp, nil))
}
v, err := json.MarshalIndent(args, "", " ")
if err != nil {
panic(err)
}
outv := docgen.ExampleValue(m.Name, ft.Out(0), nil)
ov, err := json.MarshalIndent(outv, "", " ")
if err != nil {
panic(err)
}
g.Methods = append(g.Methods, &docgen.Method{
Name: m.Name,
Comment: comments[m.Name],
InputExample: string(v),
ResponseExample: string(ov),
})
}
var groupslice []*docgen.MethodGroup
for _, g := range groups {
groupslice = append(groupslice, g)
}
sort.Slice(groupslice, func(i, j int) bool {
return groupslice[i].GroupName < groupslice[j].GroupName
})
fmt.Printf("# Groups\n")
for _, g := range groupslice {
fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
for _, method := range g.Methods {
fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name)
}
}
for _, g := range groupslice {
g := g
fmt.Printf("## %s\n", g.GroupName)
fmt.Printf("%s\n\n", g.Header)
sort.Slice(g.Methods, func(i, j int) bool {
return g.Methods[i].Name < g.Methods[j].Name
})
for _, m := range g.Methods {
fmt.Printf("### %s\n", m.Name)
fmt.Printf("%s\n\n", m.Comment)
var meth reflect.StructField
var ok bool
for _, ps := range permStruct {
meth, ok = ps.FieldByName(m.Name)
if ok {
break
}
}
if !ok {
panic("no perms for method: " + m.Name)
}
perms := meth.Tag.Get("perm")
fmt.Printf("Perms: %s\n\n", perms)
if strings.Count(m.InputExample, "\n") > 0 {
fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
} else {
fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
}
if strings.Count(m.ResponseExample, "\n") > 0 {
fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
} else {
fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
}
}
}
}

View File

@ -1,4 +1,4 @@
package docgen
import (
"encoding/json"
@ -6,35 +6,56 @@ import (
"go/ast"
"go/parser"
"go/token"
"net/http"
"path/filepath"
"reflect"
"sort"
"strings"
"time"
"unicode"
"github.com/google/uuid"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-graphsync"
textselector "github.com/ipld/go-ipld-selector-text-lite"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/metrics"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/filestore"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
"github.com/filecoin-project/lotus/api/v0api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/chain/types/ethtypes"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-filestore"
"github.com/libp2p/go-libp2p-core/network"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/lotus/node/repo/imports"
sealing "github.com/filecoin-project/lotus/storage/pipeline"
"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
var ExampleValues = map[reflect.Type]interface{}{
reflect.TypeOf(api.MinerSubsystem(0)): api.MinerSubsystem(1),
reflect.TypeOf(auth.Permission("")): auth.Permission("write"),
reflect.TypeOf(""): "string value",
reflect.TypeOf(uint64(42)): uint64(42),
reflect.TypeOf(byte(7)): byte(7),
reflect.TypeOf([]byte{}): []byte("byte array"),
}
func addExample(v interface{}) {
@ -48,6 +69,7 @@ func init() {
}
ExampleValues[reflect.TypeOf(c)] = c
ExampleValues[reflect.TypeOf(&c)] = &c
c2, err := cid.Decode("bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve")
if err != nil {
@ -65,17 +87,31 @@ func init() {
ExampleValues[reflect.TypeOf(addr)] = addr
pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf")
if err != nil {
panic(err)
}
addExample(pid)
addExample(&pid)
storeIDExample := imports.ID(50)
textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash")
apiSelExample := api.Selector("Links/21/Hash/Links/42/Hash")
clientEvent := retrievalmarket.ClientEventDealAccepted
block := blocks.Block(&blocks.BasicBlock{})
ExampleValues[reflect.TypeOf(&block).Elem()] = block
addExample(bitfield.NewFromSet([]uint64{5}))
addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1)
addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
addExample(abi.ChainEpoch(10101))
addExample(crypto.SigTypeBLS)
addExample(types.KTBLS)
addExample(int64(9))
addExample(12.3)
addExample(123)
addExample(uintptr(0))
addExample(abi.MethodNum(1))
addExample(exitcode.ExitCode(0))
addExample(crypto.DomainSeparationTag_ElectionProofProduction)
@ -83,28 +119,78 @@ func init() {
addExample(abi.UnpaddedPieceSize(1024))
addExample(abi.UnpaddedPieceSize(1024).Padded())
addExample(abi.DealID(5432))
addExample(filestore.StatusFileChanged)
addExample(abi.SectorNumber(9))
addExample(abi.SectorSize(32 * 1024 * 1024 * 1024))
addExample(api.MpoolChange(0))
addExample(network.Connected)
addExample(dtypes.NetworkName("lotus"))
addExample(api.SyncStateStage(1))
addExample(api.FullAPIVersion1)
addExample(api.PCHInbound)
addExample(time.Minute)
addExample(graphsync.NewRequestID())
addExample(datatransfer.TransferID(3))
addExample(datatransfer.Ongoing)
addExample(storeIDExample)
addExample(&storeIDExample)
addExample(clientEvent)
addExample(&clientEvent)
addExample(retrievalmarket.ClientEventDealAccepted)
addExample(retrievalmarket.DealStatusNew)
addExample(&textSelExample)
addExample(&apiSelExample)
addExample(network.ReachabilityPublic)
addExample(build.TestNetworkVersion)
allocationId := verifreg.AllocationId(0)
addExample(allocationId)
addExample(&allocationId)
addExample(map[verifreg.AllocationId]verifreg.Allocation{})
claimId := verifreg.ClaimId(0)
addExample(claimId)
addExample(&claimId)
addExample(map[verifreg.ClaimId]verifreg.Claim{})
addExample(map[string]int{"name": 42})
addExample(map[string]time.Time{"name": time.Unix(1615243938, 0).UTC()})
addExample(&types.ExecutionTrace{
Msg: ExampleValue("init", reflect.TypeOf(types.MessageTrace{}), nil).(types.MessageTrace),
MsgRct: ExampleValue("init", reflect.TypeOf(types.ReturnTrace{}), nil).(types.ReturnTrace),
})
addExample(map[string]types.Actor{
"t01236": exampleValue(reflect.TypeOf(types.Actor{})).(types.Actor),
"t01236": ExampleValue("init", reflect.TypeOf(types.Actor{}), nil).(types.Actor),
})
addExample(map[string]api.MarketDeal{
"t026363": exampleValue(reflect.TypeOf(api.MarketDeal{})).(api.MarketDeal),
"t026363": ExampleValue("init", reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
})
addExample(map[string]*api.MarketDeal{
"t026363": ExampleValue("init", reflect.TypeOf(&api.MarketDeal{}), nil).(*api.MarketDeal),
})
addExample(map[string]api.MarketBalance{
"t026363": exampleValue(reflect.TypeOf(api.MarketBalance{})).(api.MarketBalance),
"t026363": ExampleValue("init", reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
})
addExample(map[string]*pubsub.TopicScoreSnapshot{
"/blocks": {
TimeInMesh: time.Minute,
FirstMessageDeliveries: 122,
MeshMessageDeliveries: 1234,
InvalidMessageDeliveries: 3,
},
})
addExample(map[string]metrics.Stats{
"12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": {
RateIn: 100,
RateOut: 50,
TotalIn: 174000,
TotalOut: 12500,
},
})
addExample(map[protocol.ID]metrics.Stats{
"/fil/hello/1.0.0": {
RateIn: 100,
RateOut: 50,
TotalIn: 174000,
TotalOut: 12500,
},
})
maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior")
@ -115,9 +201,255 @@ func init() {
// because reflect.TypeOf(maddr) returns the concrete type...
ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr
// miner specific
addExample(filestore.Path(".lotusminer/fstmp123"))
si := uint64(12)
addExample(&si)
addExample(retrievalmarket.DealID(5))
addExample(abi.ActorID(1000))
addExample(map[string]cid.Cid{})
addExample(map[string][]api.SealedRef{
"98000": {
api.SealedRef{
SectorID: 100,
Offset: 10 << 20,
Size: 1 << 20,
},
},
})
addExample(api.SectorState(sealing.Proving))
addExample(storiface.ID("76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8"))
addExample(storiface.FTUnsealed)
addExample(storiface.PathSealing)
addExample(map[storiface.ID][]storiface.Decl{
"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": {
{
SectorID: abi.SectorID{Miner: 1000, Number: 100},
SectorFileType: storiface.FTSealed,
},
},
})
addExample(map[storiface.ID]string{
"76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8": "/data/path",
})
addExample(map[uuid.UUID][]storiface.WorkerJob{
uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): {
{
ID: storiface.CallID{
Sector: abi.SectorID{Miner: 1000, Number: 100},
ID: uuid.MustParse("76081ba0-61bd-45a5-bc08-af05f1c26e5d"),
},
Sector: abi.SectorID{Miner: 1000, Number: 100},
Task: sealtasks.TTPreCommit2,
RunWait: 0,
Start: time.Unix(1605172927, 0).UTC(),
Hostname: "host",
},
},
})
addExample(map[uuid.UUID]storiface.WorkerStats{
uuid.MustParse("ef8d99a2-6865-4189-8ffa-9fef0f806eee"): {
Info: storiface.WorkerInfo{
Hostname: "host",
Resources: storiface.WorkerResources{
MemPhysical: 256 << 30,
MemUsed: 2 << 30,
MemSwap: 120 << 30,
MemSwapUsed: 2 << 30,
CPUs: 64,
GPUs: []string{"aGPU 1337"},
Resources: storiface.ResourceTable,
},
},
Enabled: true,
MemUsedMin: 0,
MemUsedMax: 0,
GpuUsed: 0,
CpuUse: 0,
},
})
addExample(storiface.ErrorCode(0))
addExample(map[abi.SectorNumber]string{
123: "can't acquire read lock",
})
addExample(json.RawMessage(`"json raw message"`))
addExample(map[api.SectorState]int{
api.SectorState(sealing.Proving): 120,
})
addExample([]abi.SectorNumber{123, 124})
addExample([]storiface.SectorLock{
{
Sector: abi.SectorID{Number: 123, Miner: 1000},
Write: [storiface.FileTypes]uint{0, 0, 1},
Read: [storiface.FileTypes]uint{2, 3, 0},
},
})
storifaceid := storiface.ID("1399aa04-2625-44b1-bad4-bd07b59b22c4")
addExample(&storifaceid)
// worker specific
addExample(storiface.AcquireMove)
addExample(storiface.UnpaddedByteIndex(abi.PaddedPieceSize(1 << 20).Unpadded()))
addExample(map[sealtasks.TaskType]struct{}{
sealtasks.TTPreCommit2: {},
})
addExample(sealtasks.TTCommit2)
addExample(apitypes.OpenRPCDocument{
"openrpc": "1.2.6",
"info": map[string]interface{}{
"title": "Lotus RPC API",
"version": "1.2.1/generated=2020-11-22T08:22:42-06:00",
},
"methods": []interface{}{},
},
)
addExample(api.CheckStatusCode(0))
addExample(map[string]interface{}{"abc": 123})
addExample(api.MinerSubsystems{
api.SubsystemMining,
api.SubsystemSealing,
api.SubsystemSectorStorage,
api.SubsystemMarkets,
})
addExample(api.DagstoreShardResult{
Key: "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq",
Error: "<error>",
})
addExample(api.DagstoreShardInfo{
Key: "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq",
State: "ShardStateAvailable",
Error: "<error>",
})
addExample(storiface.ResourceTable)
addExample(network.ScopeStat{
Memory: 123,
NumStreamsInbound: 1,
NumStreamsOutbound: 2,
NumConnsInbound: 3,
NumConnsOutbound: 4,
NumFD: 5,
})
addExample(map[string]network.ScopeStat{
"abc": {
Memory: 123,
NumStreamsInbound: 1,
NumStreamsOutbound: 2,
NumConnsInbound: 3,
NumConnsOutbound: 4,
NumFD: 5,
},
})
addExample(api.NetLimit{
Memory: 123,
StreamsInbound: 1,
StreamsOutbound: 2,
Streams: 3,
ConnsInbound: 3,
ConnsOutbound: 4,
Conns: 4,
FD: 5,
})
addExample(map[string]bitfield.BitField{
"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
})
addExample(&api.RaftStateData{
NonceMap: make(map[address.Address]uint64),
MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
})
addExample(http.Header{
"Authorization": []string{"Bearer ey.."},
})
addExample(map[storiface.SectorFileType]storiface.SectorLocation{
storiface.FTSealed: {
Local: false,
URL: "https://example.com/sealingservice/sectors/s-f0123-12345",
Headers: nil,
},
})
ethint := ethtypes.EthUint64(5)
addExample(ethint)
addExample(&ethint)
ethaddr, _ := ethtypes.ParseEthAddress("0x5CbEeCF99d3fDB3f25E309Cc264f240bb0664031")
addExample(ethaddr)
addExample(&ethaddr)
ethhash, _ := ethtypes.EthHashFromCid(c)
addExample(ethhash)
addExample(&ethhash)
ethFeeHistoryReward := [][]ethtypes.EthBigInt{}
addExample(&ethFeeHistoryReward)
addExample(&uuid.UUID{})
filterid := ethtypes.EthFilterID(ethhash)
addExample(filterid)
addExample(&filterid)
subid := ethtypes.EthSubscriptionID(ethhash)
addExample(subid)
addExample(&subid)
pstring := func(s string) *string { return &s }
addExample(&ethtypes.EthFilterSpec{
FromBlock: pstring("2301220"),
Address: []ethtypes.EthAddress{ethaddr},
})
percent := types.Percent(123)
addExample(percent)
addExample(&percent)
}
func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) {
switch pkg {
case "api": // latest
switch name {
case "FullNode":
i = &api.FullNodeStruct{}
t = reflect.TypeOf(new(struct{ api.FullNode })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.FullNodeStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal))
case "StorageMiner":
i = &api.StorageMinerStruct{}
t = reflect.TypeOf(new(struct{ api.StorageMiner })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.StorageMinerStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.CommonStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(api.NetStruct{}.Internal))
case "Worker":
i = &api.WorkerStruct{}
t = reflect.TypeOf(new(struct{ api.Worker })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.WorkerStruct{}.Internal))
case "Gateway":
i = &api.GatewayStruct{}
t = reflect.TypeOf(new(struct{ api.Gateway })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal))
default:
panic("unknown type")
}
case "v0api":
switch name {
case "FullNode":
i = v0api.FullNodeStruct{}
t = reflect.TypeOf(new(struct{ v0api.FullNode })).Elem()
permStruct = append(permStruct, reflect.TypeOf(v0api.FullNodeStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(v0api.CommonStruct{}.Internal))
permStruct = append(permStruct, reflect.TypeOf(v0api.NetStruct{}.Internal))
default:
panic("unknown type")
}
}
return
}
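Both docgen entry points start from this lookup. For instance, fetching the FullNode metadata and counting its methods (a sketch using the exported API above):

package main

import (
    "fmt"

    "github.com/filecoin-project/lotus/api/docgen"
)

func main() {
    _, t, permStructs := docgen.GetAPIType("FullNode", "api")
    fmt.Println("methods:", t.NumMethod())
    fmt.Println("perm tag sources:", len(permStructs)) // FullNode, Common, Net internals
}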
func ExampleValue(method string, t, parent reflect.Type) interface{} {
v, ok := ExampleValues[t]
if ok {
return v
@ -126,41 +458,45 @@ func exampleValue(t reflect.Type) interface{} {
switch t.Kind() {
case reflect.Slice:
out := reflect.New(t).Elem()
out = reflect.Append(out, reflect.ValueOf(ExampleValue(method, t.Elem(), t)))
return out.Interface()
case reflect.Chan:
return ExampleValue(method, t.Elem(), nil)
case reflect.Struct:
es := exampleStruct(method, t, parent)
v := reflect.ValueOf(es).Elem().Interface()
ExampleValues[t] = v
return v
case reflect.Array:
out := reflect.New(t).Elem()
for i := 0; i < t.Len(); i++ {
out.Index(i).Set(reflect.ValueOf(ExampleValue(method, t.Elem(), t)))
}
return out.Interface()
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct {
es := exampleStruct(method, t.Elem(), t)
ExampleValues[t] = es
return es
}
case reflect.Interface:
return struct{}{}
}
panic(fmt.Sprintf("No example value for type: %s", t))
panic(fmt.Sprintf("No example value for type: %s (method '%s')", t, method))
}
func exampleStruct(method string, t, parent reflect.Type) interface{} {
ns := reflect.New(t)
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type == parent {
continue
}
if f.IsExported() {
ns.Elem().Field(i).Set(reflect.ValueOf(ExampleValue(method, f.Type, t)))
}
}
@ -168,6 +504,7 @@ func exampleStruct(t reflect.Type) interface{} {
}
type Visitor struct {
Root string
Methods map[string]ast.Node
}
@ -177,7 +514,7 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
return v
}
if st.Name.Name != "FullNode" {
if st.Name.Name != v.Root {
return nil
}
@ -191,33 +528,43 @@ func (v *Visitor) Visit(node ast.Node) ast.Visitor {
return v
}
const NoComment = "There are not yet any comments for this method."
func ParseApiASTInfo(apiFile, iface, pkg, dir string) (comments map[string]string, groupDocs map[string]string) { //nolint:golint
fset := token.NewFileSet()
pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments)
apiDir, err := filepath.Abs(dir)
if err != nil {
fmt.Println("./api filepath absolute error: ", err)
return
}
apiFile, err = filepath.Abs(apiFile)
if err != nil {
fmt.Println("filepath absolute error: ", err, "file:", apiFile)
return
}
pkgs, err := parser.ParseDir(fset, apiDir, nil, parser.AllErrors|parser.ParseComments)
if err != nil {
fmt.Println("parse error: ", err)
return
}
ap := pkgs["api"]
ap := pkgs[pkg]
f := ap.Files["api/api_full.go"]
f := ap.Files[apiFile]
cmap := ast.NewCommentMap(fset, f, f.Comments)
v := &Visitor{iface, make(map[string]ast.Node)}
ast.Walk(v, ap)
comments = make(map[string]string)
groupDocs = make(map[string]string)
for mn, node := range v.Methods {
filteredComments := cmap.Filter(node).Comments()
if len(filteredComments) == 0 {
comments[mn] = NoComment
} else {
for _, c := range filteredComments {
if strings.HasPrefix(c.Text(), "MethodGroup:") {
parts := strings.Split(c.Text(), "\n")
groupName := strings.TrimSpace(parts[0][12:])
@ -228,15 +575,19 @@ func parseApiASTInfo() (map[string]string, map[string]string) {
}
}
l := len(filteredComments) - 1
if len(filteredComments) > 1 {
l = len(filteredComments) - 2
}
last := filteredComments[l].Text()
if !strings.HasPrefix(last, "MethodGroup:") {
comments[mn] = last
} else {
comments[mn] = NoComment
}
}
}
return comments, groupDocs
}
type MethodGroup struct {
@ -252,7 +603,7 @@ type Method struct {
ResponseExample string
}
func MethodGroupFromName(mn string) string {
i := strings.IndexFunc(mn[1:], func(r rune) bool {
return unicode.IsUpper(r)
})
@ -261,78 +612,3 @@ func methodGroupFromName(mn string) string {
}
return mn[:i+1]
}
func main() {
comments, groupComments := parseApiASTInfo()
groups := make(map[string]*MethodGroup)
var api struct{ api.FullNode }
t := reflect.TypeOf(api)
for i := 0; i < t.NumMethod(); i++ {
m := t.Method(i)
groupName := methodGroupFromName(m.Name)
g, ok := groups[groupName]
if !ok {
g = new(MethodGroup)
g.Header = groupComments[groupName]
g.GroupName = groupName
groups[groupName] = g
}
var args []interface{}
ft := m.Func.Type()
for j := 2; j < ft.NumIn(); j++ {
inp := ft.In(j)
args = append(args, exampleValue(inp))
}
v, err := json.Marshal(args)
if err != nil {
panic(err)
}
outv := exampleValue(ft.Out(0))
ov, err := json.Marshal(outv)
if err != nil {
panic(err)
}
g.Methods = append(g.Methods, &Method{
Name: m.Name,
Comment: comments[m.Name],
InputExample: string(v),
ResponseExample: string(ov),
})
}
var groupslice []*MethodGroup
for _, g := range groups {
groupslice = append(groupslice, g)
}
sort.Slice(groupslice, func(i, j int) bool {
return groupslice[i].GroupName < groupslice[j].GroupName
})
for _, g := range groupslice {
fmt.Printf("## %s\n", g.GroupName)
fmt.Printf("%s\n\n", g.Header)
sort.Slice(g.Methods, func(i, j int) bool {
return g.Methods[i].Name < g.Methods[j].Name
})
for _, m := range g.Methods {
fmt.Printf("### %s\n", m.Name)
fmt.Printf("%s\n\n", m.Comment)
fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
}
}
}
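
For a concrete sense of the grouping rule, MethodGroupFromName above cuts a method name just before its second uppercase rune, so StateMinerInfo lands in the State group. A minimal standalone sketch (a hypothetical reimplementation for illustration only; the branch for names with no second uppercase rune is elided between the hunks above, so the fallback below is a choice made for this sketch):

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// methodGroup mirrors the grouping logic of MethodGroupFromName above.
func methodGroup(mn string) string {
	i := strings.IndexFunc(mn[1:], func(r rune) bool {
		return unicode.IsUpper(r)
	})
	if i < 0 {
		// Fallback chosen for this sketch; the original's branch is elided above.
		return mn
	}
	return mn[:i+1]
}

func main() {
	fmt.Println(methodGroup("ChainHead"))      // Chain
	fmt.Println(methodGroup("StateMinerInfo")) // State
	fmt.Println(methodGroup("MpoolPush"))      // Mpool
}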

47
api/eth_aliases.go Normal file
View File

@ -0,0 +1,47 @@
package api
import apitypes "github.com/filecoin-project/lotus/api/types"
func CreateEthRPCAliases(as apitypes.Aliaser) {
// TODO: maybe use reflect to automatically register all the eth aliases
as.AliasMethod("eth_accounts", "Filecoin.EthAccounts")
as.AliasMethod("eth_blockNumber", "Filecoin.EthBlockNumber")
as.AliasMethod("eth_getBlockTransactionCountByNumber", "Filecoin.EthGetBlockTransactionCountByNumber")
as.AliasMethod("eth_getBlockTransactionCountByHash", "Filecoin.EthGetBlockTransactionCountByHash")
as.AliasMethod("eth_getBlockByHash", "Filecoin.EthGetBlockByHash")
as.AliasMethod("eth_getBlockByNumber", "Filecoin.EthGetBlockByNumber")
as.AliasMethod("eth_getTransactionByHash", "Filecoin.EthGetTransactionByHash")
as.AliasMethod("eth_getTransactionCount", "Filecoin.EthGetTransactionCount")
as.AliasMethod("eth_getTransactionReceipt", "Filecoin.EthGetTransactionReceipt")
as.AliasMethod("eth_getTransactionByBlockHashAndIndex", "Filecoin.EthGetTransactionByBlockHashAndIndex")
as.AliasMethod("eth_getTransactionByBlockNumberAndIndex", "Filecoin.EthGetTransactionByBlockNumberAndIndex")
as.AliasMethod("eth_getCode", "Filecoin.EthGetCode")
as.AliasMethod("eth_getStorageAt", "Filecoin.EthGetStorageAt")
as.AliasMethod("eth_getBalance", "Filecoin.EthGetBalance")
as.AliasMethod("eth_chainId", "Filecoin.EthChainId")
as.AliasMethod("eth_syncing", "Filecoin.EthSyncing")
as.AliasMethod("eth_feeHistory", "Filecoin.EthFeeHistory")
as.AliasMethod("eth_protocolVersion", "Filecoin.EthProtocolVersion")
as.AliasMethod("eth_maxPriorityFeePerGas", "Filecoin.EthMaxPriorityFeePerGas")
as.AliasMethod("eth_gasPrice", "Filecoin.EthGasPrice")
as.AliasMethod("eth_sendRawTransaction", "Filecoin.EthSendRawTransaction")
as.AliasMethod("eth_estimateGas", "Filecoin.EthEstimateGas")
as.AliasMethod("eth_call", "Filecoin.EthCall")
as.AliasMethod("eth_getLogs", "Filecoin.EthGetLogs")
as.AliasMethod("eth_getFilterChanges", "Filecoin.EthGetFilterChanges")
as.AliasMethod("eth_getFilterLogs", "Filecoin.EthGetFilterLogs")
as.AliasMethod("eth_newFilter", "Filecoin.EthNewFilter")
as.AliasMethod("eth_newBlockFilter", "Filecoin.EthNewBlockFilter")
as.AliasMethod("eth_newPendingTransactionFilter", "Filecoin.EthNewPendingTransactionFilter")
as.AliasMethod("eth_uninstallFilter", "Filecoin.EthUninstallFilter")
as.AliasMethod("eth_subscribe", "Filecoin.EthSubscribe")
as.AliasMethod("eth_unsubscribe", "Filecoin.EthUnsubscribe")
as.AliasMethod("net_version", "Filecoin.NetVersion")
as.AliasMethod("net_listening", "Filecoin.NetListening")
as.AliasMethod("web3_clientVersion", "Filecoin.Web3ClientVersion")
}
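
CreateEthRPCAliases only depends on the small apitypes.Aliaser interface (defined in api/types/rpc.go further down), so any recorder that implements AliasMethod can consume it. A toy, self-contained sketch (the mapAliaser type is hypothetical, for illustration only):

package main

import "fmt"

// Aliaser mirrors apitypes.Aliaser from api/types/rpc.go.
type Aliaser interface {
	AliasMethod(alias, original string)
}

// mapAliaser is a toy recorder; the real consumer is the JSON-RPC server.
type mapAliaser map[string]string

func (m mapAliaser) AliasMethod(alias, original string) { m[alias] = original }

func main() {
	as := mapAliaser{}
	// CreateEthRPCAliases(as) would register the full table above; one entry
	// registered by hand shows the shape of the mapping.
	as.AliasMethod("eth_chainId", "Filecoin.EthChainId")
	fmt.Println(as["eth_chainId"]) // Filecoin.EthChainId
}

In lotus itself the aliases are registered against the JSON-RPC server, so a request for eth_chainId can be dispatched to Filecoin.EthChainId.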

79
api/miner_subsystems.go Normal file
View File

@ -0,0 +1,79 @@
package api
import (
"encoding/json"
)
// MinerSubsystem represents a miner subsystem. Int and string values are not
// guaranteed to be stable over time.
type MinerSubsystem int
const (
// SubsystemUnknown is a placeholder for the zero value. It should never
// be used.
SubsystemUnknown MinerSubsystem = iota
// SubsystemMarkets signifies the storage and retrieval
// deal-making subsystem.
SubsystemMarkets
// SubsystemMining signifies the mining subsystem.
SubsystemMining
// SubsystemSealing signifies the sealing subsystem.
SubsystemSealing
// SubsystemSectorStorage signifies the sector storage subsystem.
SubsystemSectorStorage
)
var MinerSubsystemToString = map[MinerSubsystem]string{
SubsystemUnknown: "Unknown",
SubsystemMarkets: "Markets",
SubsystemMining: "Mining",
SubsystemSealing: "Sealing",
SubsystemSectorStorage: "SectorStorage",
}
var MinerSubsystemToID = map[string]MinerSubsystem{
"Unknown": SubsystemUnknown,
"Markets": SubsystemMarkets,
"Mining": SubsystemMining,
"Sealing": SubsystemSealing,
"SectorStorage": SubsystemSectorStorage,
}
func (ms MinerSubsystem) MarshalJSON() ([]byte, error) {
return json.Marshal(MinerSubsystemToString[ms])
}
func (ms *MinerSubsystem) UnmarshalJSON(b []byte) error {
var j string
err := json.Unmarshal(b, &j)
if err != nil {
return err
}
s, ok := MinerSubsystemToID[j]
if !ok {
*ms = SubsystemUnknown
} else {
*ms = s
}
return nil
}
type MinerSubsystems []MinerSubsystem
func (ms MinerSubsystems) Has(entry MinerSubsystem) bool {
for _, v := range ms {
if v == entry {
return true
}
}
return false
}
func (ms MinerSubsystem) String() string {
s, ok := MinerSubsystemToString[ms]
if !ok {
return MinerSubsystemToString[SubsystemUnknown]
}
return s
}
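
A quick illustration of the marshalling behavior above: a MinerSubsystem serializes to its string name, and unrecognized names decode to SubsystemUnknown rather than erroring. A minimal sketch (hypothetical usage, not part of the diff):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	b, _ := json.Marshal(api.SubsystemMining)
	fmt.Println(string(b)) // "Mining"

	var ms api.MinerSubsystem
	_ = json.Unmarshal([]byte(`"NoSuchSubsystem"`), &ms)
	fmt.Println(ms) // Unknown: unrecognized names decode to SubsystemUnknown

	subs := api.MinerSubsystems{api.SubsystemMining, api.SubsystemSealing}
	fmt.Println(subs.Has(api.SubsystemMarkets)) // false
}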

4217
api/mocks/mock_full.go Normal file

File diff suppressed because it is too large Load Diff

48
api/permissioned.go Normal file
View File

@ -0,0 +1,48 @@
package api
import (
"github.com/filecoin-project/go-jsonrpc/auth"
)
const (
// When changing these, update docs/API.md too
PermRead auth.Permission = "read" // default
PermWrite auth.Permission = "write"
PermSign auth.Permission = "sign" // Use wallet keys for signing
PermAdmin auth.Permission = "admin" // Manage permissions
)
var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin}
var DefaultPerms = []auth.Permission{PermRead}
func permissionedProxies(in, out interface{}) {
outs := GetInternalStructs(out)
for _, o := range outs {
auth.PermissionedProxy(AllPermissions, DefaultPerms, in, o)
}
}
func PermissionedStorMinerAPI(a StorageMiner) StorageMiner {
var out StorageMinerStruct
permissionedProxies(a, &out)
return &out
}
func PermissionedFullAPI(a FullNode) FullNode {
var out FullNodeStruct
permissionedProxies(a, &out)
return &out
}
func PermissionedWorkerAPI(a Worker) Worker {
var out WorkerStruct
permissionedProxies(a, &out)
return &out
}
func PermissionedWalletAPI(a Wallet) Wallet {
var out WalletStruct
permissionedProxies(a, &out)
return &out
}
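
At call time, auth.PermissionedProxy (from go-jsonrpc) fills each perm-tagged function field of the Internal struct with a wrapper that checks the caller's permissions against the request context before delegating. A hedged, self-contained sketch with a toy API; Counter, CounterStruct, and the permission slices are hypothetical, and it assumes go-jsonrpc's auth.PermissionedProxy and auth.WithPerm behave as used here:

package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-jsonrpc/auth"
)

// Counter is a toy implementation; CounterStruct mirrors the shape the
// generated lotus proxy structs use: an Internal struct of perm-tagged funcs.
type Counter struct{ n int }

func (c *Counter) Get(ctx context.Context) (int, error) { return c.n, nil }
func (c *Counter) Inc(ctx context.Context) (int, error) { c.n++; return c.n, nil }

type CounterStruct struct {
	Internal struct {
		Get func(ctx context.Context) (int, error) `perm:"read"`
		Inc func(ctx context.Context) (int, error) `perm:"write"`
	}
}

func main() {
	valid := []auth.Permission{"read", "write"}
	defaults := []auth.Permission{"read"} // analogous to DefaultPerms above

	var impl Counter
	var out CounterStruct
	auth.PermissionedProxy(valid, defaults, &impl, &out.Internal)

	// A bare context only carries the default permissions, so writes fail.
	if _, err := out.Internal.Inc(context.Background()); err != nil {
		fmt.Println("denied:", err)
	}

	// A context stamped with "write" permission is allowed through.
	ctx := auth.WithPerm(context.Background(), []auth.Permission{"write"})
	n, _ := out.Internal.Inc(ctx)
	fmt.Println("count:", n) // count: 1
}

This is the same shape the generated FullNodeStruct and StorageMinerStruct proxies have, which is why permissionedProxies can wrap them generically via GetInternalStructs.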

7268
api/proxy_gen.go Normal file

File diff suppressed because it is too large Load Diff

30
api/proxy_util.go Normal file
View File

@ -0,0 +1,30 @@
package api
import "reflect"
var _internalField = "Internal"
// GetInternalStructs extracts all pointers to 'Internal' sub-structs from the provided pointer to a proxy struct
func GetInternalStructs(in interface{}) []interface{} {
return getInternalStructs(reflect.ValueOf(in).Elem())
}
func getInternalStructs(rv reflect.Value) []interface{} {
var out []interface{}
internal := rv.FieldByName(_internalField)
ii := internal.Addr().Interface()
out = append(out, ii)
for i := 0; i < rv.NumField(); i++ {
if rv.Type().Field(i).Name == _internalField {
continue
}
sub := getInternalStructs(rv.Field(i))
out = append(out, sub...)
}
return out
}

65
api/proxy_util_test.go Normal file
View File

@ -0,0 +1,65 @@
// stm: #unit
package api
import (
"testing"
"github.com/stretchr/testify/require"
)
type StrA struct {
StrB
Internal struct {
A int
}
}
type StrB struct {
Internal struct {
B int
}
}
type StrC struct {
Internal struct {
Internal struct {
C int
}
}
}
func TestGetInternalStructs(t *testing.T) {
//stm: @OTHER_IMPLEMENTATION_API_STRUCTS_001
var proxy StrA
sts := GetInternalStructs(&proxy)
require.Len(t, sts, 2)
sa := sts[0].(*struct{ A int })
sa.A = 3
sb := sts[1].(*struct{ B int })
sb.B = 4
require.Equal(t, 3, proxy.Internal.A)
require.Equal(t, 4, proxy.StrB.Internal.B)
}
func TestNestedInternalStructs(t *testing.T) {
//stm: @OTHER_IMPLEMENTATION_API_STRUCTS_001
var proxy StrC
// check that only the top-level internal struct gets picked up
sts := GetInternalStructs(&proxy)
require.Len(t, sts, 1)
sa := sts[0].(*struct {
Internal struct {
C int
}
})
sa.Internal.C = 5
require.Equal(t, 5, proxy.Internal.Internal.C)
}

View File

@ -1,254 +0,0 @@
package test
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
logging "github.com/ipfs/go-log/v2"
"github.com/ipld/go-car"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test"
unixfile "github.com/ipfs/go-unixfs/file"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/impl"
ipld "github.com/ipfs/go-ipld-format"
)
func init() {
logging.SetAllLoggers(logging.LevelInfo)
build.InsecurePoStValidation = true
}
func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) {
os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background()
n, sn := b(t, 1, oneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
mine := true
done := make(chan struct{})
go func() {
defer close(done)
for mine {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
t.Error(err)
}
}
}()
makeDeal(t, ctx, 6, client, miner, carExport)
mine = false
fmt.Println("shutting down mining")
<-done
}
func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
os.Setenv("BELLMAN_NO_GPU", "1")
ctx := context.Background()
n, sn := b(t, 1, oneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
mine := true
done := make(chan struct{})
go func() {
defer close(done)
for mine {
time.Sleep(blocktime)
if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
t.Error(err)
}
}
}()
makeDeal(t, ctx, 6, client, miner, false)
makeDeal(t, ctx, 7, client, miner, false)
mine = false
fmt.Println("shutting down mining")
<-done
}
func makeDeal(t *testing.T, ctx context.Context, rseed int, client *impl.FullNodeAPI, miner TestStorageNode, carExport bool) {
data := make([]byte, 1600)
rand.New(rand.NewSource(int64(rseed))).Read(data)
r := bytes.NewReader(data)
fcid, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
deal := startDeal(t, ctx, miner, client, fcid)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
waitDealSealed(t, ctx, client, deal)
// Retrieval
testRetrieval(t, ctx, err, client, fcid, carExport, data)
}
func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client *impl.FullNodeAPI, fcid cid.Cid) *cid.Cid {
maddr, err := miner.ActorAddress(ctx)
if err != nil {
t.Fatal(err)
}
addr, err := client.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{
Data: &storagemarket.DataRef{Root: fcid},
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
MinBlocksDuration: 100,
})
if err != nil {
t.Fatalf("%+v", err)
}
return deal
}
func waitDealSealed(t *testing.T, ctx context.Context, client *impl.FullNodeAPI, deal *cid.Cid) {
loop:
for {
di, err := client.ClientGetDealInfo(ctx, *deal)
if err != nil {
t.Fatal(err)
}
switch di.State {
case storagemarket.StorageDealProposalRejected:
t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
t.Fatal("deal failed")
case storagemarket.StorageDealError:
t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealActive:
fmt.Println("COMPLETE", di)
break loop
}
fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
time.Sleep(time.Second / 2)
}
}
func testRetrieval(t *testing.T, ctx context.Context, err error, client *impl.FullNodeAPI, fcid cid.Cid, carExport bool, data []byte) {
offers, err := client.ClientFindData(ctx, fcid)
if err != nil {
t.Fatal(err)
}
if len(offers) < 1 {
t.Fatal("no offers")
}
rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rpath)
caddr, err := client.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
ref := &api.FileRef{
Path: filepath.Join(rpath, "ret"),
IsCAR: carExport,
}
err = client.ClientRetrieve(ctx, offers[0].Order(caddr), ref)
if err != nil {
t.Fatalf("%+v", err)
}
rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
if err != nil {
t.Fatal(err)
}
if carExport {
rdata = extractCarData(t, ctx, rdata, rpath)
}
if !bytes.Equal(rdata, data) {
t.Fatal("wrong data retrieved")
}
}
func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte {
bserv := dstest.Bserv()
ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
if err != nil {
t.Fatal(err)
}
b, err := bserv.GetBlock(ctx, ch.Roots[0])
if err != nil {
t.Fatal(err)
}
nd, err := ipld.Decode(b)
if err != nil {
t.Fatal(err)
}
dserv := dag.NewDAGService(bserv)
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
if err != nil {
t.Fatal(err)
}
outPath := filepath.Join(rpath, "retLoadedCAR")
if err := files.WriteTo(fil, outPath); err != nil {
t.Fatal(err)
}
rdata, err = ioutil.ReadFile(outPath)
if err != nil {
t.Fatal(err)
}
return rdata
}

View File

@ -1,199 +0,0 @@
package test
import (
"bytes"
"context"
"fmt"
"math/rand"
"os"
"sync/atomic"
"testing"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/node/impl"
)
var log = logging.Logger("apitest")
func (ts *testSuite) testMining(t *testing.T) {
ctx := context.Background()
apis, sn := ts.makeNodes(t, 1, oneMiner)
api := apis[0]
h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(0), h1.Height())
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
<-newHeads
err = sn[0].MineOne(ctx, func(bool) {})
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(1), h2.Height())
}
func (ts *testSuite) testMiningReal(t *testing.T) {
build.InsecurePoStValidation = false
defer func() {
build.InsecurePoStValidation = true
}()
ctx := context.Background()
apis, sn := ts.makeNodes(t, 1, oneMiner)
api := apis[0]
h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(0), h1.Height())
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
<-newHeads
err = sn[0].MineOne(ctx, func(bool) {})
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(1), h2.Height())
err = sn[0].MineOne(ctx, func(bool) {})
require.NoError(t, err)
<-newHeads
h2, err = api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, abi.ChainEpoch(2), h2.Height())
}
func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) {
os.Setenv("BELLMAN_NO_GPU", "1")
// test making a deal with a fresh miner, and see if it starts to mine
ctx := context.Background()
n, sn := b(t, 1, []StorageMiner{
{Full: 0, Preseal: PresealGenesis},
{Full: 0, Preseal: 0}, // TODO: Add support for storage miners on non-first full node
})
client := n[0].FullNode.(*impl.FullNodeAPI)
provider := sn[1]
genesisMiner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := provider.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
data := make([]byte, 600)
rand.New(rand.NewSource(5)).Read(data)
r := bytes.NewReader(data)
fcid, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
var mine int32 = 1
done := make(chan struct{})
minedTwo := make(chan struct{})
go func() {
defer close(done)
prevExpect := 0
for atomic.LoadInt32(&mine) != 0 {
wait := make(chan int, 2)
mdone := func(mined bool) {
go func() {
n := 0
if mined {
n = 1
}
wait <- n
}()
}
if err := sn[0].MineOne(ctx, mdone); err != nil {
t.Error(err)
}
if err := sn[1].MineOne(ctx, mdone); err != nil {
t.Error(err)
}
expect := <-wait
expect += <-wait
time.Sleep(blocktime)
for {
n := 0
for i, node := range sn {
mb, err := node.MiningBase(ctx)
if err != nil {
t.Error(err)
return
}
if len(mb.Cids()) != expect {
log.Warnf("node %d mining base not complete (%d, want %d)", i, len(mb.Cids()), expect)
continue
}
n++
}
if n == len(sn) {
break
}
time.Sleep(blocktime)
}
if prevExpect == 2 && expect == 2 && minedTwo != nil {
close(minedTwo)
minedTwo = nil
}
prevExpect = expect
}
}()
deal := startDeal(t, ctx, provider, client, fcid)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
waitDealSealed(t, ctx, client, deal)
<-minedTwo
atomic.StoreInt32(&mine, 0)
fmt.Println("shutting down mining")
<-done
}

View File

@ -1,124 +0,0 @@
package test
import (
"context"
"testing"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/stretchr/testify/assert"
)
type TestNode struct {
api.FullNode
}
type TestStorageNode struct {
api.StorageMiner
MineOne func(context.Context, func(bool)) error
}
var PresealGenesis = -1
type StorageMiner struct {
Full int
Preseal int
}
// APIBuilder is a function which is invoked by the test suite to provide
// test nodes and networks
//
// The storage array defines storage nodes; each number in the array specifies
// the index of the full node that the storage node 'belongs' to
type APIBuilder func(t *testing.T, nFull int, storage []StorageMiner) ([]TestNode, []TestStorageNode)
type testSuite struct {
makeNodes APIBuilder
}
// TestApis is the entry point to API test suite
func TestApis(t *testing.T, b APIBuilder) {
ts := testSuite{
makeNodes: b,
}
t.Run("version", ts.testVersion)
t.Run("id", ts.testID)
t.Run("testConnectTwo", ts.testConnectTwo)
t.Run("testMining", ts.testMining)
t.Run("testMiningReal", ts.testMiningReal)
}
var oneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
func (ts *testSuite) testVersion(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, 1, oneMiner)
api := apis[0]
v, err := api.Version(ctx)
if err != nil {
t.Fatal(err)
}
if v.Version != build.BuildVersion {
t.Error("Version didn't work properly")
}
}
func (ts *testSuite) testID(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, 1, oneMiner)
api := apis[0]
id, err := api.ID(ctx)
if err != nil {
t.Fatal(err)
}
assert.Regexp(t, "^12", id.Pretty())
}
func (ts *testSuite) testConnectTwo(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, 2, oneMiner)
p, err := apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 0 has a peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 1 has a peer")
}
addrs, err := apis[1].NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := apis[0].NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
p, err = apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
}

View File

@ -2,11 +2,29 @@ package api
import (
"encoding/json"
"fmt"
"time"
"github.com/google/uuid"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-graphsync"
"github.com/ipld/go-ipld-prime"
"github.com/ipld/go-ipld-prime/codec/dagjson"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/go-address"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
// TODO: check if this exists anywhere else
type MultiaddrSlice []ma.Multiaddr
func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) {
@ -32,3 +50,359 @@ type ObjStat struct {
Size uint64
Links uint64
}
type PubsubScore struct {
ID peer.ID
Score *pubsub.PeerScoreSnapshot
}
type MessageSendSpec struct {
MaxFee abi.TokenAmount
MsgUuid uuid.UUID
}
type MpoolMessageWhole struct {
Msg *types.Message
Spec *MessageSendSpec
}
// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
type GraphSyncDataTransfer struct {
// GraphSync request id for this transfer
RequestID *graphsync.RequestID
// Graphsync state for this transfer
RequestState string
// If a channel ID is present, indicates whether this is the current graphsync request for this channel
// (could have changed in a restart)
IsCurrentChannelRequest bool
// Data transfer channel ID for this transfer
ChannelID *datatransfer.ChannelID
// Data transfer state for this transfer
ChannelState *DataTransferChannel
// Diagnostic information about this request, including any unexpected
// inconsistencies in request state
Diagnostics []string
}
// TransferDiagnostics give current information about transfers going over graphsync that may be helpful for debugging
type TransferDiagnostics struct {
ReceivingTransfers []*GraphSyncDataTransfer
SendingTransfers []*GraphSyncDataTransfer
}
type DataTransferChannel struct {
TransferID datatransfer.TransferID
Status datatransfer.Status
BaseCID cid.Cid
IsInitiator bool
IsSender bool
Voucher string
Message string
OtherPeer peer.ID
Transferred uint64
Stages *datatransfer.ChannelStages
}
// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id
func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelState) DataTransferChannel {
channel := DataTransferChannel{
TransferID: channelState.TransferID(),
Status: channelState.Status(),
BaseCID: channelState.BaseCID(),
IsSender: channelState.Sender() == hostID,
Message: channelState.Message(),
}
voucher := channelState.Voucher()
voucherJSON, err := ipld.Encode(voucher.Voucher, dagjson.Encode)
if err != nil {
channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error()
} else {
channel.Voucher = string(voucherJSON)
}
if channel.IsSender {
channel.IsInitiator = !channelState.IsPull()
channel.Transferred = channelState.Sent()
channel.OtherPeer = channelState.Recipient()
} else {
channel.IsInitiator = channelState.IsPull()
channel.Transferred = channelState.Received()
channel.OtherPeer = channelState.Sender()
}
return channel
}
type NetStat struct {
System *network.ScopeStat `json:",omitempty"`
Transient *network.ScopeStat `json:",omitempty"`
Services map[string]network.ScopeStat `json:",omitempty"`
Protocols map[string]network.ScopeStat `json:",omitempty"`
Peers map[string]network.ScopeStat `json:",omitempty"`
}
type NetLimit struct {
Memory int64 `json:",omitempty"`
Streams, StreamsInbound, StreamsOutbound int
Conns, ConnsInbound, ConnsOutbound int
FD int
}
type NetBlockList struct {
Peers []peer.ID
IPAddrs []string
IPSubnets []string
}
type ExtendedPeerInfo struct {
ID peer.ID
Agent string
Addrs []string
Protocols []string
ConnMgrMeta *ConnMgrInfo
}
type ConnMgrInfo struct {
FirstSeen time.Time
Value int
Tags map[string]int
Conns map[string]time.Time
}
type NodeStatus struct {
SyncStatus NodeSyncStatus
PeerStatus NodePeerStatus
ChainStatus NodeChainStatus
}
type NodeSyncStatus struct {
Epoch uint64
Behind uint64
}
type NodePeerStatus struct {
PeersToPublishMsgs int
PeersToPublishBlocks int
}
type NodeChainStatus struct {
BlocksPerTipsetLast100 float64
BlocksPerTipsetLastFinality float64
}
type CheckStatusCode int
//go:generate go run golang.org/x/tools/cmd/stringer -type=CheckStatusCode -trimprefix=CheckStatus
const (
_ CheckStatusCode = iota
// Message Checks
CheckStatusMessageSerialize
CheckStatusMessageSize
CheckStatusMessageValidity
CheckStatusMessageMinGas
CheckStatusMessageMinBaseFee
CheckStatusMessageBaseFee
CheckStatusMessageBaseFeeLowerBound
CheckStatusMessageBaseFeeUpperBound
CheckStatusMessageGetStateNonce
CheckStatusMessageNonce
CheckStatusMessageGetStateBalance
CheckStatusMessageBalance
)
type CheckStatus struct {
Code CheckStatusCode
OK bool
Err string
Hint map[string]interface{}
}
type MessageCheckStatus struct {
Cid cid.Cid
CheckStatus
}
type MessagePrototype struct {
Message types.Message
ValidNonce bool
}
type RetrievalInfo struct {
PayloadCID cid.Cid
ID retrievalmarket.DealID
PieceCID *cid.Cid
PricePerByte abi.TokenAmount
UnsealPrice abi.TokenAmount
Status retrievalmarket.DealStatus
Message string // more information about deal state, particularly errors
Provider peer.ID
BytesReceived uint64
BytesPaidFor uint64
TotalPaid abi.TokenAmount
TransferChannelID *datatransfer.ChannelID
DataTransfer *DataTransferChannel
// optional event if part of ClientGetRetrievalUpdates
Event *retrievalmarket.ClientEvent
}
type RestrievalRes struct {
DealID retrievalmarket.DealID
}
// Selector specifies ipld selector string
// - if the string starts with '{', it's interpreted as json selector string
// see https://ipld.io/specs/selectors/ and https://ipld.io/specs/selectors/fixtures/selector-fixtures-1/
// - otherwise the string is interpreted as ipld-selector-text-lite (simple ipld path)
// see https://github.com/ipld/go-ipld-selector-text-lite
type Selector string
type DagSpec struct {
// DataSelector matches data to be retrieved
// - when using textselector, the path specifies subtree
// - the matched graph must have a single root
DataSelector *Selector
// ExportMerkleProof is applicable only when exporting to a CAR file via a path textselector
// When true, in addition to the selection target, the resulting CAR will contain every block along the
// path back to, and including the original root
// When false the resulting CAR contains only the blocks of the target subdag
ExportMerkleProof bool
}
type ExportRef struct {
Root cid.Cid
// DAGs array specifies a list of DAGs to export
// - If exporting into unixfs files, only one DAG is supported, DataSelector is only used to find the targeted root node
// - If exporting into a car file
// - When exactly one text-path DataSelector is specified, exports the subgraph and its full merkle-path from the original root
// - Otherwise (multiple paths and/or JSON selector specs), determines each individual subroot and exports the subtrees as a multi-root car
// - When not specified defaults to a single DAG:
// - Data - the entire DAG: `{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}`
DAGs []DagSpec
FromLocalCAR string // if specified, get data from a local CARv2 file.
DealID retrievalmarket.DealID
}
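
Putting the pieces together, a DagSpec selects data inside the DAG rooted at ExportRef.Root, and ExportMerkleProof controls whether the proof path is kept in the resulting CAR. A hedged construction sketch (the path Links/0/Hash is made up for illustration, and a real export would use the DAG's actual root CID):

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	// Text-lite path selector; the path is made up for illustration.
	sub := api.Selector("Links/0/Hash")

	ref := api.ExportRef{
		Root: cid.Undef, // a real export would use the DAG's root CID
		DAGs: []api.DagSpec{{
			DataSelector:      &sub,
			ExportMerkleProof: true, // keep the merkle path back to the root in the CAR
		}},
	}
	fmt.Printf("%+v\n", ref)
}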
type MinerInfo struct {
Owner address.Address // Must be an ID-address.
Worker address.Address // Must be an ID-address.
NewWorker address.Address // Must be an ID-address.
ControlAddresses []address.Address // Must be ID-addresses.
WorkerChangeEpoch abi.ChainEpoch
PeerId *peer.ID
Multiaddrs []abi.Multiaddrs
WindowPoStProofType abi.RegisteredPoStProof
SectorSize abi.SectorSize
WindowPoStPartitionSectors uint64
ConsensusFaultElapsed abi.ChainEpoch
Beneficiary address.Address
BeneficiaryTerm *miner.BeneficiaryTerm
PendingBeneficiaryTerm *miner.PendingBeneficiaryChange
}
type NetworkParams struct {
NetworkName dtypes.NetworkName
BlockDelaySecs uint64
ConsensusMinerMinPower abi.StoragePower
SupportedProofTypes []abi.RegisteredSealProof
PreCommitChallengeDelay abi.ChainEpoch
ForkUpgradeParams ForkUpgradeParams
}
type ForkUpgradeParams struct {
UpgradeSmokeHeight abi.ChainEpoch
UpgradeBreezeHeight abi.ChainEpoch
UpgradeIgnitionHeight abi.ChainEpoch
UpgradeLiftoffHeight abi.ChainEpoch
UpgradeAssemblyHeight abi.ChainEpoch
UpgradeRefuelHeight abi.ChainEpoch
UpgradeTapeHeight abi.ChainEpoch
UpgradeKumquatHeight abi.ChainEpoch
BreezeGasTampingDuration abi.ChainEpoch
UpgradeCalicoHeight abi.ChainEpoch
UpgradePersianHeight abi.ChainEpoch
UpgradeOrangeHeight abi.ChainEpoch
UpgradeClausHeight abi.ChainEpoch
UpgradeTrustHeight abi.ChainEpoch
UpgradeNorwegianHeight abi.ChainEpoch
UpgradeTurboHeight abi.ChainEpoch
UpgradeHyperdriveHeight abi.ChainEpoch
UpgradeChocolateHeight abi.ChainEpoch
UpgradeOhSnapHeight abi.ChainEpoch
UpgradeSkyrHeight abi.ChainEpoch
UpgradeSharkHeight abi.ChainEpoch
UpgradeHyggeHeight abi.ChainEpoch
UpgradeLightningHeight abi.ChainEpoch
UpgradeThunderHeight abi.ChainEpoch
}
type NonceMapType map[address.Address]uint64
type MsgUuidMapType map[uuid.UUID]*types.SignedMessage
type RaftStateData struct {
NonceMap NonceMapType
MsgUuids MsgUuidMapType
}
func (n *NonceMapType) MarshalJSON() ([]byte, error) {
marshalled := make(map[string]uint64)
for a, n := range *n {
marshalled[a.String()] = n
}
return json.Marshal(marshalled)
}
func (n *NonceMapType) UnmarshalJSON(b []byte) error {
unmarshalled := make(map[string]uint64)
err := json.Unmarshal(b, &unmarshalled)
if err != nil {
return err
}
*n = make(map[address.Address]uint64)
for saddr, nonce := range unmarshalled {
a, err := address.NewFromString(saddr)
if err != nil {
return err
}
(*n)[a] = nonce
}
return nil
}
func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) {
marshalled := make(map[string]*types.SignedMessage)
for u, msg := range *m {
marshalled[u.String()] = msg
}
return json.Marshal(marshalled)
}
func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
unmarshalled := make(map[string]*types.SignedMessage)
err := json.Unmarshal(b, &unmarshalled)
if err != nil {
return err
}
*m = make(map[uuid.UUID]*types.SignedMessage)
for suid, msg := range unmarshalled {
u, err := uuid.Parse(suid)
if err != nil {
return err
}
(*m)[u] = msg
}
return nil
}
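
The custom (un)marshalers above exist because JSON object keys must be strings, so address.Address and uuid.UUID keys are stringified on the way out and re-parsed on the way in. A minimal round-trip sketch (the address t01000 is an arbitrary example):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	a, err := address.NewFromString("t01000")
	if err != nil {
		panic(err)
	}

	nm := api.NonceMapType{a: 42}
	b, _ := json.Marshal(&nm) // pointer receiver: marshal &nm, not nm
	fmt.Println(string(b))    // {"t01000":42}

	var back api.NonceMapType
	_ = json.Unmarshal(b, &back)
	fmt.Println(back[a]) // 42
}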
// ChainExportConfig holds configuration for chain ranged exports.
type ChainExportConfig struct {
WriteBufferSize int
NumWorkers int
IncludeMessages bool
IncludeReceipts bool
IncludeStateRoots bool
}

5
api/types/actors.go Normal file
View File

@ -0,0 +1,5 @@
package apitypes
import "github.com/filecoin-project/go-state-types/network"
type NetworkVersion = network.Version

3
api/types/openrpc.go Normal file
View File

@ -0,0 +1,3 @@
package apitypes
type OpenRPCDocument map[string]interface{}

5
api/types/rpc.go Normal file
View File

@ -0,0 +1,5 @@
package apitypes
type Aliaser interface {
AliasMethod(alias, original string)
}

View File

@ -4,7 +4,7 @@ import (
"context"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/crypto"
"github.com/filecoin-project/go-state-types/crypto"
)
type SignFunc = func(context.Context, []byte) (*crypto.Signature, error)

776
api/v0api/full.go Normal file
View File

@ -0,0 +1,776 @@
package v0api
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
textselector "github.com/ipld/go-ipld-selector-text-lite"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
datatransfer "github.com/filecoin-project/go-data-transfer/v2"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v8/paych"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/dline"
abinetwork "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types"
lminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
"github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
"github.com/filecoin-project/lotus/node/modules/dtypes"
"github.com/filecoin-project/lotus/node/repo/imports"
)
//go:generate go run github.com/golang/mock/mockgen -destination=v0mocks/mock_full.go -package=v0mocks . FullNode
// MODIFYING THE API INTERFACE
//
// NOTE: This is the V0 (Stable) API - when adding methods to this interface,
// you'll need to make sure they are also present on the V1 (Unstable) API
//
// This API is implemented in `v1_wrapper.go` as a compatibility layer backed
// by the V1 api
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
// FullNode API is a low-level interface to the Filecoin network full node
type FullNode interface {
Common
Net
// MethodGroup: Chain
// The Chain method group contains methods for interacting with the
// blockchain, but that do not require any form of state computation.
// ChainNotify returns channel with chain head updates.
// First message is guaranteed to be of len == 1, and type == 'current'.
ChainNotify(context.Context) (<-chan []*api.HeadChange, error) //perm:read
// ChainHead returns the current head of the chain.
ChainHead(context.Context) (*types.TipSet, error) //perm:read
// ChainGetRandomnessFromTickets is used to sample the chain for randomness.
ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
// ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) //perm:read
// ChainGetBlock returns the block specified by the given CID.
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) //perm:read
// ChainGetTipSet returns the tipset specified by the given TipSetKey.
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) //perm:read
// ChainGetBlockMessages returns messages stored in the specified block.
//
// Note: If there are multiple blocks in a tipset, it's likely that some
// messages will be duplicated. It's also possible for blocks in a tipset to have
// different messages from the same sender at the same nonce. When that happens,
// only the first message (in a block with lowest ticket) will be considered
// for execution
//
// NOTE: THIS METHOD SHOULD ONLY BE USED FOR GETTING MESSAGES IN A SPECIFIC BLOCK
//
// DO NOT USE THIS METHOD TO GET MESSAGES INCLUDED IN A TIPSET
// Use ChainGetParentMessages, which will perform correct message deduplication
ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*api.BlockMessages, error) //perm:read
// ChainGetParentReceipts returns receipts for messages in parent tipset of
// the specified block. The receipts in the list returned is one-to-one with the
// messages returned by a call to ChainGetParentMessages with the same blockCid.
ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) //perm:read
// ChainGetParentMessages returns messages stored in parent tipset of the
// specified block.
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) //perm:read
// ChainGetMessagesInTipset returns messages stored in the specified tipset
ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) //perm:read
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
// If there are no blocks at the specified epoch, a tipset at an earlier epoch
// will be returned.
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) //perm:read
// ChainReadObj reads ipld nodes referenced by the specified CID from chain
// blockstore and returns raw bytes.
ChainReadObj(context.Context, cid.Cid) ([]byte, error) //perm:read
// ChainDeleteObj deletes node referenced by the given CID
ChainDeleteObj(context.Context, cid.Cid) error //perm:admin
// ChainPutObj puts an object into the blockstore
ChainPutObj(context.Context, blocks.Block) error
// ChainHasObj checks if a given CID exists in the chain blockstore.
ChainHasObj(context.Context, cid.Cid) (bool, error) //perm:read
// ChainStatObj returns statistics about the graph referenced by 'obj'.
// If 'base' is also specified, then the returned stat will be a diff
// between the two objects.
ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) //perm:read
// ChainSetHead forcefully sets current chain head. Use with caution.
ChainSetHead(context.Context, types.TipSetKey) error //perm:admin
// ChainGetGenesis returns the genesis tipset.
ChainGetGenesis(context.Context) (*types.TipSet, error) //perm:read
// ChainTipSetWeight computes weight for the specified tipset.
ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) //perm:read
ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) //perm:read
// ChainGetMessage reads a message referenced by the specified CID from the
// chain blockstore.
ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) //perm:read
// ChainGetPath returns a set of revert/apply operations needed to get from
// one tipset to another, for example:
// ```
// to
// ^
// from tAA
// ^ ^
// tBA tAB
// ^---*--^
// ^
// tRR
// ```
// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) //perm:read
// ChainExport returns a stream of bytes with CAR dump of chain data.
// The exported chain data includes the header chain from the given tipset
// back to genesis, the entire genesis state, and the most recent 'nroots'
// state trees.
// If oldmsgskip is set, messages from before the requested roots are also not included.
ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error) //perm:read
// MethodGroup: Beacon
// The Beacon method group contains methods for interacting with the random beacon (DRAND)
// BeaconGetEntry returns the beacon entry for the given filecoin epoch. If
// the entry has not yet been produced, the call will block until the entry
// becomes available
BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) //perm:read
// GasEstimateFeeCap estimates gas fee cap
GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error) //perm:read
// GasEstimateGasLimit estimates gas used by the message and returns it.
// It fails if message fails to execute.
GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error) //perm:read
// GasEstimateGasPremium estimates what gas price should be used for a
// message to have high likelihood of inclusion in `nblocksincl` epochs.
GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) //perm:read
// GasEstimateMessageGas estimates gas values for unset message gas fields
GasEstimateMessageGas(context.Context, *types.Message, *api.MessageSendSpec, types.TipSetKey) (*types.Message, error) //perm:read
// MethodGroup: Sync
// The Sync method group contains methods for interacting with and
// observing the lotus sync service.
// SyncState returns the current status of the lotus sync system.
SyncState(context.Context) (*api.SyncState, error) //perm:read
// SyncSubmitBlock can be used to submit a newly created block to the
// network through this node.
SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error //perm:write
// SyncIncomingBlocks returns a channel streaming incoming, potentially not
// yet synced block headers.
SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) //perm:read
// SyncCheckpoint marks a block as checkpointed, meaning that it won't ever fork away from it.
SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error //perm:admin
// SyncMarkBad marks a block as bad, meaning that it won't ever be synced.
// Use with extreme caution.
SyncMarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
// SyncUnmarkBad unmarks a block as bad, making it possible for it to be validated and synced again.
SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error //perm:admin
// SyncUnmarkAllBad purges bad block cache, making it possible to sync to chains previously marked as bad
SyncUnmarkAllBad(ctx context.Context) error //perm:admin
// SyncCheckBad checks if a block was marked as bad, and if it was, returns
// the reason.
SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) //perm:read
// SyncValidateTipset indicates whether the provided tipset is valid or not
SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) //perm:read
// MethodGroup: Mpool
// The Mpool methods are for interacting with the message pool. The message pool
// manages all incoming and outgoing 'messages' going over the network.
// MpoolPending returns pending mempool messages.
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) //perm:read
// MpoolSelect returns a list of pending messages for inclusion in the next block
MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error) //perm:read
// MpoolPush pushes a signed message to mempool.
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
// MpoolPushUntrusted pushes a signed message to mempool from untrusted sources.
MpoolPushUntrusted(context.Context, *types.SignedMessage) (cid.Cid, error) //perm:write
// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
// to mempool.
// maxFee is only used when GasFeeCap/GasPremium fields aren't specified
//
// When maxFee is set to 0, MpoolPushMessage will guess appropriate fee
// based on current chain conditions
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec) (*types.SignedMessage, error) //perm:sign
// MpoolBatchPush batch pushes signed messages to the mempool.
MpoolBatchPush(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
// MpoolBatchPushUntrusted batch pushes signed messages to the mempool from untrusted sources.
MpoolBatchPushUntrusted(context.Context, []*types.SignedMessage) ([]cid.Cid, error) //perm:write
// MpoolBatchPushMessage batch pushes unsigned messages to the mempool.
MpoolBatchPushMessage(context.Context, []*types.Message, *api.MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign
// MpoolGetNonce gets next nonce for the specified sender.
// Note that this method may not be atomic. Use MpoolPushMessage instead.
MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
MpoolSub(context.Context) (<-chan api.MpoolUpdate, error) //perm:read
// MpoolClear clears pending messages from the mpool
MpoolClear(context.Context, bool) error //perm:write
// MpoolGetConfig returns (a copy of) the current mpool config
MpoolGetConfig(context.Context) (*types.MpoolConfig, error) //perm:read
// MpoolSetConfig sets the mpool config to (a copy of) the supplied config
MpoolSetConfig(context.Context, *types.MpoolConfig) error //perm:admin
// MethodGroup: Miner
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) //perm:read
MinerCreateBlock(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) //perm:write
// // UX ?
// MethodGroup: Wallet
// WalletNew creates a new address in the wallet with the given sigType.
// Available key types: bls, secp256k1, secp256k1-ledger
// Support for numerical types: 1 - secp256k1, 2 - BLS is deprecated
WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:write
// WalletHas indicates whether the given address is in the wallet.
WalletHas(context.Context, address.Address) (bool, error) //perm:write
// WalletList lists all the addresses in the wallet.
WalletList(context.Context) ([]address.Address, error) //perm:write
// WalletBalance returns the balance of the given address at the current head of the chain.
WalletBalance(context.Context, address.Address) (types.BigInt, error) //perm:read
// WalletSign signs the given bytes using the given address.
WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) //perm:sign
// WalletSignMessage signs the given message using the given address.
WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) //perm:sign
// WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
// The address does not have to be in the wallet.
WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
// WalletDefaultAddress returns the address marked as default in the wallet.
WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
// WalletSetDefault marks the given address as the default one.
WalletSetDefault(context.Context, address.Address) error //perm:write
// WalletExport returns the private key of an address in the wallet.
WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
// WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
// WalletDelete deletes an address from the wallet.
WalletDelete(context.Context, address.Address) error //perm:admin
// WalletValidateAddress validates whether a given string can be decoded as a well-formed address
WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read
// Other
// MethodGroup: Client
// The Client methods all have to do with interacting with the storage and
// retrieval markets as a client
// ClientImport imports file under the specified path into filestore.
ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) //perm:admin
// ClientRemoveImport removes file import
ClientRemoveImport(ctx context.Context, importID imports.ID) error //perm:admin
// ClientStartDeal proposes a deal with a miner.
ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin
// ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:write
// ClientGetDealInfo returns the latest information about a given deal.
ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read
// ClientListDeals returns information about the deals made by the local client.
ClientListDeals(ctx context.Context) ([]api.DealInfo, error) //perm:write
// ClientGetDealUpdates returns the status of updated deals
ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) //perm:write
// ClientGetDealStatus returns status given a code
ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
// ClientHasLocal indicates whether a certain CID is locally stored.
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) //perm:read
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error //perm:admin
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
// of status updates.
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
// ClientListRetrievals returns information about retrievals made by the local client
ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write
// ClientGetRetrievalUpdates returns status of updated retrieval deals
ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) //perm:write
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
// ClientDealPieceCID calculates the CommP and data size of the specified CID
ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read
// ClientCalcCommP calculates the CommP for a specified file
ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) //perm:write
// ClientGenCar generates a CAR file for the specified file.
ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error //perm:write
// ClientDealSize calculates real deal data size
ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) //perm:read
// ClientListTransfers returns the status of all ongoing transfers of data
ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) //perm:write
ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) //perm:write
// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
// which are stuck due to insufficient funds
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write
// ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write
// ClientUnimport removes references to the specified file from filestore
// ClientUnimport(path string)
// ClientListImports lists imported files and their root CIDs
ClientListImports(ctx context.Context) ([]api.Import, error) //perm:write
// ClientListAsks() []Ask
// MethodGroup: State
// The State methods are used to query, inspect, and interact with chain state.
// Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
// A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used.
// StateCall runs the given message and returns its result without any persisted changes.
//
// StateCall applies the message to the tipset's parent state. The
// message is not applied on-top-of the messages in the passed-in
// tipset.
StateCall(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) //perm:read
// StateReplay replays a given message, assuming it was included in a block in the specified tipset.
//
// If a tipset key is provided, and a replacing message is not found on chain,
// the method will return an error saying that the message wasn't found
//
// If no tipset key is provided, the appropriate tipset is looked up, and if
// the message was gas-repriced, the on-chain message will be replayed - in
// that case the returned InvocResult.MsgCid will not match the Cid param
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that InvocResult.MsgCid is equal to the provided Cid.
// Without this check both the requested and original message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID, any of Gas values, and
// different signature, but with all other parameters matching (source/destination,
// nonce, params, etc.)
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) //perm:read
// StateGetActor returns the indicated actor's nonce and balance.
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) //perm:read
// StateReadState returns the indicated actor's state.
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) //perm:read
// StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) //perm:read
// StateDecodeParams attempts to decode the provided params, based on the recipient actor address and method number.
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error) //perm:read
// StateNetworkName returns the name of the network the node is synced to
StateNetworkName(context.Context) (dtypes.NetworkName, error) //perm:read
// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) //perm:read
// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
// and returns the deadline-related calculations.
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) //perm:read
// StateMinerPower returns the power of the indicated miner
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) //perm:read
// StateMinerInfo returns info about the indicated miner
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) //perm:read
// StateMinerDeadlines returns all the proving deadlines for the given miner
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) //perm:read
// StateMinerPartitions returns all partitions in the specified deadline
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]api.Partition, error) //perm:read
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*api.Fault, error) //perm:read
// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error) //perm:read
// StateMinerPreCommitDepositForPower returns the precommit deposit for the specified miner's sector
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
// StateMinerSectorAllocated checks if a sector is allocated
StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (bool, error) //perm:read
// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) //perm:read
// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found
// NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
// expiration epoch
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) //perm:read
// StateSectorExpiration returns the epoch at which the given sector will expire
StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*lminer.SectorExpiration, error) //perm:read
// StateSectorPartition finds deadline/partition with the specified sector
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*lminer.SectorLocation, error) //perm:read
// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
// Without this check both the requested and replacing message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID; it may differ in any
// of its Gas values and in its signature, but all other parameters match
// (source/destination, nonce, params, etc.)
StateSearchMsg(context.Context, cid.Cid) (*api.MsgLookup, error) //perm:read
// StateSearchMsgLimited looks back up to limit epochs in the chain for a message, and returns its receipt and the tipset where it was executed
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
// Without this check both the requested and replacing message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID; it may differ in any
// of its Gas values and in its signature, but all other parameters match
// (source/destination, nonce, params, etc.)
StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) //perm:read
// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
// message arrives on chain, and gets to the indicated confidence depth.
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
// Without this check both the requested and replacing message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID; it may differ in any
// of its Gas values and in its signature, but all other parameters match
// (source/destination, nonce, params, etc.)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) //perm:read
// StateWaitMsgLimited looks back up to limit epochs in the chain for a message.
// If not found, it blocks until the message arrives on chain, and gets to the
// indicated confidence depth.
//
// NOTE: If a replacing message is found on chain, this method will return
// a MsgLookup for the replacing message - the MsgLookup.Message will be a different
// CID than the one provided in the 'cid' param, MsgLookup.Receipt will contain the
// result of the execution of the replacing message.
//
// If the caller wants to ensure that exactly the requested message was executed,
// they MUST check that MsgLookup.Message is equal to the provided 'cid'.
// Without this check both the requested and replacing message may appear as
// successfully executed on-chain, which may look like a double-spend.
//
// A replacing message is a message with a different CID; it may differ in any
// of its Gas values and in its signature, but all other parameters match
// (source/destination, nonce, params, etc.)
StateWaitMsgLimited(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) //perm:read
// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
// StateListActors returns the addresses of every actor in the state
StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) //perm:read
// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) //perm:read
// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
StateMarketParticipants(context.Context, types.TipSetKey) (map[string]api.MarketBalance, error) //perm:read
// StateMarketDeals returns information about every deal in the Storage Market
StateMarketDeals(context.Context, types.TipSetKey) (map[string]*api.MarketDeal, error) //perm:read
// StateMarketStorageDeal returns information about the indicated deal
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) //perm:read
// StateGetAllocationForPendingDeal returns the allocation for a given deal ID of a pending deal.
StateGetAllocationForPendingDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
// StateGetAllocation returns the allocation for a given address and allocation ID.
StateGetAllocation(ctx context.Context, clientAddr address.Address, allocationId verifregtypes.AllocationId, tsk types.TipSetKey) (*verifregtypes.Allocation, error) //perm:read
// StateGetAllocations returns all the allocations for a given client.
StateGetAllocations(ctx context.Context, clientAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.AllocationId]verifregtypes.Allocation, error) //perm:read
// StateGetClaim returns the claim for a given address and claim ID.
StateGetClaim(ctx context.Context, providerAddr address.Address, claimId verifregtypes.ClaimId, tsk types.TipSetKey) (*verifregtypes.Claim, error) //perm:read
// StateGetClaims returns all the claims for a given provider.
StateGetClaims(ctx context.Context, providerAddr address.Address, tsk types.TipSetKey) (map[verifregtypes.ClaimId]verifregtypes.Claim, error) //perm:read
// StateLookupID retrieves the ID address of the given address
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateAccountKey returns the public key address of the given ID address
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) //perm:read
// StateChangedActors returns all the actors whose states change between the two given state CIDs
// TODO: Should this take tipset keys instead?
StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) //perm:read
// StateGetReceipt returns the message receipt for the given message or for a
// matching gas-repriced replacing message
//
// NOTE: If the requested message was replaced, this method will return the receipt
// for the replacing message - if the caller needs the receipt for exactly the
// requested message, use StateSearchMsg().Receipt, and check that MsgLookup.Message
// matches the requested CID
//
// DEPRECATED: Use StateSearchMsg, this method won't be supported in v1 API
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) //perm:read
// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) //perm:read
// StateCompute is a flexible command that applies the given messages on the given tipset.
// The messages are run as though the VM were at the provided height.
//
// When called, StateCompute will:
// - Load the provided tipset, or use the current chain head if not provided
// - Compute the tipset state of the provided tipset on top of the parent state
// - (note that this step runs before vmheight is applied to the execution)
// - Execute state upgrades if any were scheduled at the epoch, or in null
// blocks preceding the tipset
// - Call the cron actor on null blocks preceding the tipset
// - For each block in the tipset
// - Apply messages in blocks in the specified order
// - Award block reward by calling the reward actor
// - Call the cron actor for the current epoch
// - If the specified vmheight is higher than the current epoch, apply any
// needed state upgrades to the state
// - Apply the specified messages to the state
//
// The vmheight parameter sets the VM execution epoch, and can be used to simulate
// message execution in different network versions. If the specified vmheight
// epoch is higher than the epoch of the specified tipset, any state upgrades
// until the vmheight will be executed on the state before applying messages
// specified by the user.
//
// Note that the initial tipset state computation is not affected by the
// vmheight parameter - only the messages in the `apply` set are affected by it.
//
// If the caller wants to simply compute the state, vmheight should be set to
// the epoch of the specified tipset.
//
// Messages in the `apply` parameter must have the correct nonces, and gas
// values set.
StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) //perm:read
// StateVerifierStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
StateVerifierStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
// StateVerifiedClientStatus returns the data cap for the given address.
// Returns nil if there is no entry in the data cap table for the
// address.
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) //perm:read
// StateVerifiedRegistryRootKey returns the address of the Verified Registry's root key
StateVerifiedRegistryRootKey(ctx context.Context, tsk types.TipSetKey) (address.Address, error) //perm:read
// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
// can issue. It takes the deal size and verified status as parameters.
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (api.DealCollateralBounds, error) //perm:read
// StateCirculatingSupply returns the exact circulating supply of Filecoin at the given tipset.
// This is not used anywhere in the protocol itself, and is only for external consumption.
StateCirculatingSupply(context.Context, types.TipSetKey) (abi.TokenAmount, error) //perm:read
// StateVMCirculatingSupplyInternal returns an approximation of the circulating supply of Filecoin at the given tipset.
// This is the value reported by the runtime interface to actors code.
StateVMCirculatingSupplyInternal(context.Context, types.TipSetKey) (api.CirculatingSupply, error) //perm:read
// StateNetworkVersion returns the network version at the given tipset
StateNetworkVersion(context.Context, types.TipSetKey) (apitypes.NetworkVersion, error) //perm:read
// StateActorCodeCIDs returns the CIDs of all the builtin actors for the given network version
StateActorCodeCIDs(context.Context, abinetwork.Version) (map[string]cid.Cid, error) //perm:read
// StateActorManifestCID returns the CID of the builtin actors manifest for the given network version
StateActorManifestCID(context.Context, abinetwork.Version) (cid.Cid, error) //perm:read
// StateGetRandomnessFromTickets is used to sample the chain for randomness.
StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
// StateGetRandomnessFromBeacon is used to sample the beacon for randomness.
StateGetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) //perm:read
// StateGetNetworkParams returns the current network params
StateGetNetworkParams(ctx context.Context) (*api.NetworkParams, error) //perm:read
// MethodGroup: Msig
// The Msig methods are used to interact with multisig wallets on the
// filecoin network
// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) //perm:read
// MsigGetVestingSchedule returns the vesting details of a given multisig.
MsigGetVestingSchedule(context.Context, address.Address, types.TipSetKey) (api.MsigVesting, error) //perm:read
// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
// It takes the following params: <multisig address>, <start epoch>, <end epoch>
MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error) //perm:read
// MsigGetPending returns pending transactions for the given multisig
// wallet. Once pending transactions are fully approved, they will no longer
// appear here.
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error) //perm:read
// MsigCreate creates a multisig wallet
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
// <initial balance>, <sender address of the create msg>, <gas price>
MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) //perm:sign
// MsigPropose proposes a multisig message
// It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
// <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
// MsigApprove approves a previously-proposed multisig message by transaction ID
// It takes the following params: <multisig address>, <proposed transaction ID>, <signer address>
MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) //perm:sign
// MsigApproveTxnHash approves a previously-proposed multisig message, specified
// using both transaction ID and a hash of the parameters used in the
// proposal. This method of approval can be used to ensure you only approve
// exactly the transaction you think you are.
// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
// <sender address of the approve msg>, <method to call in the approved message>, <params to include in the proposed message>
MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
// MsigCancel cancels a previously-proposed multisig message
// It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
// MsigAddPropose proposes adding a signer in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
// <new signer>, <whether the number of required signers should be increased>
MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign
// MsigAddApprove approves a previously proposed AddSigner message
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <new signer>, <whether the number of required signers should be increased>
MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign
// MsigAddCancel cancels a previously proposed AddSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <new signer>, <whether the number of required signers should be increased>
MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) //perm:sign
// MsigSwapPropose proposes swapping 2 signers in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>,
// <old signer>, <new signer>
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign
// MsigSwapApprove approves a previously proposed SwapSigner
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <old signer>, <new signer>
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign
// MsigSwapCancel cancels a previously proposed SwapSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <old signer>, <new signer>
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) //perm:sign
// MsigRemoveSigner proposes the removal of a signer from the multisig.
// It accepts the multisig to make the change on, the proposer address to
// send the message from, the address to be removed, and a boolean
// indicating whether or not the signing threshold should be lowered by one
// along with the address removal.
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) //perm:sign
// MarketAddBalance adds funds to the market actor
MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MarketGetReserved gets the amount of funds that are currently reserved for the address
MarketGetReserved(ctx context.Context, addr address.Address) (types.BigInt, error) //perm:sign
// MarketReserveFunds reserves funds for a deal
MarketReserveFunds(ctx context.Context, wallet address.Address, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MarketReleaseFunds releases funds reserved by MarketReserveFunds
MarketReleaseFunds(ctx context.Context, addr address.Address, amt types.BigInt) error //perm:sign
// MarketWithdraw withdraws unlocked funds from the market actor
MarketWithdraw(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
// MethodGroup: Paych
// The Paych methods are for interacting with and managing payment channels
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) //perm:sign
PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error) //perm:sign
PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) //perm:sign
PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) //perm:sign
PaychList(context.Context) ([]address.Address, error) //perm:read
PaychStatus(context.Context, address.Address) (*api.PaychStatus, error) //perm:read
PaychSettle(context.Context, address.Address) (cid.Cid, error) //perm:sign
PaychCollect(context.Context, address.Address) (cid.Cid, error) //perm:sign
PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) //perm:sign
PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) //perm:sign
PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error //perm:read
PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) //perm:read
PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*api.VoucherCreateResult, error) //perm:sign
PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) //perm:write
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign
// CreateBackup creates a node backup under the specified file name. The
// method requires that the lotus daemon is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
// the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error //perm:admin
}
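// Example (an illustrative sketch, not part of the original file): the
// replacing-message caveat documented above means callers of StateWaitMsg MUST
// compare the returned CID with the one they asked about. `node` is assumed to
// be any FullNode implementation:
//
//	ml, err := node.StateWaitMsg(ctx, msgCid, confidence)
//	if err != nil {
//		return err
//	}
//	if !ml.Message.Equals(msgCid) {
//		// a gas-repriced replacement was executed instead of our message
//		return xerrors.Errorf("message %s was replaced by %s", msgCid, ml.Message)
//	}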
func OfferOrder(o api.QueryOffer, client address.Address) RetrievalOrder {
return RetrievalOrder{
Root: o.Root,
Piece: o.Piece,
Size: o.Size,
Total: o.MinPrice,
UnsealPrice: o.UnsealPrice,
PaymentInterval: o.PaymentInterval,
PaymentIntervalIncrease: o.PaymentIntervalIncrease,
Client: client,
Miner: o.Miner,
MinerPeer: &o.MinerPeer,
}
}
type RetrievalOrder struct {
// TODO: make this less unixfs specific
Root cid.Cid
Piece *cid.Cid
DatamodelPathSelector *textselector.Expression
Size uint64
FromLocalCAR string // if specified, get data from a local CARv2 file.
// TODO: support offset
Total types.BigInt
UnsealPrice types.BigInt
PaymentInterval uint64
PaymentIntervalIncrease uint64
Client address.Address
Miner address.Address
MinerPeer *retrievalmarket.RetrievalPeer
}
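// Usage sketch (illustrative; `node`, `root` and `clientAddr` are assumed to
// exist): pick the first QueryOffer returned by ClientFindData, turn it into a
// RetrievalOrder with OfferOrder, and retrieve it:
//
//	offers, err := node.ClientFindData(ctx, root, nil)
//	if err != nil || len(offers) == 0 {
//		return err
//	}
//	order := OfferOrder(offers[0], clientAddr)
//	err = node.ClientRetrieve(ctx, order, &api.FileRef{Path: "/tmp/out.dat"})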

79
api/v0api/gateway.go Normal file

@ -0,0 +1,79 @@
package v0api
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
"github.com/filecoin-project/go-state-types/dline"
abinetwork "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
// MODIFYING THE API INTERFACE
//
// NOTE: This is the V0 (Stable) API - when adding methods to this interface,
// you'll need to make sure they are also present on the V1 (Unstable) API
//
// This API is implemented in `v1_wrapper.go` as a compatibility layer backed
// by the V1 api
//
// When adding / changing methods in this file:
// * Do the change here
// * Adjust implementation in `node/impl/`
// * Run `make gen` - this will:
// * Generate proxy structs
// * Generate mocks
// * Generate markdown docs
// * Generate openrpc blobs
type Gateway interface {
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error)
GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error)
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error)
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainPutObj(context.Context, blocks.Block) error
ChainHead(ctx context.Context) (*types.TipSet, error)
ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error)
ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error)
ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error)
ChainNotify(context.Context) (<-chan []*api.HeadChange, error)
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error)
MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error)
MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error)
MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error)
MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error)
MsigGetPending(context.Context, address.Address, types.TipSetKey) ([]*api.MsigTransaction, error)
StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error)
StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error)
StateDecodeParams(ctx context.Context, toAddr address.Address, method abi.MethodNum, params []byte, tsk types.TipSetKey) (interface{}, error)
StateGetActor(ctx context.Context, actor address.Address, ts types.TipSetKey) (*types.Actor, error)
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error)
StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error)
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error)
StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error)
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error)
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error)
StateNetworkName(context.Context) (dtypes.NetworkName, error)
StateNetworkVersion(context.Context, types.TipSetKey) (abinetwork.Version, error)
StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error)
StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error)
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error)
WalletBalance(context.Context, address.Address) (types.BigInt, error)
Version(context.Context) (api.APIVersion, error)
}
var _ Gateway = *new(FullNode)
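// The assignment above is a compile-time assertion: the build breaks if
// FullNode ever stops satisfying Gateway, which keeps the gateway surface a
// strict subset of the full node API.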

32
api/v0api/latest.go Normal file

@ -0,0 +1,32 @@
package v0api
import (
"github.com/filecoin-project/lotus/api"
)
type Common = api.Common
type Net = api.Net
type CommonNet = api.CommonNet
type CommonStruct = api.CommonStruct
type CommonStub = api.CommonStub
type NetStruct = api.NetStruct
type NetStub = api.NetStub
type CommonNetStruct = api.CommonNetStruct
type CommonNetStub = api.CommonNetStub
type StorageMiner = api.StorageMiner
type StorageMinerStruct = api.StorageMinerStruct
type Worker = api.Worker
type WorkerStruct = api.WorkerStruct
type Wallet = api.Wallet
func PermissionedStorMinerAPI(a StorageMiner) StorageMiner {
return api.PermissionedStorMinerAPI(a)
}
func PermissionedWorkerAPI(a Worker) Worker {
return api.PermissionedWorkerAPI(a)
}

14
api/v0api/permissioned.go Normal file

@ -0,0 +1,14 @@
package v0api
import (
"github.com/filecoin-project/go-jsonrpc/auth"
"github.com/filecoin-project/lotus/api"
)
func PermissionedFullAPI(a FullNode) FullNode {
var out FullNodeStruct
auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.Internal)
auth.PermissionedProxy(api.AllPermissions, api.DefaultPerms, a, &out.CommonStruct.Internal)
return &out
}
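// A minimal sketch of how the permissioned wrapper is meant to be served
// (assuming github.com/filecoin-project/go-jsonrpc; `impl` is a concrete
// FullNode implementation):
//
//	secured := PermissionedFullAPI(impl)
//	rpcServer := jsonrpc.NewServer()
//	rpcServer.Register("Filecoin", secured)
//
// Each request's context must carry the caller's token permissions; the proxy
// rejects any method whose declared perm is not in that set.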

3014
api/v0api/proxy_gen.go Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

369
api/v0api/v1_wrapper.go Normal file

@ -0,0 +1,369 @@
package v0api
import (
"context"
"github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p/core/peer"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/builtin/v9/miner"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/types"
marketevents "github.com/filecoin-project/lotus/markets/loggers"
)
type WrapperV1Full struct {
v1api.FullNode
}
func (w *WrapperV1Full) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
pi, err := w.FullNode.StateSectorPreCommitInfo(ctx, maddr, s, tsk)
if err != nil {
return miner.SectorPreCommitOnChainInfo{}, err
}
if pi == nil {
return miner.SectorPreCommitOnChainInfo{}, xerrors.Errorf("precommit info does not exist")
}
return *pi, nil
}
func (w *WrapperV1Full) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) {
return w.FullNode.StateSearchMsg(ctx, types.EmptyTSK, msg, api.LookbackNoLimit, true)
}
func (w *WrapperV1Full) StateSearchMsgLimited(ctx context.Context, msg cid.Cid, limit abi.ChainEpoch) (*api.MsgLookup, error) {
return w.FullNode.StateSearchMsg(ctx, types.EmptyTSK, msg, limit, true)
}
func (w *WrapperV1Full) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) {
return w.FullNode.StateWaitMsg(ctx, msg, confidence, api.LookbackNoLimit, true)
}
func (w *WrapperV1Full) StateWaitMsgLimited(ctx context.Context, msg cid.Cid, confidence uint64, limit abi.ChainEpoch) (*api.MsgLookup, error) {
return w.FullNode.StateWaitMsg(ctx, msg, confidence, limit, true)
}
func (w *WrapperV1Full) StateGetReceipt(ctx context.Context, msg cid.Cid, from types.TipSetKey) (*types.MessageReceipt, error) {
ml, err := w.FullNode.StateSearchMsg(ctx, from, msg, api.LookbackNoLimit, true)
if err != nil {
return nil, err
}
if ml == nil {
return nil, nil
}
return &ml.Receipt, nil
}
func (w *WrapperV1Full) Version(ctx context.Context) (api.APIVersion, error) {
ver, err := w.FullNode.Version(ctx)
if err != nil {
return api.APIVersion{}, err
}
ver.APIVersion = api.FullAPIVersion0
return ver, nil
}
func (w *WrapperV1Full) executePrototype(ctx context.Context, p *api.MessagePrototype) (cid.Cid, error) {
sm, err := w.FullNode.MpoolPushMessage(ctx, &p.Message, nil)
if err != nil {
return cid.Undef, xerrors.Errorf("pushing message: %w", err)
}
return sm.Cid(), nil
}
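// All the Msig* wrappers below follow the same pattern: the V1 API returns an
// unsigned api.MessagePrototype, and this V0 compatibility layer restores the
// old fire-and-forget behaviour by pushing the prototype to the mempool via
// executePrototype.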
func (w *WrapperV1Full) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
p, err := w.FullNode.MsigCreate(ctx, req, addrs, duration, val, src, gp)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
p, err := w.FullNode.MsigPropose(ctx, msig, to, amt, src, method, params)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) {
p, err := w.FullNode.MsigApprove(ctx, msig, txID, src)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
p, err := w.FullNode.MsigApproveTxnHash(ctx, msig, txID, proposer, to, amt, src, method, params)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
p, err := w.FullNode.MsigCancelTxnHash(ctx, msig, txID, to, amt, src, method, params)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
p, err := w.FullNode.MsigAddPropose(ctx, msig, src, newAdd, inc)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
p, err := w.FullNode.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) {
p, err := w.FullNode.MsigAddCancel(ctx, msig, src, txID, newAdd, inc)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
p, err := w.FullNode.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
p, err := w.FullNode.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
p, err := w.FullNode.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) {
p, err := w.FullNode.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
return w.StateGetRandomnessFromTickets(ctx, personalization, randEpoch, entropy, tsk)
}
func (w *WrapperV1Full) ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) {
return w.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk)
}
func (w *WrapperV1Full) ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error {
events := make(chan marketevents.RetrievalEvent)
go w.clientRetrieve(ctx, order, ref, events)
for {
select {
case evt, ok := <-events:
if !ok { // done successfully
return nil
}
if evt.Err != "" {
return xerrors.Errorf("retrieval failed: %s", evt.Err)
}
case <-ctx.Done():
return xerrors.Errorf("retrieval timed out")
}
}
}
func (w *WrapperV1Full) ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
events := make(chan marketevents.RetrievalEvent)
go w.clientRetrieve(ctx, order, ref, events)
return events, nil
}
func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents <-chan api.RetrievalInfo, events chan marketevents.RetrievalEvent) error {
for {
var subscribeEvent api.RetrievalInfo
var evt retrievalmarket.ClientEvent
select {
case <-ctx.Done():
return xerrors.New("Retrieval Timed Out")
case subscribeEvent = <-subscribeEvents:
if subscribeEvent.ID != dealID {
// we can't check the deal ID ahead of time because:
// 1. We need to subscribe before retrieving.
// 2. We won't know the deal ID until after retrieving.
continue
}
if subscribeEvent.Event != nil {
evt = *subscribeEvent.Event
}
}
select {
case <-ctx.Done():
return xerrors.New("Retrieval Timed Out")
case events <- marketevents.RetrievalEvent{
Event: evt,
Status: subscribeEvent.Status,
BytesReceived: subscribeEvent.BytesReceived,
FundsSpent: subscribeEvent.TotalPaid,
}:
}
switch subscribeEvent.Status {
case retrievalmarket.DealStatusCompleted:
return nil
case retrievalmarket.DealStatusRejected:
return xerrors.Errorf("Retrieval Proposal Rejected: %s", subscribeEvent.Message)
case
retrievalmarket.DealStatusDealNotFound,
retrievalmarket.DealStatusErrored:
return xerrors.Errorf("Retrieval Error: %s", subscribeEvent.Message)
}
}
}
func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) {
defer close(events)
finish := func(e error) {
if e != nil {
events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()}
}
}
var dealID retrievalmarket.DealID
if order.FromLocalCAR == "" {
// Subscribe to events before retrieving to avoid losing events.
subscribeCtx, cancel := context.WithCancel(ctx)
defer cancel()
retrievalEvents, err := w.ClientGetRetrievalUpdates(subscribeCtx)
if err != nil {
finish(xerrors.Errorf("GetRetrievalUpdates failed: %w", err))
return
}
retrievalRes, err := w.FullNode.ClientRetrieve(ctx, api.RetrievalOrder{
Root: order.Root,
Piece: order.Piece,
Size: order.Size,
Total: order.Total,
UnsealPrice: order.UnsealPrice,
PaymentInterval: order.PaymentInterval,
PaymentIntervalIncrease: order.PaymentIntervalIncrease,
Client: order.Client,
Miner: order.Miner,
MinerPeer: order.MinerPeer,
})
if err != nil {
finish(xerrors.Errorf("Retrieve failed: %w", err))
return
}
dealID = retrievalRes.DealID
err = readSubscribeEvents(ctx, retrievalRes.DealID, retrievalEvents, events)
if err != nil {
finish(xerrors.Errorf("Retrieve: %w", err))
return
}
}
// If ref is nil, it only fetches the data into the configured blockstore.
if ref == nil {
finish(nil)
return
}
eref := api.ExportRef{
Root: order.Root,
FromLocalCAR: order.FromLocalCAR,
DealID: dealID,
}
if order.DatamodelPathSelector != nil {
s := api.Selector(*order.DatamodelPathSelector)
eref.DAGs = append(eref.DAGs, api.DagSpec{
DataSelector: &s,
ExportMerkleProof: true,
})
}
finish(w.ClientExport(ctx, eref, *ref))
}
func (w *WrapperV1Full) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) {
return w.FullNode.PaychFund(ctx, from, to, amt)
}
func (w *WrapperV1Full) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) {
a, err := w.FullNode.ClientQueryAsk(ctx, p, miner)
if err != nil {
return nil, err
}
return a.Response, nil
}
func (w *WrapperV1Full) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
return w.StateGetBeaconEntry(ctx, epoch)
}
var _ FullNode = &WrapperV1Full{}
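// A minimal sketch of the intended use (`v1node` is assumed to be any
// v1api.FullNode, e.g. an RPC client for the V1 endpoint):
//
//	var v0node FullNode = &WrapperV1Full{FullNode: v1node}
//
// Existing V0 callers then keep working against the stable API while the node
// itself only implements V1.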

14
api/v1api/latest.go Normal file

@ -0,0 +1,14 @@
package v1api
import (
"github.com/filecoin-project/lotus/api"
)
type FullNode = api.FullNode
type FullNodeStruct = api.FullNodeStruct
type RawFullNodeAPI FullNode
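// Note: `FullNode` is a type alias (it is the same type as api.FullNode),
// whereas `RawFullNodeAPI` is a distinct defined type with the same method
// set - useful for wiring the raw (e.g. unpermissioned) implementation as a
// separate component in dependency injection.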
func PermissionedFullAPI(a FullNode) FullNode {
return api.PermissionedFullAPI(a)
}

73
api/version.go Normal file

@ -0,0 +1,73 @@
package api
import (
"fmt"
"golang.org/x/xerrors"
)
type Version uint32
func newVer(major, minor, patch uint8) Version {
return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch))
}
// Ints returns (major, minor, patch) versions
func (ve Version) Ints() (uint32, uint32, uint32) {
v := uint32(ve)
return (v & majorOnlyMask) >> 16, (v & minorOnlyMask) >> 8, v & patchOnlyMask
}
func (ve Version) String() string {
vmj, vmi, vp := ve.Ints()
return fmt.Sprintf("%d.%d.%d", vmj, vmi, vp)
}
func (ve Version) EqMajorMinor(v2 Version) bool {
return ve&minorMask == v2&minorMask
}
type NodeType int
const (
NodeUnknown NodeType = iota
NodeFull
NodeMiner
NodeWorker
)
var RunningNodeType NodeType
func VersionForType(nodeType NodeType) (Version, error) {
switch nodeType {
case NodeFull:
return FullAPIVersion1, nil
case NodeMiner:
return MinerAPIVersion0, nil
case NodeWorker:
return WorkerAPIVersion0, nil
default:
return Version(0), xerrors.Errorf("unknown node type %d", nodeType)
}
}
// semver versions of the rpc api exposed
var (
FullAPIVersion0 = newVer(1, 5, 0)
FullAPIVersion1 = newVer(2, 3, 0)
MinerAPIVersion0 = newVer(1, 5, 0)
WorkerAPIVersion0 = newVer(1, 7, 0)
)
//nolint:varcheck,deadcode
const (
majorMask = 0xff0000
minorMask = 0xffff00
patchMask = 0xffffff
majorOnlyMask = 0xff0000
minorOnlyMask = 0x00ff00
patchOnlyMask = 0x0000ff
)
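// Worked example of the packing scheme (illustrative): newVer(1, 5, 0) packs
// to 1<<16 | 5<<8 | 0 == 0x010500 == 66816, and Ints() unpacks it back to
// (1, 5, 0). EqMajorMinor masks off the patch byte via minorMask, so
// newVer(1, 5, 0).EqMajorMinor(newVer(1, 5, 3)) is true, while
// newVer(1, 5, 0).EqMajorMinor(newVer(1, 6, 0)) is false.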

53
api/wrap.go Normal file

@ -0,0 +1,53 @@
package api
import (
"reflect"
)
// Wrap adapts a partial api implementation to another version.
// proxyT is the proxy type used as input in wrapperT.
// Usage: Wrap(new(v1api.FullNodeStruct), new(v0api.WrapperV1Full), eventsApi).(EventAPI)
func Wrap(proxyT, wrapperT, impl interface{}) interface{} {
proxy := reflect.New(reflect.TypeOf(proxyT).Elem())
proxyMethods := proxy.Elem().FieldByName("Internal")
ri := reflect.ValueOf(impl)
for i := 0; i < ri.NumMethod(); i++ {
mt := ri.Type().Method(i)
if proxyMethods.FieldByName(mt.Name).Kind() == reflect.Invalid {
continue
}
fn := ri.Method(i)
of := proxyMethods.FieldByName(mt.Name)
proxyMethods.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) {
return fn.Call(args)
}))
}
for i := 0; i < proxy.Elem().NumField(); i++ {
if proxy.Elem().Type().Field(i).Name == "Internal" {
continue
}
subProxy := proxy.Elem().Field(i).FieldByName("Internal")
for i := 0; i < ri.NumMethod(); i++ {
mt := ri.Type().Method(i)
if subProxy.FieldByName(mt.Name).Kind() == reflect.Invalid {
continue
}
fn := ri.Method(i)
of := subProxy.FieldByName(mt.Name)
subProxy.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) {
return fn.Call(args)
}))
}
}
wp := reflect.New(reflect.TypeOf(wrapperT).Elem())
wp.Elem().Field(0).Set(proxy)
return wp.Interface()
}
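// For the usage shown in the doc comment, Wrap allocates a v1api.FullNodeStruct
// proxy, copies every method that `impl` provides into the proxy's Internal
// function fields (and those of its embedded structs), embeds the populated
// proxy in a new v0api.WrapperV1Full, and returns it. Methods the impl does
// not provide are simply left nil, which is why the result is asserted only to
// the narrower interface the caller actually needs (EventAPI in the example).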

73
blockstore/api.go Normal file

@ -0,0 +1,73 @@
package blockstore
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
)
type ChainIO interface {
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
ChainHasObj(context.Context, cid.Cid) (bool, error)
ChainPutObj(context.Context, blocks.Block) error
}
type apiBlockstore struct {
api ChainIO
}
// This blockstore is adapted in the constructor.
var _ BasicBlockstore = (*apiBlockstore)(nil)
func NewAPIBlockstore(cio ChainIO) Blockstore {
bs := &apiBlockstore{api: cio}
return Adapt(bs) // return an adapted blockstore.
}
func (a *apiBlockstore) DeleteBlock(context.Context, cid.Cid) error {
return xerrors.New("not supported")
}
func (a *apiBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) {
return a.api.ChainHasObj(ctx, c)
}
func (a *apiBlockstore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
bb, err := a.api.ChainReadObj(ctx, c)
if err != nil {
return nil, err
}
return blocks.NewBlockWithCid(bb, c)
}
func (a *apiBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
bb, err := a.api.ChainReadObj(ctx, c)
if err != nil {
return 0, err
}
return len(bb), nil
}
func (a *apiBlockstore) Put(ctx context.Context, block blocks.Block) error {
return a.api.ChainPutObj(ctx, block)
}
func (a *apiBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
for _, block := range blocks {
err := a.api.ChainPutObj(ctx, block)
if err != nil {
return err
}
}
return nil
}
func (a *apiBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, xerrors.New("not supported")
}
// HashOnRead is a no-op: the remote API does not support hash-on-read verification.
func (a *apiBlockstore) HashOnRead(enabled bool) {}
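// Minimal usage sketch (illustrative; `node` is any client satisfying ChainIO,
// such as a FullNode or Gateway RPC client):
//
//	bs := NewAPIBlockstore(node)
//	blk, err := bs.Get(ctx, c) // served remotely via ChainReadObj
//
// Writes go through ChainPutObj; deletion and AllKeysChan are deliberately
// unsupported because the remote API exposes neither.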

266
blockstore/autobatch.go Normal file

@ -0,0 +1,266 @@
package blockstore
import (
"context"
"sync"
"time"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
"golang.org/x/xerrors"
)
// autolog is a logger for the autobatching blockstore. It is subscoped from the
// blockstore logger.
var autolog = log.Named("auto")
// blockBatch contains the same set of blocks twice: once as an ordered list for flushing, and once as a map for fast access
type blockBatch struct {
blockList []block.Block
blockMap map[cid.Cid]block.Block
}
type AutobatchBlockstore struct {
// TODO: drop if memory consumption is too high
addedCids map[cid.Cid]struct{}
stateLock sync.Mutex
bufferedBatch blockBatch
flushingBatch blockBatch
flushErr error
flushCh chan struct{}
doFlushLock sync.Mutex
flushRetryDelay time.Duration
doneCh chan struct{}
shutdown context.CancelFunc
backingBs Blockstore
bufferCapacity int
bufferSize int
}
func NewAutobatch(ctx context.Context, backingBs Blockstore, bufferCapacity int) *AutobatchBlockstore {
ctx, cancel := context.WithCancel(ctx)
bs := &AutobatchBlockstore{
addedCids: make(map[cid.Cid]struct{}),
backingBs: backingBs,
bufferCapacity: bufferCapacity,
flushCh: make(chan struct{}, 1),
doneCh: make(chan struct{}),
// could be made configurable
flushRetryDelay: time.Millisecond * 100,
shutdown: cancel,
}
bs.bufferedBatch.blockMap = make(map[cid.Cid]block.Block)
go bs.flushWorker(ctx)
return bs
}
func (bs *AutobatchBlockstore) Put(ctx context.Context, blk block.Block) error {
bs.stateLock.Lock()
defer bs.stateLock.Unlock()
_, ok := bs.addedCids[blk.Cid()]
if !ok {
bs.addedCids[blk.Cid()] = struct{}{}
bs.bufferedBatch.blockList = append(bs.bufferedBatch.blockList, blk)
bs.bufferedBatch.blockMap[blk.Cid()] = blk
bs.bufferSize += len(blk.RawData())
if bs.bufferSize >= bs.bufferCapacity {
// signal that a flush is appropriate, may be ignored
select {
case bs.flushCh <- struct{}{}:
default:
// do nothing
}
}
}
return nil
}
func (bs *AutobatchBlockstore) flushWorker(ctx context.Context) {
defer close(bs.doneCh)
for {
select {
case <-bs.flushCh:
// TODO: check if we _should_ actually flush. We could get a spurious wakeup
// here.
putErr := bs.doFlush(ctx, false)
for putErr != nil {
select {
case <-ctx.Done():
return
case <-time.After(bs.flushRetryDelay):
autolog.Errorf("FLUSH ERRORED: %w, retrying after %v", putErr, bs.flushRetryDelay)
putErr = bs.doFlush(ctx, true)
}
}
case <-ctx.Done():
// Do one last flush.
_ = bs.doFlush(ctx, false)
return
}
}
}
// caller must NOT hold stateLock
// set retryOnly to true to only retry a failed flush and not flush anything new.
func (bs *AutobatchBlockstore) doFlush(ctx context.Context, retryOnly bool) error {
bs.doFlushLock.Lock()
defer bs.doFlushLock.Unlock()
// If we failed to flush last time, try flushing again.
if bs.flushErr != nil {
bs.flushErr = bs.backingBs.PutMany(ctx, bs.flushingBatch.blockList)
}
// If we failed, or we're _only_ retrying, bail.
if retryOnly || bs.flushErr != nil {
return bs.flushErr
}
// Then take the current batch...
bs.stateLock.Lock()
// We do NOT clear addedCids here, because its purpose is to expedite Puts
bs.flushingBatch = bs.bufferedBatch
bs.bufferedBatch.blockList = make([]block.Block, 0, len(bs.flushingBatch.blockList))
bs.bufferedBatch.blockMap = make(map[cid.Cid]block.Block, len(bs.flushingBatch.blockMap))
bs.stateLock.Unlock()
// And try to flush it.
bs.flushErr = bs.backingBs.PutMany(ctx, bs.flushingBatch.blockList)
// If we succeeded, reset the batch. Otherwise, we'll try again next time.
if bs.flushErr == nil {
bs.stateLock.Lock()
bs.flushingBatch = blockBatch{}
bs.stateLock.Unlock()
}
return bs.flushErr
}
// caller must NOT hold stateLock
func (bs *AutobatchBlockstore) Flush(ctx context.Context) error {
return bs.doFlush(ctx, false)
}
func (bs *AutobatchBlockstore) Shutdown(ctx context.Context) error {
// TODO: Prevent puts after we call this to avoid losing data.
bs.shutdown()
select {
case <-bs.doneCh:
case <-ctx.Done():
return ctx.Err()
}
bs.doFlushLock.Lock()
defer bs.doFlushLock.Unlock()
return bs.flushErr
}
func (bs *AutobatchBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block, error) {
// may seem backward to check the backingBs first, but that is the likeliest case
blk, err := bs.backingBs.Get(ctx, c)
if err == nil {
return blk, nil
}
if !ipld.IsNotFound(err) {
return blk, err
}
bs.stateLock.Lock()
v, ok := bs.flushingBatch.blockMap[c]
if ok {
bs.stateLock.Unlock()
return v, nil
}
v, ok = bs.bufferedBatch.blockMap[c]
if ok {
bs.stateLock.Unlock()
return v, nil
}
bs.stateLock.Unlock()
// We have to check the backing store one more time because it may have been flushed by the
// time we were able to take the lock above.
return bs.backingBs.Get(ctx, c)
}
func (bs *AutobatchBlockstore) DeleteBlock(context.Context, cid.Cid) error {
// if we wanted to support this, we would have to:
// - flush
// - delete from the backingBs (if present)
// - remove from addedCids (if present)
// - if present in addedCids, also walk the ordered lists and remove if present
return xerrors.New("deletion is unsupported")
}
func (bs *AutobatchBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
// see note in DeleteBlock()
return xerrors.New("deletion is unsupported")
}
func (bs *AutobatchBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) {
_, err := bs.Get(ctx, c)
if err == nil {
return true, nil
}
if ipld.IsNotFound(err) {
return false, nil
}
return false, err
}
func (bs *AutobatchBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
blk, err := bs.Get(ctx, c)
if err != nil {
return 0, err
}
return len(blk.RawData()), nil
}
func (bs *AutobatchBlockstore) PutMany(ctx context.Context, blks []block.Block) error {
for _, blk := range blks {
if err := bs.Put(ctx, blk); err != nil {
return err
}
}
return nil
}
func (bs *AutobatchBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
if err := bs.Flush(ctx); err != nil {
return nil, err
}
return bs.backingBs.AllKeysChan(ctx)
}
func (bs *AutobatchBlockstore) HashOnRead(enabled bool) {
bs.backingBs.HashOnRead(enabled)
}
func (bs *AutobatchBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
blk, err := bs.Get(ctx, cid)
if err != nil {
return err
}
return callback(blk.RawData())
}


@ -0,0 +1,39 @@
package blockstore
import (
"context"
"testing"
ipld "github.com/ipfs/go-ipld-format"
"github.com/stretchr/testify/require"
)
func TestAutobatchBlockstore(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ab := NewAutobatch(ctx, NewMemory(), len(b0.RawData())+len(b1.RawData())-1)
require.NoError(t, ab.Put(ctx, b0))
require.NoError(t, ab.Put(ctx, b1))
require.NoError(t, ab.Put(ctx, b2))
v0, err := ab.Get(ctx, b0.Cid())
require.NoError(t, err)
require.Equal(t, b0.RawData(), v0.RawData())
v1, err := ab.Get(ctx, b1.Cid())
require.NoError(t, err)
require.Equal(t, b1.RawData(), v1.RawData())
v2, err := ab.Get(ctx, b2.Cid())
require.NoError(t, err)
require.Equal(t, b2.RawData(), v2.RawData())
// Regression test for a deadlock.
_, err = ab.Get(ctx, b3.Cid())
require.True(t, ipld.IsNotFound(err))
require.NoError(t, ab.Flush(ctx))
require.NoError(t, ab.Shutdown(ctx))
}

File diff suppressed because it is too large


@ -0,0 +1,272 @@
// stm: #unit
package badgerbs
import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
"strings"
"testing"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"github.com/filecoin-project/lotus/blockstore"
)
func TestBadgerBlockstore(t *testing.T) {
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
(&Suite{
NewBlockstore: newBlockstore(DefaultOptions),
OpenBlockstore: openBlockstore(DefaultOptions),
}).RunTests(t, "non_prefixed")
prefixed := func(path string) Options {
opts := DefaultOptions(path)
opts.Prefix = "/prefixed/"
return opts
}
(&Suite{
NewBlockstore: newBlockstore(prefixed),
OpenBlockstore: openBlockstore(prefixed),
}).RunTests(t, "prefixed")
}
func TestStorageKey(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_STORAGE_KEY_001
bs, _ := newBlockstore(DefaultOptions)(t)
bbs := bs.(*Blockstore)
defer bbs.Close() //nolint:errcheck
cid1 := blocks.NewBlock([]byte("some data")).Cid()
cid2 := blocks.NewBlock([]byte("more data")).Cid()
cid3 := blocks.NewBlock([]byte("a little more data")).Cid()
require.NotEqual(t, cid1, cid2) // sanity check
require.NotEqual(t, cid2, cid3) // sanity check
// nil slice; let StorageKey allocate for us.
k1 := bbs.StorageKey(nil, cid1)
require.Len(t, k1, 55)
require.True(t, cap(k1) == len(k1))
// k1's backing array is reused.
k2 := bbs.StorageKey(k1, cid2)
require.Len(t, k2, 55)
require.True(t, cap(k2) == len(k1))
// bring k2 to len=0, and verify that its backing array gets reused
// (i.e. k1 and k2 are overwritten)
k3 := bbs.StorageKey(k2[:0], cid3)
require.Len(t, k3, 55)
require.True(t, cap(k3) == len(k3))
// backing array of k1 and k2 has been modified, i.e. memory is shared.
require.Equal(t, k3, k1)
require.Equal(t, k3, k2)
}
func newBlockstore(optsSupplier func(path string) Options) func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) {
return func(tb testing.TB) (bs blockstore.BasicBlockstore, path string) {
tb.Helper()
path = tb.TempDir()
db, err := Open(optsSupplier(path))
if err != nil {
tb.Fatal(err)
}
return db, path
}
}
func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) {
return func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error) {
tb.Helper()
return Open(optsSupplier(path))
}
}
func testMove(t *testing.T, optsF func(string) Options) {
ctx := context.Background()
basePath := t.TempDir()
dbPath := filepath.Join(basePath, "db")
db, err := Open(optsF(dbPath))
if err != nil {
t.Fatal(err)
}
defer db.Close() //nolint
var have []blocks.Block
var deleted []cid.Cid
// add some blocks
for i := 0; i < 10; i++ {
blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
err := db.Put(ctx, blk)
if err != nil {
t.Fatal(err)
}
have = append(have, blk)
}
// delete some of them
for i := 5; i < 10; i++ {
c := have[i].Cid()
err := db.DeleteBlock(ctx, c)
if err != nil {
t.Fatal(err)
}
deleted = append(deleted, c)
}
have = have[:5]
// start a move concurrent with some more puts
g := new(errgroup.Group)
g.Go(func() error {
for i := 10; i < 1000; i++ {
blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
err := db.Put(ctx, blk)
if err != nil {
return err
}
have = append(have, blk)
}
return nil
})
g.Go(func() error {
return db.CollectGarbage(ctx, blockstore.WithFullGC(true))
})
err = g.Wait()
if err != nil {
t.Fatal(err)
}
// now check that we have all the blocks in have and none in the deleted lists
checkBlocks := func() {
for _, blk := range have {
has, err := db.Has(ctx, blk.Cid())
if err != nil {
t.Fatal(err)
}
if !has {
t.Fatal("missing block")
}
blk2, err := db.Get(ctx, blk.Cid())
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(blk.RawData(), blk2.RawData()) {
t.Fatal("data mismatch")
}
}
for _, c := range deleted {
has, err := db.Has(ctx, c)
if err != nil {
t.Fatal(err)
}
if has {
t.Fatal("resurrected block")
}
}
}
checkBlocks()
// check the basePath -- it should contain a directory named db.{timestamp} and a
// symlink named db pointing at it, and nothing else
checkPath := func() {
entries, err := os.ReadDir(basePath)
if err != nil {
t.Fatal(err)
}
if len(entries) != 2 {
t.Fatalf("too many entries; expected %d but got %d", 2, len(entries))
}
var haveDB, haveDBLink bool
for _, e := range entries {
if e.Name() == "db" {
if (e.Type() & os.ModeSymlink) == 0 {
t.Fatal("found db, but it's not a symlink")
}
haveDBLink = true
continue
}
if strings.HasPrefix(e.Name(), "db.") {
if !e.Type().IsDir() {
t.Fatal("found db prefix, but it's not a directory")
}
haveDB = true
continue
}
}
if !haveDB {
t.Fatal("db directory is missing")
}
if !haveDBLink {
t.Fatal("db link is missing")
}
}
checkPath()
// now do another FullGC to test the double move and following of symlinks
if err := db.CollectGarbage(ctx, blockstore.WithFullGC(true)); err != nil {
t.Fatal(err)
}
checkBlocks()
checkPath()
// reopen the db to make sure our relative link works:
err = db.Close()
if err != nil {
t.Fatal(err)
}
db, err = Open(optsF(dbPath))
if err != nil {
t.Fatal(err)
}
// db.Close() is already deferred
checkBlocks()
}
func TestMoveNoPrefix(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
//stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_COLLECT_GARBAGE_001
testMove(t, DefaultOptions)
}
func TestMoveWithPrefix(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
//stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_COLLECT_GARBAGE_001
testMove(t, func(path string) Options {
opts := DefaultOptions(path)
opts.Prefix = "/prefixed/"
return opts
})
}

View File

@ -0,0 +1,358 @@
// stm: #unit
package badgerbs
import (
"context"
"fmt"
"io"
"reflect"
"strings"
"testing"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
u "github.com/ipfs/go-ipfs-util"
ipld "github.com/ipfs/go-ipld-format"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/lotus/blockstore"
)
// TODO: move this to go-ipfs-blockstore.
type Suite struct {
NewBlockstore func(tb testing.TB) (bs blockstore.BasicBlockstore, path string)
OpenBlockstore func(tb testing.TB, path string) (bs blockstore.BasicBlockstore, err error)
}
func (s *Suite) RunTests(t *testing.T, prefix string) {
v := reflect.TypeOf(s)
f := func(t *testing.T) {
for i := 0; i < v.NumMethod(); i++ {
if m := v.Method(i); strings.HasPrefix(m.Name, "Test") {
f := m.Func.Interface().(func(*Suite, *testing.T))
t.Run(m.Name, func(t *testing.T) {
f(s, t)
})
}
}
}
if prefix == "" {
f(t)
} else {
t.Run(prefix, f)
}
}
func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_GET_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
ctx := context.Background()
bs, _ := s.NewBlockstore(t)
if c, ok := bs.(io.Closer); ok {
defer func() { require.NoError(t, c.Close()) }()
}
c := cid.NewCidV0(u.Hash([]byte("stuff")))
bl, err := bs.Get(ctx, c)
require.Nil(t, bl)
require.Equal(t, ipld.ErrNotFound{Cid: c}, err)
}
func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_GET_001
ctx := context.Background()
bs, _ := s.NewBlockstore(t)
if c, ok := bs.(io.Closer); ok {
defer func() { require.NoError(t, c.Close()) }()
}
_, err := bs.Get(ctx, cid.Undef)
require.Equal(t, ipld.ErrNotFound{Cid: cid.Undef}, err)
}
func (s *Suite) TestPutThenGetBlock(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
//stm: @SPLITSTORE_BADGER_GET_001
ctx := context.Background()
bs, _ := s.NewBlockstore(t)
if c, ok := bs.(io.Closer); ok {
defer func() { require.NoError(t, c.Close()) }()
}
orig := blocks.NewBlock([]byte("some data"))
err := bs.Put(ctx, orig)
require.NoError(t, err)
fetched, err := bs.Get(ctx, orig.Cid())
require.NoError(t, err)
require.Equal(t, orig.RawData(), fetched.RawData())
}
func (s *Suite) TestHas(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_HAS_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
ctx := context.Background()
bs, _ := s.NewBlockstore(t)
if c, ok := bs.(io.Closer); ok {
defer func() { require.NoError(t, c.Close()) }()
}
orig := blocks.NewBlock([]byte("some data"))
err := bs.Put(ctx, orig)
require.NoError(t, err)
ok, err := bs.Has(ctx, orig.Cid())
require.NoError(t, err)
require.True(t, ok)
ok, err = bs.Has(ctx, blocks.NewBlock([]byte("another thing")).Cid())
require.NoError(t, err)
require.False(t, ok)
}
func (s *Suite) TestCidv0v1(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
//stm: @SPLITSTORE_BADGER_GET_001
ctx := context.Background()
bs, _ := s.NewBlockstore(t)
if c, ok := bs.(io.Closer); ok {
defer func() { require.NoError(t, c.Close()) }()
}
orig := blocks.NewBlock([]byte("some data"))
err := bs.Put(ctx, orig)
require.NoError(t, err)
fetched, err := bs.Get(ctx, cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash()))
require.NoError(t, err)
require.Equal(t, orig.RawData(), fetched.RawData())
}
func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
//stm: @SPLITSTORE_BADGER_GET_SIZE_001
ctx := context.Background()
bs, _ := s.NewBlockstore(t)
if c, ok := bs.(io.Closer); ok {
defer func() { require.NoError(t, c.Close()) }()
}
block := blocks.NewBlock([]byte("some data"))
missingBlock := blocks.NewBlock([]byte("missingBlock"))
emptyBlock := blocks.NewBlock([]byte{})
err := bs.Put(ctx, block)
require.NoError(t, err)
blockSize, err := bs.GetSize(ctx, block.Cid())
require.NoError(t, err)
require.Len(t, block.RawData(), blockSize)
err = bs.Put(ctx, emptyBlock)
require.NoError(t, err)
emptySize, err := bs.GetSize(ctx, emptyBlock.Cid())
require.NoError(t, err)
require.Zero(t, emptySize)
missingCid := missingBlock.Cid()
missingSize, err := bs.GetSize(ctx, missingCid)
require.Equal(t, ipld.ErrNotFound{Cid: missingCid}, err)
require.Equal(t, -1, missingSize)
}
func (s *Suite) TestAllKeysSimple(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
bs, _ := s.NewBlockstore(t)
if c, ok := bs.(io.Closer); ok {
defer func() { require.NoError(t, c.Close()) }()
}
keys := insertBlocks(t, bs, 100)
ctx := context.Background()
ch, err := bs.AllKeysChan(ctx)
require.NoError(t, err)
actual := collect(ch)
require.ElementsMatch(t, keys, actual)
}
func (s *Suite) TestAllKeysRespectsContext(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
//stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001
bs, _ := s.NewBlockstore(t)
if c, ok := bs.(io.Closer); ok {
defer func() { require.NoError(t, c.Close()) }()
}
_ = insertBlocks(t, bs, 100)
ctx, cancel := context.WithCancel(context.Background())
ch, err := bs.AllKeysChan(ctx)
require.NoError(t, err)
// consume 2, then cancel context.
v, ok := <-ch
require.NotEqual(t, cid.Undef, v)
require.True(t, ok)
v, ok = <-ch
require.NotEqual(t, cid.Undef, v)
require.True(t, ok)
cancel()
// pull one value out to avoid race
_, _ = <-ch
v, ok = <-ch
require.Equal(t, cid.Undef, v)
require.False(t, ok)
}
func (s *Suite) TestDoubleClose(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
bs, _ := s.NewBlockstore(t)
c, ok := bs.(io.Closer)
if !ok {
t.SkipNow()
}
require.NoError(t, c.Close())
require.NoError(t, c.Close())
}
func (s *Suite) TestReopenPutGet(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
//stm: @SPLITSTORE_BADGER_GET_001
ctx := context.Background()
bs, path := s.NewBlockstore(t)
c, ok := bs.(io.Closer)
if !ok {
t.SkipNow()
}
orig := blocks.NewBlock([]byte("some data"))
err := bs.Put(ctx, orig)
require.NoError(t, err)
err = c.Close()
require.NoError(t, err)
bs, err = s.OpenBlockstore(t, path)
require.NoError(t, err)
fetched, err := bs.Get(ctx, orig.Cid())
require.NoError(t, err)
require.Equal(t, orig.RawData(), fetched.RawData())
err = bs.(io.Closer).Close()
require.NoError(t, err)
}
func (s *Suite) TestPutMany(t *testing.T) {
//stm: @SPLITSTORE_BADGER_OPEN_001, @SPLITSTORE_BADGER_CLOSE_001
//stm: @SPLITSTORE_BADGER_HAS_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
//stm: @SPLITSTORE_BADGER_GET_001, @SPLITSTORE_BADGER_PUT_MANY_001
//stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001
ctx := context.Background()
bs, _ := s.NewBlockstore(t)
if c, ok := bs.(io.Closer); ok {
defer func() { require.NoError(t, c.Close()) }()
}
blks := []blocks.Block{
blocks.NewBlock([]byte("foo1")),
blocks.NewBlock([]byte("foo2")),
blocks.NewBlock([]byte("foo3")),
}
err := bs.PutMany(ctx, blks)
require.NoError(t, err)
for _, blk := range blks {
fetched, err := bs.Get(ctx, blk.Cid())
require.NoError(t, err)
require.Equal(t, blk.RawData(), fetched.RawData())
ok, err := bs.Has(ctx, blk.Cid())
require.NoError(t, err)
require.True(t, ok)
}
ch, err := bs.AllKeysChan(context.Background())
require.NoError(t, err)
cids := collect(ch)
require.Len(t, cids, 3)
}
func (s *Suite) TestDelete(t *testing.T) {
//stm: @SPLITSTORE_BADGER_PUT_001, @SPLITSTORE_BADGER_POOLED_STORAGE_KEY_001
//stm: @SPLITSTORE_BADGER_DELETE_001, @SPLITSTORE_BADGER_POOLED_STORAGE_HAS_001
//stm: @SPLITSTORE_BADGER_ALL_KEYS_CHAN_001, @SPLITSTORE_BADGER_HAS_001
//stm: @SPLITSTORE_BADGER_PUT_MANY_001
ctx := context.Background()
bs, _ := s.NewBlockstore(t)
if c, ok := bs.(io.Closer); ok {
defer func() { require.NoError(t, c.Close()) }()
}
blks := []blocks.Block{
blocks.NewBlock([]byte("foo1")),
blocks.NewBlock([]byte("foo2")),
blocks.NewBlock([]byte("foo3")),
}
err := bs.PutMany(ctx, blks)
require.NoError(t, err)
err = bs.DeleteBlock(ctx, blks[1].Cid())
require.NoError(t, err)
ch, err := bs.AllKeysChan(context.Background())
require.NoError(t, err)
cids := collect(ch)
require.Len(t, cids, 2)
require.ElementsMatch(t, cids, []cid.Cid{
cid.NewCidV1(cid.Raw, blks[0].Cid().Hash()),
cid.NewCidV1(cid.Raw, blks[2].Cid().Hash()),
})
has, err := bs.Has(ctx, blks[1].Cid())
require.NoError(t, err)
require.False(t, has)
}
func insertBlocks(t *testing.T, bs blockstore.BasicBlockstore, count int) []cid.Cid {
ctx := context.Background()
keys := make([]cid.Cid, count)
for i := 0; i < count; i++ {
block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i)))
err := bs.Put(ctx, block)
require.NoError(t, err)
// NewBlock assigns a CIDv0; we convert it to CIDv1 because that's what
// the store returns.
keys[i] = cid.NewCidV1(cid.Raw, block.Multihash())
}
return keys
}
func collect(ch <-chan cid.Cid) []cid.Cid {
var keys []cid.Cid
for k := range ch {
keys = append(keys, k)
}
return keys
}

169
blockstore/blockstore.go Normal file
View File

@ -0,0 +1,169 @@
package blockstore
import (
"context"
"time"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
blockstore "github.com/ipfs/go-ipfs-blockstore"
logging "github.com/ipfs/go-log/v2"
)
var log = logging.Logger("blockstore")
// Blockstore is the blockstore interface used by Lotus. It is the union
// of the basic go-ipfs blockstore and the extra capabilities required by
// Lotus, e.g. View or Flush.
type Blockstore interface {
blockstore.Blockstore
blockstore.Viewer
BatchDeleter
Flusher
}
// BasicBlockstore is an alias to the original IPFS Blockstore.
type BasicBlockstore = blockstore.Blockstore
type Viewer = blockstore.Viewer
type Flusher interface {
Flush(context.Context) error
}
type BatchDeleter interface {
DeleteMany(ctx context.Context, cids []cid.Cid) error
}
// BlockstoreIterator is a trait for efficient iteration
type BlockstoreIterator interface {
ForEachKey(func(cid.Cid) error) error
}
// BlockstoreGC is a trait for blockstores that support online garbage collection
type BlockstoreGC interface {
CollectGarbage(ctx context.Context, options ...BlockstoreGCOption) error
}
// BlockstoreGCOnce is a trait for a blockstore that supports incremental online garbage collection
type BlockstoreGCOnce interface {
GCOnce(ctx context.Context, options ...BlockstoreGCOption) error
}
// BlockstoreGCOption is a functional interface for controlling blockstore GC options
type BlockstoreGCOption = func(*BlockstoreGCOptions) error
// BlockstoreGCOptions is a struct with GC options
type BlockstoreGCOptions struct {
FullGC bool
// fraction of garbage in a badger vlog before it's worth processing in online GC
Threshold float64
// how often to call the check function
CheckFreq time.Duration
// function to call periodically to pause or early terminate GC
Check func() error
}
func WithFullGC(fullgc bool) BlockstoreGCOption {
return func(opts *BlockstoreGCOptions) error {
opts.FullGC = fullgc
return nil
}
}
func WithThreshold(threshold float64) BlockstoreGCOption {
return func(opts *BlockstoreGCOptions) error {
opts.Threshold = threshold
return nil
}
}
func WithCheckFreq(f time.Duration) BlockstoreGCOption {
return func(opts *BlockstoreGCOptions) error {
opts.CheckFreq = f
return nil
}
}
func WithCheck(check func() error) BlockstoreGCOption {
return func(opts *BlockstoreGCOptions) error {
opts.Check = check
return nil
}
}
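// Usage sketch (illustrative, not part of this file): the functional options
// above compose on any store implementing BlockstoreGC; values are assumptions.
//
//	err := bs.CollectGarbage(ctx,
//		WithFullGC(false),
//		WithThreshold(0.2),                     // only process vlogs with >20% garbage
//		WithCheckFreq(30*time.Second),          // poll the check function every 30s
//		WithCheck(func() error { return nil }), // return an error to pause/abort GC
//	)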
// BlockstoreSize is a trait for on-disk blockstores that can report their size
type BlockstoreSize interface {
Size() (int64, error)
}
// WrapIDStore wraps the underlying blockstore in an "identity" blockstore.
// The ID store filters out all puts for blocks with CIDs using the "identity"
// hash function. It also extracts inlined blocks from CIDs using the identity
// hash function and returns them on get/has, ignoring the contents of the
// blockstore.
func WrapIDStore(bstore blockstore.Blockstore) Blockstore {
if is, ok := bstore.(*idstore); ok {
// already wrapped
return is
}
if bs, ok := bstore.(Blockstore); ok {
// the underlying blockstore has implemented an (efficient) DeleteMany, so
// wrap it directly rather than neutering that method through Adapt
return NewIDStore(bs)
}
// The underlying blockstore does not implement DeleteMany, so we need to shim it.
// This is less efficient as it'll iterate and perform single deletes.
return NewIDStore(Adapt(bstore))
}
// FromDatastore creates a new blockstore backed by the given datastore.
func FromDatastore(dstore ds.Batching) Blockstore {
return WrapIDStore(blockstore.NewBlockstore(dstore))
}
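// Example sketch: building a Lotus blockstore from an in-memory datastore
// (assumes ds "github.com/ipfs/go-datastore" and dssync "github.com/ipfs/go-datastore/sync").
//
//	bs := FromDatastore(dssync.MutexWrap(ds.NewMapDatastore()))
//	err := bs.Put(ctx, blk) // identity-CID blocks are filtered out by the ID store wrapper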
type adaptedBlockstore struct {
blockstore.Blockstore
}
var _ Blockstore = (*adaptedBlockstore)(nil)
func (a *adaptedBlockstore) Flush(ctx context.Context) error {
if flusher, canFlush := a.Blockstore.(Flusher); canFlush {
return flusher.Flush(ctx)
}
return nil
}
func (a *adaptedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
blk, err := a.Get(ctx, cid)
if err != nil {
return err
}
return callback(blk.RawData())
}
func (a *adaptedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
for _, cid := range cids {
err := a.DeleteBlock(ctx, cid)
if err != nil {
return err
}
}
return nil
}
// Adapt adapts a standard blockstore to a Lotus blockstore by
// enriching it with the extra methods that Lotus requires (e.g. View, Flush).
//
// View proxies over to Get and calls the callback with the value supplied by Get.
// Flush no-ops unless the underlying blockstore implements Flusher.
func Adapt(bs blockstore.Blockstore) Blockstore {
if ret, ok := bs.(Blockstore); ok {
return ret
}
return &adaptedBlockstore{bs}
}

177
blockstore/buffered.go Normal file
View File

@ -0,0 +1,177 @@
package blockstore
import (
"context"
"os"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
)
// buflog is a logger for the buffered blockstore. It is subscoped from the
// blockstore logger.
var buflog = log.Named("buf")
type BufferedBlockstore struct {
read Blockstore
write Blockstore
}
func NewBuffered(base Blockstore) *BufferedBlockstore {
var buf Blockstore
if os.Getenv("LOTUS_DISABLE_VM_BUF") == "iknowitsabadidea" {
buflog.Warn("VM BLOCKSTORE BUFFERING IS DISABLED")
buf = base
} else {
buf = NewMemory()
}
bs := &BufferedBlockstore{
read: base,
write: buf,
}
return bs
}
func NewTieredBstore(r Blockstore, w Blockstore) *BufferedBlockstore {
return &BufferedBlockstore{
read: r,
write: w,
}
}
var (
_ Blockstore = (*BufferedBlockstore)(nil)
_ Viewer = (*BufferedBlockstore)(nil)
)
func (bs *BufferedBlockstore) Flush(ctx context.Context) error { return bs.write.Flush(ctx) }
func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
a, err := bs.read.AllKeysChan(ctx)
if err != nil {
return nil, err
}
b, err := bs.write.AllKeysChan(ctx)
if err != nil {
return nil, err
}
out := make(chan cid.Cid)
go func() {
defer close(out)
for a != nil || b != nil {
select {
case val, ok := <-a:
if !ok {
a = nil
} else {
select {
case out <- val:
case <-ctx.Done():
return
}
}
case val, ok := <-b:
if !ok {
b = nil
} else {
select {
case out <- val:
case <-ctx.Done():
return
}
}
}
}
}()
return out, nil
}
func (bs *BufferedBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error {
if err := bs.read.DeleteBlock(ctx, c); err != nil {
return err
}
return bs.write.DeleteBlock(ctx, c)
}
func (bs *BufferedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
if err := bs.read.DeleteMany(ctx, cids); err != nil {
return err
}
return bs.write.DeleteMany(ctx, cids)
}
func (bs *BufferedBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error {
// both stores are viewable.
if err := bs.write.View(ctx, c, callback); ipld.IsNotFound(err) {
// not found in write blockstore; fall through.
} else {
return err // propagate errors, or nil, i.e. found.
}
return bs.read.View(ctx, c, callback)
}
func (bs *BufferedBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block, error) {
if out, err := bs.write.Get(ctx, c); err != nil {
if !ipld.IsNotFound(err) {
return nil, err
}
} else {
return out, nil
}
return bs.read.Get(ctx, c)
}
func (bs *BufferedBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
s, err := bs.read.GetSize(ctx, c)
if ipld.IsNotFound(err) || s == 0 {
return bs.write.GetSize(ctx, c)
}
return s, err
}
func (bs *BufferedBlockstore) Put(ctx context.Context, blk block.Block) error {
has, err := bs.read.Has(ctx, blk.Cid()) // TODO: consider dropping this check
if err != nil {
return err
}
if has {
return nil
}
return bs.write.Put(ctx, blk)
}
func (bs *BufferedBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) {
has, err := bs.write.Has(ctx, c)
if err != nil {
return false, err
}
if has {
return true, nil
}
return bs.read.Has(ctx, c)
}
func (bs *BufferedBlockstore) HashOnRead(hor bool) {
bs.read.HashOnRead(hor)
bs.write.HashOnRead(hor)
}
func (bs *BufferedBlockstore) PutMany(ctx context.Context, blks []block.Block) error {
return bs.write.PutMany(ctx, blks)
}
func (bs *BufferedBlockstore) Read() Blockstore {
return bs.read
}
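// Usage sketch: reads consult both layers, while writes land only in the
// buffer layer; the base store is untouched until the caller persists them.
//
//	base := NewMemory() // stand-in for the chain store
//	buf := NewBuffered(base)
//	_ = buf.Put(ctx, blk)              // goes to the write buffer only
//	ok, _ := base.Has(ctx, blk.Cid())  // false: base remains untouched
//	blk2, _ := buf.Get(ctx, blk.Cid()) // served from the write buffer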

441
blockstore/cbor_gen.go Normal file
View File

@ -0,0 +1,441 @@
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
package blockstore
import (
"fmt"
"io"
"math"
"sort"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = math.E
var _ = sort.Sort
var lengthBufNetRpcReq = []byte{132}
func (t *NetRpcReq) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufNetRpcReq); err != nil {
return err
}
// t.Type (blockstore.NetRPCReqType) (uint8)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil {
return err
}
// t.ID (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil {
return err
}
// t.Cid ([]cid.Cid) (slice)
if len(t.Cid) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.Cid was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Cid))); err != nil {
return err
}
for _, v := range t.Cid {
if err := cbg.WriteCid(w, v); err != nil {
return xerrors.Errorf("failed writing cid field t.Cid: %w", err)
}
}
// t.Data ([][]uint8) (slice)
if len(t.Data) > cbg.MaxLength {
return xerrors.Errorf("Slice value in field t.Data was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Data))); err != nil {
return err
}
for _, v := range t.Data {
if len(v) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field v was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(v))); err != nil {
return err
}
if _, err := cw.Write(v[:]); err != nil {
return err
}
}
return nil
}
func (t *NetRpcReq) UnmarshalCBOR(r io.Reader) (err error) {
*t = NetRpcReq{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 4 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Type (blockstore.NetRPCReqType) (uint8)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint8 field")
}
if extra > math.MaxUint8 {
return fmt.Errorf("integer in input was too large for uint8 field")
}
t.Type = NetRPCReqType(extra)
// t.ID (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.ID = uint64(extra)
}
// t.Cid ([]cid.Cid) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > cbg.MaxLength {
return fmt.Errorf("t.Cid: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.Cid = make([]cid.Cid, extra)
}
for i := 0; i < int(extra); i++ {
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("reading cid field t.Cid failed: %w", err)
}
t.Cid[i] = c
}
// t.Data ([][]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > cbg.MaxLength {
return fmt.Errorf("t.Data: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.Data = make([][]uint8, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.Data[i]: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Data[i] = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Data[i][:]); err != nil {
return err
}
}
}
return nil
}
var lengthBufNetRpcResp = []byte{131}
func (t *NetRpcResp) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufNetRpcResp); err != nil {
return err
}
// t.Type (blockstore.NetRPCRespType) (uint8)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil {
return err
}
// t.ID (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ID)); err != nil {
return err
}
// t.Data ([]uint8) (slice)
if len(t.Data) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field t.Data was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Data))); err != nil {
return err
}
if _, err := cw.Write(t.Data[:]); err != nil {
return err
}
return nil
}
func (t *NetRpcResp) UnmarshalCBOR(r io.Reader) (err error) {
*t = NetRpcResp{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 3 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Type (blockstore.NetRPCRespType) (uint8)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint8 field")
}
if extra > math.MaxUint8 {
return fmt.Errorf("integer in input was too large for uint8 field")
}
t.Type = NetRPCRespType(extra)
// t.ID (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.ID = uint64(extra)
}
// t.Data ([]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.Data: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Data = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Data[:]); err != nil {
return err
}
return nil
}
var lengthBufNetRpcErr = []byte{131}
func (t *NetRpcErr) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufNetRpcErr); err != nil {
return err
}
// t.Type (blockstore.NetRPCErrType) (uint8)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Type)); err != nil {
return err
}
// t.Msg (string) (string)
if len(t.Msg) > cbg.MaxLength {
return xerrors.Errorf("Value in field t.Msg was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Msg))); err != nil {
return err
}
if _, err := io.WriteString(w, string(t.Msg)); err != nil {
return err
}
// t.Cid (cid.Cid) (struct)
if t.Cid == nil {
if _, err := cw.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCid(cw, *t.Cid); err != nil {
return xerrors.Errorf("failed to write cid field t.Cid: %w", err)
}
}
return nil
}
func (t *NetRpcErr) UnmarshalCBOR(r io.Reader) (err error) {
*t = NetRpcErr{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 3 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Type (blockstore.NetRPCErrType) (uint8)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint8 field")
}
if extra > math.MaxUint8 {
return fmt.Errorf("integer in input was too large for uint8 field")
}
t.Type = NetRPCErrType(extra)
// t.Msg (string) (string)
{
sval, err := cbg.ReadString(cr)
if err != nil {
return err
}
t.Msg = string(sval)
}
// t.Cid (cid.Cid) (struct)
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Cid: %w", err)
}
t.Cid = &c
}
}
return nil
}

21
blockstore/context.go Normal file
View File

@ -0,0 +1,21 @@
package blockstore
import (
"context"
)
type hotViewKey struct{}
var hotView = hotViewKey{}
// WithHotView constructs a new context with an option that provides a hint to the blockstore
// (e.g. the splitstore) that the object (and its ipld references) should be kept hot.
func WithHotView(ctx context.Context) context.Context {
return context.WithValue(ctx, hotView, struct{}{})
}
// IsHotView returns true if the hot view option is set in the context
func IsHotView(ctx context.Context) bool {
v := ctx.Value(hotView)
return v != nil
}
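// Usage sketch: callers tag a context before reading objects they want kept hot.
//
//	ctx := WithHotView(context.Background())
//	err := bs.View(ctx, c, func(data []byte) error {
//		// a splitstore-aware store can consult IsHotView(ctx) here
//		return nil
//	})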

70
blockstore/discard.go Normal file
View File

@ -0,0 +1,70 @@
package blockstore
import (
"context"
"io"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
)
var _ Blockstore = (*discardstore)(nil)
type discardstore struct {
bs Blockstore
}
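// NewDiscardStore wraps a blockstore so that writes and deletes are silently
// dropped, while reads and queries are served by the underlying store.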
func NewDiscardStore(bs Blockstore) Blockstore {
return &discardstore{bs: bs}
}
func (b *discardstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
return b.bs.Has(ctx, cid)
}
func (b *discardstore) HashOnRead(hor bool) {
b.bs.HashOnRead(hor)
}
func (b *discardstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
return b.bs.Get(ctx, cid)
}
func (b *discardstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
return b.bs.GetSize(ctx, cid)
}
func (b *discardstore) View(ctx context.Context, cid cid.Cid, f func([]byte) error) error {
return b.bs.View(ctx, cid, f)
}
func (b *discardstore) Flush(ctx context.Context) error {
return nil
}
func (b *discardstore) Put(ctx context.Context, blk blocks.Block) error {
return nil
}
func (b *discardstore) PutMany(ctx context.Context, blks []blocks.Block) error {
return nil
}
func (b *discardstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
return nil
}
func (b *discardstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
return nil
}
func (b *discardstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return b.bs.AllKeysChan(ctx)
}
func (b *discardstore) Close() error {
if c, ok := b.bs.(io.Closer); ok {
return c.Close()
}
return nil
}

9
blockstore/doc.go Normal file
View File

@ -0,0 +1,9 @@
// Package blockstore and subpackages contain most of the blockstore
// implementations used by Lotus.
//
// Blockstores not ultimately constructed out of the building blocks in this
// package may not work properly.
//
// This package re-exports parts of the go-ipfs-blockstore package such that
// no other package needs to import it directly, for ergonomics and traceability.
package blockstore

106
blockstore/fallback.go Normal file
View File

@ -0,0 +1,106 @@
package blockstore
import (
"context"
"sync"
"time"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
"golang.org/x/xerrors"
)
// UnwrapFallbackStore takes a blockstore, and returns the underlying blockstore
// if it was a FallbackStore. Otherwise, it just returns the supplied store
// unmodified.
func UnwrapFallbackStore(bs Blockstore) (Blockstore, bool) {
if fbs, ok := bs.(*FallbackStore); ok {
return fbs.Blockstore, true
}
return bs, false
}
// FallbackStore is a read-through store that queries another (potentially
// remote) source if the block is not found locally. If the block is found
// during the fallback, it stores it in the local store.
type FallbackStore struct {
Blockstore
lk sync.RWMutex
// missFn is the function that will be invoked on a local miss to pull the
// block from elsewhere.
missFn func(context.Context, cid.Cid) (blocks.Block, error)
}
var _ Blockstore = (*FallbackStore)(nil)
func (fbs *FallbackStore) SetFallback(missFn func(context.Context, cid.Cid) (blocks.Block, error)) {
fbs.lk.Lock()
defer fbs.lk.Unlock()
fbs.missFn = missFn
}
func (fbs *FallbackStore) getFallback(c cid.Cid) (blocks.Block, error) {
log.Warnf("fallbackstore: block not found locally, fetching from the network; cid: %s", c)
fbs.lk.RLock()
defer fbs.lk.RUnlock()
if fbs.missFn == nil {
// FallbackStore wasn't configured yet (chainstore/bitswap aren't up yet)
// Wait for a bit and retry
fbs.lk.RUnlock()
time.Sleep(5 * time.Second)
fbs.lk.RLock()
if fbs.missFn == nil {
log.Errorw("fallbackstore: missFn not configured yet")
return nil, ipld.ErrNotFound{Cid: c}
}
}
ctx, cancel := context.WithTimeout(context.TODO(), 120*time.Second)
defer cancel()
b, err := fbs.missFn(ctx, c)
if err != nil {
return nil, err
}
// chain bitswap puts blocks in a temp blockstore which is cleaned up
// every few minutes (to drop any messages we fetched but don't want);
// in this case we want to keep this block around
if err := fbs.Put(ctx, b); err != nil {
return nil, xerrors.Errorf("persisting fallback-fetched block: %w", err)
}
return b, nil
}
func (fbs *FallbackStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
b, err := fbs.Blockstore.Get(ctx, c)
switch {
case err == nil:
return b, nil
case ipld.IsNotFound(err):
return fbs.getFallback(c)
default:
return b, err
}
}
func (fbs *FallbackStore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
sz, err := fbs.Blockstore.GetSize(ctx, c)
switch {
case err == nil:
return sz, nil
case ipld.IsNotFound(err):
b, err := fbs.getFallback(c)
if err != nil {
return 0, err
}
return len(b.RawData()), nil
default:
return sz, err
}
}
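// Wiring sketch: the miss function is typically hooked up once the network is
// ready; fetchFromNetwork below is a hypothetical stand-in for a bitswap fetch.
//
//	fbs := &FallbackStore{Blockstore: NewMemory()}
//	fbs.SetFallback(func(ctx context.Context, c cid.Cid) (blocks.Block, error) {
//		return fetchFromNetwork(ctx, c)
//	})
//	blk, err := fbs.Get(ctx, c) // a local miss triggers the fallback and persists the block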

185
blockstore/idstore.go Normal file
View File

@ -0,0 +1,185 @@
package blockstore
import (
"context"
"io"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
"golang.org/x/xerrors"
)
var _ Blockstore = (*idstore)(nil)
type idstore struct {
bs Blockstore
}
func NewIDStore(bs Blockstore) Blockstore {
return &idstore{bs: bs}
}
func decodeCid(cid cid.Cid) (inline bool, data []byte, err error) {
if cid.Prefix().MhType != mh.IDENTITY {
return false, nil, nil
}
dmh, err := mh.Decode(cid.Hash())
if err != nil {
return false, nil, err
}
if dmh.Code == mh.IDENTITY {
return true, dmh.Digest, nil
}
return false, nil, err
}
func (b *idstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
inline, _, err := decodeCid(cid)
if err != nil {
return false, xerrors.Errorf("error decoding Cid: %w", err)
}
if inline {
return true, nil
}
return b.bs.Has(ctx, cid)
}
func (b *idstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
inline, data, err := decodeCid(cid)
if err != nil {
return nil, xerrors.Errorf("error decoding Cid: %w", err)
}
if inline {
return blocks.NewBlockWithCid(data, cid)
}
return b.bs.Get(ctx, cid)
}
func (b *idstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
inline, data, err := decodeCid(cid)
if err != nil {
return 0, xerrors.Errorf("error decoding Cid: %w", err)
}
if inline {
return len(data), err
}
return b.bs.GetSize(ctx, cid)
}
func (b *idstore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error {
inline, data, err := decodeCid(cid)
if err != nil {
return xerrors.Errorf("error decoding Cid: %w", err)
}
if inline {
return cb(data)
}
return b.bs.View(ctx, cid, cb)
}
func (b *idstore) Put(ctx context.Context, blk blocks.Block) error {
inline, _, err := decodeCid(blk.Cid())
if err != nil {
return xerrors.Errorf("error decoding Cid: %w", err)
}
if inline {
return nil
}
return b.bs.Put(ctx, blk)
}
func (b *idstore) ForEachKey(f func(cid.Cid) error) error {
iterBstore, ok := b.bs.(BlockstoreIterator)
if !ok {
return xerrors.Errorf("underlying blockstore (type %T) doesn't support fast iteration", b.bs)
}
return iterBstore.ForEachKey(f)
}
func (b *idstore) PutMany(ctx context.Context, blks []blocks.Block) error {
toPut := make([]blocks.Block, 0, len(blks))
for _, blk := range blks {
inline, _, err := decodeCid(blk.Cid())
if err != nil {
return xerrors.Errorf("error decoding Cid: %w", err)
}
if inline {
continue
}
toPut = append(toPut, blk)
}
if len(toPut) > 0 {
return b.bs.PutMany(ctx, toPut)
}
return nil
}
func (b *idstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
inline, _, err := decodeCid(cid)
if err != nil {
return xerrors.Errorf("error decoding Cid: %w", err)
}
if inline {
return nil
}
return b.bs.DeleteBlock(ctx, cid)
}
func (b *idstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
toDelete := make([]cid.Cid, 0, len(cids))
for _, cid := range cids {
inline, _, err := decodeCid(cid)
if err != nil {
return xerrors.Errorf("error decoding Cid: %w", err)
}
if inline {
continue
}
toDelete = append(toDelete, cid)
}
if len(toDelete) > 0 {
return b.bs.DeleteMany(ctx, toDelete)
}
return nil
}
func (b *idstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return b.bs.AllKeysChan(ctx)
}
func (b *idstore) HashOnRead(enabled bool) {
b.bs.HashOnRead(enabled)
}
func (b *idstore) Close() error {
if c, ok := b.bs.(io.Closer); ok {
return c.Close()
}
return nil
}
func (b *idstore) Flush(ctx context.Context) error {
return b.bs.Flush(ctx)
}
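// Behaviour sketch: blocks whose CIDs use the identity multihash are never
// stored; reads reconstruct them from the CID itself (values illustrative).
//
//	c, _ := cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY, MhLength: -1}.Sum([]byte("tiny"))
//	is := NewIDStore(NewMemory())
//	blk, _ := is.Get(ctx, c) // served from the CID; the backing store stays empty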

153
blockstore/ipfs.go Normal file
View File

@ -0,0 +1,153 @@
package blockstore
import (
"bytes"
"context"
"io"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
httpapi "github.com/ipfs/go-ipfs-http-client"
iface "github.com/ipfs/interface-go-ipfs-core"
"github.com/ipfs/interface-go-ipfs-core/options"
"github.com/ipfs/interface-go-ipfs-core/path"
"github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multihash"
"golang.org/x/xerrors"
)
type IPFSBlockstore struct {
ctx context.Context
api, offlineAPI iface.CoreAPI
}
var _ BasicBlockstore = (*IPFSBlockstore)(nil)
func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) {
localApi, err := httpapi.NewLocalApi()
if err != nil {
return nil, xerrors.Errorf("getting local ipfs api: %w", err)
}
api, err := localApi.WithOptions(options.Api.Offline(!onlineMode))
if err != nil {
return nil, xerrors.Errorf("setting offline mode: %s", err)
}
offlineAPI := api
if onlineMode {
offlineAPI, err = localApi.WithOptions(options.Api.Offline(true))
if err != nil {
return nil, xerrors.Errorf("applying offline mode: %s", err)
}
}
bs := &IPFSBlockstore{
ctx: ctx,
api: api,
offlineAPI: offlineAPI,
}
return Adapt(bs), nil
}
func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) {
httpApi, err := httpapi.NewApi(maddr)
if err != nil {
return nil, xerrors.Errorf("setting remote ipfs api: %w", err)
}
api, err := httpApi.WithOptions(options.Api.Offline(!onlineMode))
if err != nil {
return nil, xerrors.Errorf("applying offline mode: %s", err)
}
offlineAPI := api
if onlineMode {
offlineAPI, err = httpApi.WithOptions(options.Api.Offline(true))
if err != nil {
return nil, xerrors.Errorf("applying offline mode: %s", err)
}
}
bs := &IPFSBlockstore{
ctx: ctx,
api: api,
offlineAPI: offlineAPI,
}
return Adapt(bs), nil
}
func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
return xerrors.Errorf("not supported")
}
func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
_, err := i.offlineAPI.Block().Stat(ctx, path.IpldPath(cid))
if err != nil {
// The underlying client is running in Offline mode.
// Stat() will fail with an err if the block isn't in the
// blockstore. If that's the case, return false without
// an error since that's the original intention of this method.
if err.Error() == "blockservice: key not found" {
return false, nil
}
return false, xerrors.Errorf("getting ipfs block: %w", err)
}
return true, nil
}
func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
rd, err := i.api.Block().Get(ctx, path.IpldPath(cid))
if err != nil {
return nil, xerrors.Errorf("getting ipfs block: %w", err)
}
data, err := io.ReadAll(rd)
if err != nil {
return nil, err
}
return blocks.NewBlockWithCid(data, cid)
}
func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
st, err := i.api.Block().Stat(ctx, path.IpldPath(cid))
if err != nil {
return 0, xerrors.Errorf("getting ipfs block: %w", err)
}
return st.Size(), nil
}
func (i *IPFSBlockstore) Put(ctx context.Context, block blocks.Block) error {
mhd, err := multihash.Decode(block.Cid().Hash())
if err != nil {
return err
}
_, err = i.api.Block().Put(ctx, bytes.NewReader(block.RawData()),
options.Block.Hash(mhd.Code, mhd.Length),
options.Block.Format(multihash.Codes[block.Cid().Type()]))
return err
}
func (i *IPFSBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
// TODO: could be done in parallel
for _, block := range blocks {
if err := i.Put(ctx, block); err != nil {
return err
}
}
return nil
}
func (i *IPFSBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, xerrors.Errorf("not supported")
}
func (i *IPFSBlockstore) HashOnRead(enabled bool) {
return // TODO: We could technically support this, but..
}
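// Connection sketch: the address and online flag below are illustrative; the
// store can also attach to a local daemon via NewLocalIPFSBlockstore.
//
//	maddr, _ := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/5001")
//	bs, err := NewRemoteIPFSBlockstore(ctx, maddr, true) // online: misses are fetched from the network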

109
blockstore/mem.go Normal file
View File

@ -0,0 +1,109 @@
package blockstore
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
)
// NewMemory returns a temporary memory-backed blockstore.
func NewMemory() MemBlockstore {
return make(MemBlockstore)
}
// MemBlockstore is a terminal blockstore that keeps blocks in memory.
// To match behavior of badger blockstore we index by multihash only.
type MemBlockstore map[string]blocks.Block
func (MemBlockstore) Flush(context.Context) error { return nil }
func (m MemBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error {
delete(m, string(k.Hash()))
return nil
}
func (m MemBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error {
for _, k := range ks {
delete(m, string(k.Hash()))
}
return nil
}
func (m MemBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) {
_, ok := m[string(k.Hash())]
return ok, nil
}
func (m MemBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error {
b, ok := m[string(k.Hash())]
if !ok {
return ipld.ErrNotFound{Cid: k}
}
return callback(b.RawData())
}
func (m MemBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) {
b, ok := m[string(k.Hash())]
if !ok {
return nil, ipld.ErrNotFound{Cid: k}
}
if b.Cid().Prefix().Codec != k.Prefix().Codec {
return blocks.NewBlockWithCid(b.RawData(), k)
}
return b, nil
}
// GetSize returns the size of the block mapped to the given CID
func (m MemBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) {
b, ok := m[string(k.Hash())]
if !ok {
return 0, ipld.ErrNotFound{Cid: k}
}
return len(b.RawData()), nil
}
// Put puts a given block to the underlying datastore
func (m MemBlockstore) Put(ctx context.Context, b blocks.Block) error {
// Convert to a basic block for safety, but try to reuse the existing
// block if it's already a basic block.
k := string(b.Cid().Hash())
if _, ok := b.(*blocks.BasicBlock); !ok {
// If we already have the block, abort.
if _, ok := m[k]; ok {
return nil
}
// the error is only for debugging.
b, _ = blocks.NewBlockWithCid(b.RawData(), b.Cid())
}
m[k] = b
return nil
}
// PutMany puts a slice of blocks at the same time using batching
// capabilities of the underlying datastore whenever possible.
func (m MemBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error {
for _, b := range bs {
_ = m.Put(ctx, b) // can't fail
}
return nil
}
// AllKeysChan returns a channel from which
// the CIDs in the Blockstore can be read. It should respect
// the given context, closing the channel if it becomes Done.
func (m MemBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
ch := make(chan cid.Cid, len(m))
for _, b := range m {
ch <- b.Cid()
}
close(ch)
return ch, nil
}
// HashOnRead specifies if every read block should be
// rehashed to make sure it matches its CID.
func (m MemBlockstore) HashOnRead(enabled bool) {
// no-op
}

45
blockstore/mem_test.go Normal file
View File

@ -0,0 +1,45 @@
package blockstore
import (
"context"
"testing"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
mh "github.com/multiformats/go-multihash"
"github.com/stretchr/testify/require"
)
func TestMemGetCodec(t *testing.T) {
ctx := context.Background()
bs := NewMemory()
cborArr := []byte{0x82, 1, 2}
h, err := mh.Sum(cborArr, mh.SHA2_256, -1)
require.NoError(t, err)
rawCid := cid.NewCidV1(cid.Raw, h)
rawBlk, err := blocks.NewBlockWithCid(cborArr, rawCid)
require.NoError(t, err)
err = bs.Put(ctx, rawBlk)
require.NoError(t, err)
cborCid := cid.NewCidV1(cid.DagCBOR, h)
cborBlk, err := bs.Get(ctx, cborCid)
require.NoError(t, err)
require.Equal(t, cborCid.Prefix(), cborBlk.Cid().Prefix())
require.EqualValues(t, cborArr, cborBlk.RawData())
// was allocated
require.NotEqual(t, cborBlk, rawBlk)
gotRawBlk, err := bs.Get(ctx, rawCid)
require.NoError(t, err)
// not allocated
require.Equal(t, rawBlk, gotRawBlk)
}

154
blockstore/metrics.go Normal file
View File

@ -0,0 +1,154 @@
package blockstore
import (
"time"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
)
//
// Currently unused, but kept in repo in case we introduce one of the candidate
// cache implementations (Freecache, Ristretto), both of which report these
// metrics.
//
// CacheMetricsEmitInterval is the interval at which metrics are emitted onto
// OpenCensus.
var CacheMetricsEmitInterval = 5 * time.Second
var (
CacheName, _ = tag.NewKey("cache_name")
)
// CacheMeasures groups all metrics emitted by the blockstore caches.
var CacheMeasures = struct {
HitRatio *stats.Float64Measure
Hits *stats.Int64Measure
Misses *stats.Int64Measure
Entries *stats.Int64Measure
QueriesServed *stats.Int64Measure
Adds *stats.Int64Measure
Updates *stats.Int64Measure
Evictions *stats.Int64Measure
CostAdded *stats.Int64Measure
CostEvicted *stats.Int64Measure
SetsDropped *stats.Int64Measure
SetsRejected *stats.Int64Measure
QueriesDropped *stats.Int64Measure
}{
HitRatio: stats.Float64("blockstore/cache/hit_ratio", "Hit ratio of blockstore cache", stats.UnitDimensionless),
Hits: stats.Int64("blockstore/cache/hits", "Total number of hits at blockstore cache", stats.UnitDimensionless),
Misses: stats.Int64("blockstore/cache/misses", "Total number of misses at blockstore cache", stats.UnitDimensionless),
Entries: stats.Int64("blockstore/cache/entry_count", "Total number of entries currently in the blockstore cache", stats.UnitDimensionless),
QueriesServed: stats.Int64("blockstore/cache/queries_served", "Total number of queries served by the blockstore cache", stats.UnitDimensionless),
Adds: stats.Int64("blockstore/cache/adds", "Total number of adds to blockstore cache", stats.UnitDimensionless),
Updates: stats.Int64("blockstore/cache/updates", "Total number of updates in blockstore cache", stats.UnitDimensionless),
Evictions: stats.Int64("blockstore/cache/evictions", "Total number of evictions from blockstore cache", stats.UnitDimensionless),
CostAdded: stats.Int64("blockstore/cache/cost_added", "Total cost (byte size) of entries added into blockstore cache", stats.UnitBytes),
CostEvicted: stats.Int64("blockstore/cache/cost_evicted", "Total cost (byte size) of entries evicted by blockstore cache", stats.UnitBytes),
SetsDropped: stats.Int64("blockstore/cache/sets_dropped", "Total number of sets dropped by blockstore cache", stats.UnitDimensionless),
SetsRejected: stats.Int64("blockstore/cache/sets_rejected", "Total number of sets rejected by blockstore cache", stats.UnitDimensionless),
QueriesDropped: stats.Int64("blockstore/cache/queries_dropped", "Total number of queries dropped by blockstore cache", stats.UnitDimensionless),
}
// CacheViews groups all cache-related default views.
var CacheViews = struct {
HitRatio *view.View
Hits *view.View
Misses *view.View
Entries *view.View
QueriesServed *view.View
Adds *view.View
Updates *view.View
Evictions *view.View
CostAdded *view.View
CostEvicted *view.View
SetsDropped *view.View
SetsRejected *view.View
QueriesDropped *view.View
}{
HitRatio: &view.View{
Measure: CacheMeasures.HitRatio,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
Hits: &view.View{
Measure: CacheMeasures.Hits,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
Misses: &view.View{
Measure: CacheMeasures.Misses,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
Entries: &view.View{
Measure: CacheMeasures.Entries,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
QueriesServed: &view.View{
Measure: CacheMeasures.QueriesServed,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
Adds: &view.View{
Measure: CacheMeasures.Adds,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
Updates: &view.View{
Measure: CacheMeasures.Updates,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
Evictions: &view.View{
Measure: CacheMeasures.Evictions,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
CostAdded: &view.View{
Measure: CacheMeasures.CostAdded,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
CostEvicted: &view.View{
Measure: CacheMeasures.CostEvicted,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
SetsDropped: &view.View{
Measure: CacheMeasures.SetsDropped,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
SetsRejected: &view.View{
Measure: CacheMeasures.SetsRejected,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
QueriesDropped: &view.View{
Measure: CacheMeasures.QueriesDropped,
Aggregation: view.LastValue(),
TagKeys: []tag.Key{CacheName},
},
}
// DefaultViews exports all default views for this package.
var DefaultViews = []*view.View{
CacheViews.HitRatio,
CacheViews.Hits,
CacheViews.Misses,
CacheViews.Entries,
CacheViews.QueriesServed,
CacheViews.Adds,
CacheViews.Updates,
CacheViews.Evictions,
CacheViews.CostAdded,
CacheViews.CostEvicted,
CacheViews.SetsDropped,
CacheViews.SetsRejected,
CacheViews.QueriesDropped,
}
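// Registration sketch: exporting these views is a one-liner with OpenCensus.
//
//	if err := view.Register(DefaultViews...); err != nil {
//		log.Fatalf("cannot register blockstore cache views: %v", err)
//	}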

426
blockstore/net.go Normal file
View File

@ -0,0 +1,426 @@
package blockstore
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"sync"
"sync/atomic"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
"github.com/libp2p/go-msgio"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
)
type NetRPCReqType byte
const (
NRpcHas NetRPCReqType = iota
NRpcGet
NRpcGetSize
NRpcPut
NRpcDelete
// todo cancel req
)
type NetRPCRespType byte
const (
NRpcOK NetRPCRespType = iota
NRpcErr
NRpcMore
)
type NetRPCErrType byte
const (
NRpcErrGeneric NetRPCErrType = iota
NRpcErrNotFound
)
type NetRpcReq struct {
Type NetRPCReqType
ID uint64
Cid []cid.Cid // todo maxsize?
Data [][]byte // todo maxsize?
}
type NetRpcResp struct {
Type NetRPCRespType
ID uint64
// error or cids in allkeys
Data []byte // todo maxsize?
next <-chan NetRpcResp
}
type NetRpcErr struct {
Type NetRPCErrType
Msg string
// in case of NRpcErrNotFound
Cid *cid.Cid
}
type NetworkStore struct {
// note: writer is thread-safe
msgStream msgio.ReadWriteCloser
// atomic
reqCount uint64
respLk sync.Mutex
// respMap is nil after store closes
respMap map[uint64]chan<- NetRpcResp
closing chan struct{}
closed chan struct{}
closeLk sync.Mutex
onClose []func()
}
func NewNetworkStore(mss msgio.ReadWriteCloser) *NetworkStore {
ns := &NetworkStore{
msgStream: mss,
respMap: map[uint64]chan<- NetRpcResp{},
closing: make(chan struct{}),
closed: make(chan struct{}),
}
go ns.receive()
return ns
}
func (n *NetworkStore) shutdown(msg string) {
if err := n.msgStream.Close(); err != nil {
log.Errorw("closing netstore msg stream", "error", err)
}
nerr := NetRpcErr{
Type: NRpcErrGeneric,
Msg: msg,
Cid: nil,
}
var errb bytes.Buffer
if err := nerr.MarshalCBOR(&errb); err != nil {
log.Errorw("netstore shutdown: error marshaling error", "err", err)
}
n.respLk.Lock()
for id, resps := range n.respMap {
resps <- NetRpcResp{
Type: NRpcErr,
ID: id,
Data: errb.Bytes(),
}
}
n.respMap = nil
n.respLk.Unlock()
}
func (n *NetworkStore) OnClose(cb func()) {
n.closeLk.Lock()
defer n.closeLk.Unlock()
select {
case <-n.closed:
cb()
default:
n.onClose = append(n.onClose, cb)
}
}
func (n *NetworkStore) receive() {
defer func() {
n.closeLk.Lock()
defer n.closeLk.Unlock()
close(n.closed)
if n.onClose != nil {
for _, f := range n.onClose {
f()
}
}
}()
for {
select {
case <-n.closing:
n.shutdown("netstore stopping")
return
default:
}
msg, err := n.msgStream.ReadMsg()
if err != nil {
n.shutdown(fmt.Sprintf("netstore ReadMsg: %s", err))
return
}
var resp NetRpcResp
if err := resp.UnmarshalCBOR(bytes.NewReader(msg)); err != nil {
n.shutdown(fmt.Sprintf("unmarshaling netstore response: %s", err))
return
}
n.msgStream.ReleaseMsg(msg)
n.respLk.Lock()
if ch, ok := n.respMap[resp.ID]; ok {
if resp.Type == NRpcMore {
nch := make(chan NetRpcResp, 1)
resp.next = nch
n.respMap[resp.ID] = nch
} else {
delete(n.respMap, resp.ID)
}
ch <- resp
}
n.respLk.Unlock()
}
}
func (n *NetworkStore) sendRpc(rt NetRPCReqType, cids []cid.Cid, data [][]byte) (uint64, <-chan NetRpcResp, error) {
rid := atomic.AddUint64(&n.reqCount, 1)
respCh := make(chan NetRpcResp, 1) // todo pool?
n.respLk.Lock()
if n.respMap == nil {
n.respLk.Unlock()
return 0, nil, xerrors.Errorf("netstore closed")
}
n.respMap[rid] = respCh
n.respLk.Unlock()
req := NetRpcReq{
Type: rt,
ID: rid,
Cid: cids,
Data: data,
}
var rbuf bytes.Buffer // todo buffer pool
if err := req.MarshalCBOR(&rbuf); err != nil {
n.respLk.Lock()
defer n.respLk.Unlock()
if n.respMap == nil {
return 0, nil, xerrors.Errorf("netstore closed")
}
delete(n.respMap, rid)
return 0, nil, err
}
if err := n.msgStream.WriteMsg(rbuf.Bytes()); err != nil {
n.respLk.Lock()
defer n.respLk.Unlock()
if n.respMap == nil {
return 0, nil, xerrors.Errorf("netstore closed")
}
delete(n.respMap, rid)
return 0, nil, err
}
return rid, respCh, nil
}
func (n *NetworkStore) waitResp(ctx context.Context, rch <-chan NetRpcResp, rid uint64) (NetRpcResp, error) {
select {
case resp := <-rch:
if resp.Type == NRpcErr {
var e NetRpcErr
if err := e.UnmarshalCBOR(bytes.NewReader(resp.Data)); err != nil {
return NetRpcResp{}, xerrors.Errorf("unmarshaling error data: %w", err)
}
var err error
switch e.Type {
case NRpcErrNotFound:
if e.Cid != nil {
err = ipld.ErrNotFound{
Cid: *e.Cid,
}
} else {
err = xerrors.Errorf("block not found, but cid was null")
}
case NRpcErrGeneric:
err = xerrors.Errorf("generic error")
default:
err = xerrors.Errorf("unknown error type")
}
return NetRpcResp{}, xerrors.Errorf("netstore error response: %s (%w)", e.Msg, err)
}
return resp, nil
case <-ctx.Done():
// todo send cancel req
n.respLk.Lock()
if n.respMap != nil {
delete(n.respMap, rid)
}
n.respLk.Unlock()
return NetRpcResp{}, ctx.Err()
}
}
func (n *NetworkStore) Has(ctx context.Context, c cid.Cid) (bool, error) {
req, rch, err := n.sendRpc(NRpcHas, []cid.Cid{c}, nil)
if err != nil {
return false, err
}
resp, err := n.waitResp(ctx, rch, req)
if err != nil {
return false, err
}
if len(resp.Data) != 1 {
return false, xerrors.Errorf("expected reposnse length to be 1 byte")
}
switch resp.Data[0] {
case cbg.CborBoolTrue[0]:
return true, nil
case cbg.CborBoolFalse[0]:
return false, nil
default:
return false, xerrors.Errorf("has: bad response: %x", resp.Data[0])
}
}
func (n *NetworkStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) {
req, rch, err := n.sendRpc(NRpcGet, []cid.Cid{c}, nil)
if err != nil {
return nil, err
}
resp, err := n.waitResp(ctx, rch, req)
if err != nil {
return nil, err
}
return blocks.NewBlockWithCid(resp.Data, c)
}
func (n *NetworkStore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error {
req, rch, err := n.sendRpc(NRpcGet, []cid.Cid{c}, nil)
if err != nil {
return err
}
resp, err := n.waitResp(ctx, rch, req)
if err != nil {
return err
}
return callback(resp.Data) // todo return buf to pool
}
func (n *NetworkStore) GetSize(ctx context.Context, c cid.Cid) (int, error) {
req, rch, err := n.sendRpc(NRpcGetSize, []cid.Cid{c}, nil)
if err != nil {
return 0, err
}
resp, err := n.waitResp(ctx, rch, req)
if err != nil {
return 0, err
}
if len(resp.Data) != 4 {
return 0, xerrors.Errorf("expected getsize response to be 4 bytes, was %d", resp.Data)
}
return int(binary.LittleEndian.Uint32(resp.Data)), nil
}
func (n *NetworkStore) Put(ctx context.Context, block blocks.Block) error {
return n.PutMany(ctx, []blocks.Block{block})
}
func (n *NetworkStore) PutMany(ctx context.Context, blocks []blocks.Block) error {
// todo pool
cids := make([]cid.Cid, len(blocks))
blkDatas := make([][]byte, len(blocks))
for i, block := range blocks {
cids[i] = block.Cid()
blkDatas[i] = block.RawData()
}
req, rch, err := n.sendRpc(NRpcPut, cids, blkDatas)
if err != nil {
return err
}
_, err = n.waitResp(ctx, rch, req)
if err != nil {
return err
}
return nil
}
func (n *NetworkStore) DeleteBlock(ctx context.Context, c cid.Cid) error {
return n.DeleteMany(ctx, []cid.Cid{c})
}
func (n *NetworkStore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
req, rch, err := n.sendRpc(NRpcDelete, cids, nil)
if err != nil {
return err
}
_, err = n.waitResp(ctx, rch, req)
if err != nil {
return err
}
return nil
}
func (n *NetworkStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, xerrors.Errorf("not supported")
}
func (n *NetworkStore) HashOnRead(enabled bool) {
// todo
return
}
func (*NetworkStore) Flush(context.Context) error { return nil }
func (n *NetworkStore) Stop(ctx context.Context) error {
close(n.closing)
select {
case <-n.closed:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
var _ Blockstore = &NetworkStore{}

blockstore/net_serve.go Normal file
@@ -0,0 +1,237 @@
package blockstore
import (
"bytes"
"context"
"encoding/binary"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ipld "github.com/ipfs/go-ipld-format"
"github.com/libp2p/go-msgio"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
)
type NetworkStoreHandler struct {
msgStream msgio.ReadWriteCloser
bs Blockstore
}
// NOTE: This code isn't yet hardened to accept untrusted input. See TODOs here and in net.go
func HandleNetBstoreStream(ctx context.Context, bs Blockstore, mss msgio.ReadWriteCloser) *NetworkStoreHandler {
ns := &NetworkStoreHandler{
msgStream: mss,
bs: bs,
}
go ns.handle(ctx)
return ns
}
func (h *NetworkStoreHandler) handle(ctx context.Context) {
defer func() {
if err := h.msgStream.Close(); err != nil {
log.Errorw("error closing blockstore stream", "error", err)
}
}()
for {
var req NetRpcReq
ms, err := h.msgStream.ReadMsg()
if err != nil {
log.Warnw("bstore stream err", "error", err)
return
}
if err := req.UnmarshalCBOR(bytes.NewReader(ms)); err != nil {
return
}
h.msgStream.ReleaseMsg(ms)
switch req.Type {
case NRpcHas:
if len(req.Cid) != 1 {
if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil {
log.Warnw("writing error response", "error", err)
return
}
continue
}
res, err := h.bs.Has(ctx, req.Cid[0])
if err != nil {
if err := h.respondError(req.ID, err, req.Cid[0]); err != nil {
log.Warnw("writing error response", "error", err)
return
}
continue
}
var resData [1]byte
if res {
resData[0] = cbg.CborBoolTrue[0]
} else {
resData[0] = cbg.CborBoolFalse[0]
}
if err := h.respond(req.ID, NRpcOK, resData[:]); err != nil {
log.Warnw("writing response", "error", err)
return
}
case NRpcGet:
if len(req.Cid) != 1 {
if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil {
log.Warnw("writing error response", "error", err)
return
}
continue
}
err := h.bs.View(ctx, req.Cid[0], func(bdata []byte) error {
return h.respond(req.ID, NRpcOK, bdata)
})
if err != nil {
if err := h.respondError(req.ID, err, req.Cid[0]); err != nil {
log.Warnw("writing error response", "error", err)
return
}
continue
}
case NRpcGetSize:
if len(req.Cid) != 1 {
if err := h.respondError(req.ID, xerrors.New("expected request for 1 cid"), cid.Undef); err != nil {
log.Warnw("writing error response", "error", err)
return
}
continue
}
sz, err := h.bs.GetSize(ctx, req.Cid[0])
if err != nil {
if err := h.respondError(req.ID, err, req.Cid[0]); err != nil {
log.Warnw("writing error response", "error", err)
return
}
continue
}
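// encode the size as a little-endian uint32; the client's GetSize decodes
// this 4-byte response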
var resData [4]byte
binary.LittleEndian.PutUint32(resData[:], uint32(sz))
if err := h.respond(req.ID, NRpcOK, resData[:]); err != nil {
log.Warnw("writing response", "error", err)
return
}
case NRpcPut:
blocks := make([]block.Block, len(req.Cid))
if len(req.Cid) != len(req.Data) {
if err := h.respondError(req.ID, xerrors.New("cid count didn't match data count"), cid.Undef); err != nil {
log.Warnw("writing error response", "error", err)
}
return
}
for i := range req.Cid {
blocks[i], err = block.NewBlockWithCid(req.Data[i], req.Cid[i])
if err != nil {
log.Warnw("make block", "error", err)
return
}
}
err := h.bs.PutMany(ctx, blocks)
if err != nil {
if err := h.respondError(req.ID, err, cid.Undef); err != nil {
log.Warnw("writing error response", "error", err)
return
}
continue
}
if err := h.respond(req.ID, NRpcOK, []byte{}); err != nil {
log.Warnw("writing response", "error", err)
return
}
case NRpcDelete:
err := h.bs.DeleteMany(ctx, req.Cid)
if err != nil {
if err := h.respondError(req.ID, err, cid.Undef); err != nil {
log.Warnw("writing error response", "error", err)
return
}
continue
}
if err := h.respond(req.ID, NRpcOK, []byte{}); err != nil {
log.Warnw("writing response", "error", err)
return
}
default:
if err := h.respondError(req.ID, xerrors.New("unsupported request type"), cid.Undef); err != nil {
log.Warnw("writing error response", "error", err)
return
}
continue
}
}
}
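// respondError writes an error response for the given request ID;
// ipld.ErrNotFound is sent as a typed not-found error together with the cid
// so the client can reconstruct it.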
func (h *NetworkStoreHandler) respondError(req uint64, uerr error, c cid.Cid) error {
var resp NetRpcResp
resp.ID = req
resp.Type = NRpcErr
nerr := NetRpcErr{
Type: NRpcErrGeneric,
Msg: uerr.Error(),
}
if ipld.IsNotFound(uerr) {
nerr.Type = NRpcErrNotFound
nerr.Cid = &c
}
var edata bytes.Buffer
if err := nerr.MarshalCBOR(&edata); err != nil {
return xerrors.Errorf("marshaling error data: %w", err)
}
resp.Data = edata.Bytes()
var msg bytes.Buffer
if err := resp.MarshalCBOR(&msg); err != nil {
return xerrors.Errorf("marshaling error response: %w", err)
}
if err := h.msgStream.WriteMsg(msg.Bytes()); err != nil {
return xerrors.Errorf("write error response: %w", err)
}
return nil
}
func (h *NetworkStoreHandler) respond(req uint64, rt NetRPCRespType, data []byte) error {
var resp NetRpcResp
resp.ID = req
resp.Type = rt
resp.Data = data
var msg bytes.Buffer
if err := resp.MarshalCBOR(&msg); err != nil {
return xerrors.Errorf("marshaling response: %w", err)
}
if err := h.msgStream.WriteMsg(msg.Bytes()); err != nil {
return xerrors.Errorf("write response: %w", err)
}
return nil
}

blockstore/net_test.go Normal file
@@ -0,0 +1,63 @@
package blockstore
import (
"context"
"fmt"
"io"
"testing"
block "github.com/ipfs/go-block-format"
ipld "github.com/ipfs/go-ipld-format"
"github.com/libp2p/go-msgio"
"github.com/stretchr/testify/require"
)
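// TestNetBstore exercises a NetworkStore client against a NetworkStoreHandler
// over in-memory pipes: Has/Put/GetSize/DeleteBlock round trips, plus
// not-found error propagation on Get.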
func TestNetBstore(t *testing.T) {
ctx := context.Background()
cr, sw := io.Pipe()
sr, cw := io.Pipe()
cm := msgio.Combine(msgio.NewWriter(cw), msgio.NewReader(cr))
sm := msgio.Combine(msgio.NewWriter(sw), msgio.NewReader(sr))
bbs := NewMemorySync()
_ = HandleNetBstoreStream(ctx, bbs, sm)
nbs := NewNetworkStore(cm)
tb1 := block.NewBlock([]byte("aoeu"))
h, err := nbs.Has(ctx, tb1.Cid())
require.NoError(t, err)
require.False(t, h)
err = nbs.Put(ctx, tb1)
require.NoError(t, err)
h, err = nbs.Has(ctx, tb1.Cid())
require.NoError(t, err)
require.True(t, h)
sz, err := nbs.GetSize(ctx, tb1.Cid())
require.NoError(t, err)
require.Equal(t, 4, sz)
err = nbs.DeleteBlock(ctx, tb1.Cid())
require.NoError(t, err)
h, err = nbs.Has(ctx, tb1.Cid())
require.NoError(t, err)
require.False(t, h)
_, err = nbs.Get(ctx, tb1.Cid())
fmt.Println(err)
require.True(t, ipld.IsNotFound(err))
err = nbs.Put(ctx, tb1)
require.NoError(t, err)
b, err := nbs.Get(ctx, tb1.Cid())
require.NoError(t, err)
require.Equal(t, "aoeu", string(b.RawData()))
}

blockstore/net_ws.go Normal file
@@ -0,0 +1,100 @@
package blockstore
import (
"bytes"
"context"
"github.com/gorilla/websocket"
"github.com/libp2p/go-msgio"
"golang.org/x/xerrors"
)
type wsWrapper struct {
wc *websocket.Conn
nextMsg []byte
}
func (w *wsWrapper) Read(b []byte) (int, error) {
return 0, xerrors.New("read unsupported")
}
func (w *wsWrapper) ReadMsg() ([]byte, error) {
if w.nextMsg != nil {
nm := w.nextMsg
w.nextMsg = nil
return nm, nil
}
mt, r, err := w.wc.NextReader()
if err != nil {
return nil, err
}
switch mt {
case websocket.BinaryMessage, websocket.TextMessage:
default:
return nil, xerrors.Errorf("unexpected message type")
}
// todo pool
// todo limit sizes
var mbuf bytes.Buffer
if _, err := mbuf.ReadFrom(r); err != nil {
return nil, err
}
return mbuf.Bytes(), nil
}
func (w *wsWrapper) ReleaseMsg(bytes []byte) {
// todo use a pool
}
func (w *wsWrapper) NextMsgLen() (int, error) {
if w.nextMsg != nil {
return len(w.nextMsg), nil
}
mt, msg, err := w.wc.ReadMessage()
if err != nil {
return 0, err
}
switch mt {
case websocket.BinaryMessage, websocket.TextMessage:
default:
return 0, xerrors.Errorf("unexpected message type")
}
w.nextMsg = msg
return len(w.nextMsg), nil
}
func (w *wsWrapper) Write(bytes []byte) (int, error) {
return 0, xerrors.New("write unsupported")
}
func (w *wsWrapper) WriteMsg(bytes []byte) error {
return w.wc.WriteMessage(websocket.BinaryMessage, bytes)
}
func (w *wsWrapper) Close() error {
return w.wc.Close()
}
var _ msgio.ReadWriteCloser = &wsWrapper{}
func wsConnToMio(wc *websocket.Conn) msgio.ReadWriteCloser {
return &wsWrapper{
wc: wc,
}
}
func HandleNetBstoreWS(ctx context.Context, bs Blockstore, wc *websocket.Conn) *NetworkStoreHandler {
return HandleNetBstoreStream(ctx, bs, wsConnToMio(wc))
}
func NewNetworkStoreWS(wc *websocket.Conn) *NetworkStore {
return NewNetworkStore(wsConnToMio(wc))
}
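// Example (illustrative only, not part of this change): serving a blockstore
// over a websocket endpoint with gorilla/websocket, and dialing it from a
// client. The route, address, upgrader settings and `bs` blockstore below
// are hypothetical.
//
//	var upgrader = websocket.Upgrader{}
//
//	http.HandleFunc("/bstore", func(w http.ResponseWriter, r *http.Request) {
//		wc, err := upgrader.Upgrade(w, r, nil)
//		if err != nil {
//			return
//		}
//		_ = HandleNetBstoreWS(r.Context(), bs, wc)
//	})
//
//	// client side:
//	wc, _, err := websocket.DefaultDialer.Dial("ws://127.0.0.1:1234/bstore", nil)
//	if err != nil {
//		// handle err
//	}
//	nbs := NewNetworkStoreWS(wc)
//	defer func() { _ = nbs.Stop(context.Background()) }()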

@@ -0,0 +1,132 @@
# SplitStore: An actively scalable blockstore for the Filecoin chain
The SplitStore was first introduced in lotus v1.5.1, as an experiment
in reducing the performance impact of large blockstores.
With lotus v1.11.1, we introduce the next iteration in design and
implementation, which we call SplitStore v1.
The new design (see [#6474](https://github.com/filecoin-project/lotus/pull/6474))
evolves the splitstore to be a freestanding compacting blockstore that
allows us to keep a small (60-100GB) working set in a hot blockstore
and reliably archive out-of-scope objects in a coldstore. The
coldstore can be a regular badger blockstore (the default), which can
be periodically garbage collected according to configurable user
retention policies, or a discard store, whereby out-of-scope objects
are simply discarded.
To enable the splitstore, edit `.lotus/config.toml` and add the following:
```
[Chainstore]
EnableSplitstore = true
```
If you intend to use the discard coldstore, you also need to add the following:
```
[Chainstore.Splitstore]
ColdStoreType = "discard"
```
In general you _should not_ have to use the discard store, unless you
are running a network assistive node (like a bootstrapper or booster)
or have very constrained hardware with not enough disk space to
maintain a coldstore, even with garbage collection. It is also appropriate
for small nodes that are simply watching the chain.
*Warning:* Using the discard store for a general purpose node is discouraged, unless
you really know what you are doing. Use it at your own risk.
## Configuration Options
These are options in the `[Chainstore.Splitstore]` section of the configuration:
- `HotStoreType` -- specifies the type of hotstore to use.
The only currently supported option is `"badger"`.
- `ColdStoreType` -- specifies the type of coldstore to use.
The default value is `"universal"`, which will use the initial monolith blockstore
as the coldstore.
The other possible value is `"discard"`, as outlined above, which is specialized for
running without a coldstore. Note that the discard store wraps the initial monolith
blockstore and discards writes; this is necessary to support syncing from a snapshot.
- `MarkSetType` -- specifies the type of markset to use during compaction.
The markset is the data structure used by compaction/gc to track live objects.
The default value is "badger", which will use a disk backed markset using badger.
If you have a lot of memory (48G or more) you can also use "map", which will use
an in memory markset, speeding up compaction at the cost of higher memory usage.
Note: If you are using a VPS with a network volume, you need to provision at least
3000 IOPs with the badger markset.
- `HotStoreMessageRetention` -- specifies how many finalities, beyond the 4
finalities maintained by default, to maintain messages and message receipts in the
hotstore. This is useful for assistive nodes that want to support syncing for other
nodes beyond 4 finalities, while running with the discard coldstore option.
It is also useful for miners who accept deals and need to look back at messages beyond
the 4 finalities, which would otherwise hit the coldstore.
- `HotStoreFullGCFrequency` -- specifies how frequently to garbage collect the hotstore
using full (moving) GC.
The default value is 20, which uses full GC every 20 compactions (about once a week);
set to 0 to disable full GC altogether.
Rationale: badger supports online GC, and this is used by default. However, it has proven
ineffective in practice, with the hotstore size slowly creeping up. In order to address this,
we have added moving GC support in our badger wrapper, which can effectively reclaim all space.
The downside is that it takes a bit longer to perform a moving GC and you also need enough
space to house the new hotstore while the old one is still live.
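Putting these options together, a splitstore section in `.lotus/config.toml` might look like the following sketch (values shown are the documented defaults, except `MarkSetType = "map"`, which is the high-memory option described above):
```
[Chainstore]
  EnableSplitstore = true
  [Chainstore.Splitstore]
    HotStoreType = "badger"
    ColdStoreType = "universal"
    MarkSetType = "map"
    HotStoreFullGCFrequency = 20
```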
## Operation
When the splitstore is first enabled, the existing blockstore becomes
the coldstore and a fresh hotstore is initialized.
The hotstore is warmed up on first startup so as to load all chain
headers and state roots in the current head. This allows us to
immediately gain the performance benefits of a smaller blockstore, which
can be substantial for full archival nodes.
All new writes are directed to the hotstore, while reads first hit the
hotstore, with fallback to the coldstore.
Once 5 finalities have elapsed, and every finality henceforth, the
blockstore _compacts_. Compaction is the process of moving all
objects unreachable within the last 4 finalities from the hotstore to
the coldstore. If the system is configured with a discard coldstore,
these objects are discarded. Note that chain headers, all the way to
genesis, are considered reachable. Stateroots and messages are
considered reachable only within the last 4 finalities, unless there
is a live reference to them.
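For illustration, the read path described above amounts to something like the following sketch (not the actual splitstore code; it assumes the `Blockstore` interface and the `blocks`/`ipld`/`cid` packages used elsewhere in this package):
```
// get tries the hotstore first and falls back to the coldstore on a
// not-found error.
func get(ctx context.Context, hot, cold Blockstore, c cid.Cid) (blocks.Block, error) {
	b, err := hot.Get(ctx, c)
	if err == nil {
		return b, nil // fast path: object is in the hot working set
	}
	if !ipld.IsNotFound(err) {
		return nil, err
	}
	return cold.Get(ctx, c) // fall back to the coldstore
}
```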
## Compaction
Compaction works transactionally with the following algorithm:
- We prepare a transaction, whereby all objects referenced through API i/o are tracked.
- We walk the chain and mark reachable objects, keeping 4 finalities of state roots and messages and all headers all the way to genesis.
- Once the chain walk is complete, we begin full transaction protection with concurrent marking; we walk and mark all references created during the chain walk. At the same time, all i/o through the API concurrently marks objects as live references.
- We collect cold objects by iterating through the hotstore and checking the mark set; if an object is not marked, then it is a candidate for purging.
- When running with a coldstore, we next copy all cold objects to the coldstore.
- At this point we are ready to begin purging:
- We sort cold objects heaviest first, so as to never delete the constituents of a DAG before the DAG itself (which would leave dangling references)
- We delete in small batches while holding a lock; each batch is checked again against the concurrent transactional mark set, so as to never delete anything live
- We then end the transaction and compact/gc the hotstore; a simplified sketch of the copy-and-purge phases follows.
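The following is a minimal sketch of the copy-and-purge phases, assuming a `marked` function that reports the concurrent mark set and omitting the batching, locking and sorting described above; it is illustrative only, not the actual implementation:
```
// compactSketch moves every unmarked object from the hotstore to the
// coldstore and then deletes it from the hotstore.
func compactSketch(ctx context.Context, hot, cold Blockstore, marked func(cid.Cid) bool) error {
	ch, err := hot.AllKeysChan(ctx)
	if err != nil {
		return err
	}
	for c := range ch {
		if marked(c) { // live within the last 4 finalities: keep hot
			continue
		}
		b, err := hot.Get(ctx, c)
		if err != nil {
			return err
		}
		if err := cold.Put(ctx, b); err != nil { // copy before purging
			return err
		}
		if err := hot.DeleteBlock(ctx, c); err != nil {
			return err
		}
	}
	return nil
}
```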
As of [#8008](https://github.com/filecoin-project/lotus/pull/8008) the compaction algorithm has been
modified to eliminate sorting and maintain the cold object set on disk. This drastically reduces
memory usage; in fact, when using badger as the markset, compaction uses very little memory, and
it should now be possible to run splitstore with 32GB of RAM or less without danger of running out of
memory during compaction.
## Garbage Collection
TBD -- see [#6577](https://github.com/filecoin-project/lotus/issues/6577)
## Utilities
`lotus-shed` has a `splitstore` command which provides some utilities:
- `rollback` -- rolls back a splitstore installation.
This command copies the hotstore on top of the coldstore, and then deletes the splitstore
directory and associated metadata keys.
It can also optionally compact/gc the coldstore after the copy (with the `--gc-coldstore` flag)
and automatically rewrite the lotus config to disable splitstore (with the `--rewrite-config` flag).
Note: the node *must be stopped* before running this command.
- `clear` -- clears a splitstore installation for restart from snapshot.
- `check` -- asynchronously runs a basic healthcheck on the splitstore.
The results are appended to `<lotus-repo>/datastore/splitstore/check.txt`.
- `info` -- prints some basic information about the splitstore.

Some files were not shown because too many files have changed in this diff.