Compare commits
9 Commits
v1.27.1-rc ... master

SHA1
f27f2a87ee
758dd13965
fc8600e1c0
a9038d28bc
54d35ac88c
0b6cf58995
f7a51f0387
fdb1108154
6d47bcba54

.circleci/config.yml (new file, 1383 lines)
File diff suppressed because it is too large.

.circleci/gen.go (new file, 156 lines)
@@ -0,0 +1,156 @@
package main

import (
	"embed"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"text/template"
)

var GoVersion = "" // from init below. Ex: 1.19.7

//go:generate go run ./gen.go ..

//go:embed template.yml
var templateFile embed.FS

func init() {
	b, err := os.ReadFile("../go.mod")
	if err != nil {
		panic("cannot find go.mod in parent folder")
	}
	for _, line := range strings.Split(string(b), "\n") {
		if strings.HasPrefix(line, "go ") {
			GoVersion = line[3:]
		}
	}
}

type (
	dirs  = []string
	suite = string
)

// groupedUnitTests maps suite names to top-level directories that should be
// included in that suite. The program adds an implicit group "rest" that
// includes all other top-level directories.
var groupedUnitTests = map[suite]dirs{
	"unit-node":    {"node"},
	"unit-storage": {"storage", "extern"},
	"unit-cli":     {"cli", "cmd", "api"},
}

func main() {
	if len(os.Args) != 2 {
		panic("expected path to repo as argument")
	}

	repo := os.Args[1]

	tmpl := template.New("template.yml")
	tmpl.Delims("[[", "]]")
	tmpl.Funcs(template.FuncMap{
		"stripSuffix": func(in string) string {
			return strings.TrimSuffix(in, "_test.go")
		},
	})
	tmpl = template.Must(tmpl.ParseFS(templateFile, "*"))

	// list all itests.
	itests, err := filepath.Glob(filepath.Join(repo, "./itests/*_test.go"))
	if err != nil {
		panic(err)
	}

	// strip the dir from all entries.
	for i, f := range itests {
		itests[i] = filepath.Base(f)
	}

	// calculate the exclusion set of unit test directories to exclude because
	// they are already included in a grouped suite.
	var excluded = map[string]struct{}{}
	for _, ss := range groupedUnitTests {
		for _, s := range ss {
			e, err := filepath.Abs(filepath.Join(repo, s))
			if err != nil {
				panic(err)
			}
			// Redundantly flag both absolute and relative paths as excluded
			excluded[filepath.Join(repo, s)] = struct{}{}
			excluded[e] = struct{}{}
		}
	}

	// all unit tests top-level dirs that are not itests, nor included in other suites.
	var rest = map[string]struct{}{}
	err = filepath.Walk(repo, func(path string, f os.FileInfo, err error) error {
		// include all tests that aren't in the itests directory.
		if strings.Contains(path, "itests") {
			return filepath.SkipDir
		}
		// exclude all tests included in other suites
		if f.IsDir() {
			if _, ok := excluded[path]; ok {
				return filepath.SkipDir
			}
		}
		if strings.HasSuffix(path, "_test.go") {
			rel, err := filepath.Rel(repo, path)
			if err != nil {
				panic(err)
			}
			// take the first directory
			rest[strings.Split(rel, string(os.PathSeparator))[0]] = struct{}{}
		}
		return err
	})
	if err != nil {
		panic(err)
	}

	// add other directories to a 'rest' suite.
	for k := range rest {
		groupedUnitTests["unit-rest"] = append(groupedUnitTests["unit-rest"], k)
	}

	// map iteration guarantees no order, so sort the array in-place.
	sort.Strings(groupedUnitTests["unit-rest"])

	// form the input data.
	type data struct {
		Networks   []string
		ItestFiles []string
		UnitSuites map[string]string
		GoVersion  string
	}
	in := data{
		Networks:   []string{"mainnet", "butterflynet", "calibnet", "debug"},
		ItestFiles: itests,
		UnitSuites: func() map[string]string {
			ret := make(map[string]string)
			for name, dirs := range groupedUnitTests {
				for i, d := range dirs {
					dirs[i] = fmt.Sprintf("./%s/...", d) // turn into package
				}
				ret[name] = strings.Join(dirs, " ")
			}
			return ret
		}(),
		GoVersion: GoVersion,
	}

	out, err := os.Create("./config.yml")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// execute the template.
	if err := tmpl.Execute(out, in); err != nil {
		panic(err)
	}
}

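The generator relies on Go's text/template with the delimiters switched to [[ and ]] so that CircleCI's own ${{ }} and << >> syntax in template.yml passes through untouched, plus a stripSuffix helper to turn itest file names into job names. The following is a minimal, hypothetical sketch of that mechanism, not part of the diff; the template string and input data are made up for illustration only.

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Same setup as gen.go: custom [[ ]] delimiters plus a stripSuffix helper.
	tmpl := template.New("demo")
	tmpl.Delims("[[", "]]")
	tmpl.Funcs(template.FuncMap{
		"stripSuffix": func(in string) string { return strings.TrimSuffix(in, "_test.go") },
	})

	// Hypothetical template snippet mirroring the itest loop in template.yml.
	const src = `[[- range $file := .ItestFiles ]]
  - test:
      name: test-itest-[[ $file | stripSuffix ]]
      target: "./itests/[[ $file ]]"
[[- end ]]
`
	tmpl = template.Must(tmpl.Parse(src))

	// Made-up input resembling the data struct gen.go builds from the repo.
	data := struct{ ItestFiles []string }{
		ItestFiles: []string{"deals_test.go", "wdpost_test.go"},
	}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}

In the actual tooling, the //go:generate go run ./gen.go .. directive near the top of gen.go regenerates .circleci/config.yml from template.yml.
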
.circleci/template.yml (new file, 759 lines)
@@ -0,0 +1,759 @@
version: 2.1
orbs:
  aws-cli: circleci/aws-cli@4.1.1
  docker: circleci/docker@2.3.0

executors:
  golang:
    docker:
      # Must match GO_VERSION_MIN in project root
      - image: cimg/go:1.21.7
    resource_class: medium+
  golang-2xl:
    docker:
      # Must match GO_VERSION_MIN in project root
      - image: cimg/go:1.21.7
    resource_class: 2xlarge
  ubuntu:
    docker:
      - image: ubuntu:20.04

commands:
  build-platform-specific:
    parameters:
      linux:
        default: true
        description: is a linux build environment?
        type: boolean
      darwin:
        default: false
        description: is a darwin build environment?
        type: boolean
      darwin-architecture:
        default: "amd64"
        description: which darwin architecture is being used?
        type: string
    steps:
      - checkout
      - git_fetch_all_tags
      - run: git submodule sync
      - run: git submodule update --init
      - when:
          condition: <<parameters.linux>>
          steps:
            - install-ubuntu-deps
            - check-go-version
      - when:
          condition: <<parameters.darwin>>
          steps:
            - run:
                name: Install Go
                command: |
                  curl https://dl.google.com/go/go`cat GO_VERSION_MIN`.darwin-<<parameters.darwin-architecture>>.pkg -o /tmp/go.pkg && \
                  sudo installer -pkg /tmp/go.pkg -target /
            - run:
                name: Export Go
                command: |
                  echo 'export GOPATH="${HOME}/go"' >> $BASH_ENV
            - run: go version
            - run:
                name: Install dependencies with Homebrew
                command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config coreutils jq hwloc
            - run:
                name: Install Rust
                command: |
                  curl https://sh.rustup.rs -sSf | sh -s -- -y
      - run: make deps
  download-params:
    steps:
      - restore_cache:
          name: Restore parameters cache
          keys:
            - 'v26-2k-lotus-params'
      - run: ./lotus fetch-params 2048
      - save_cache:
          name: Save parameters cache
          key: 'v26-2k-lotus-params'
          paths:
            - /var/tmp/filecoin-proof-parameters/
  install_ipfs:
    steps:
      - run: |
          curl -O https://dist.ipfs.tech/kubo/v0.16.0/kubo_v0.16.0_linux-amd64.tar.gz
          tar -xvzf kubo_v0.16.0_linux-amd64.tar.gz
          pushd kubo
          sudo bash install.sh
          popd
          rm -rf kubo
          rm kubo_v0.16.0_linux-amd64.tar.gz
  git_fetch_all_tags:
    steps:
      - run:
          name: fetch all tags
          command: |
            git fetch --all
  install-ubuntu-deps:
    steps:
      - run: sudo apt install curl ca-certificates gnupg
      - run: sudo apt-get update
      - run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
  check-go-version:
    steps:
      - run: |
          v=`go version | { read _ _ v _; echo ${v#go}; }`
          if [["[[ $v != `cat GO_VERSION_MIN` ]]"]]; then
            echo "GO_VERSION_MIN file does not match the go version being used."
            echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v."
            exit 1
          fi

jobs:
  build:
    executor: golang
    working_directory: ~/lotus
    steps:
      - checkout
      - git_fetch_all_tags
      - run: git submodule sync
      - run: git submodule update --init
      - install-ubuntu-deps
      - check-go-version
      - run: make deps lotus
      - persist_to_workspace:
          root: ~/
          paths:
            - "lotus"
  mod-tidy-check:
    executor: golang
    working_directory: ~/lotus
    steps:
      - install-ubuntu-deps
      - attach_workspace:
          at: ~/
      - run: go mod tidy -v
      - run:
          name: Check git diff
          command: |
            git --no-pager diff go.mod go.sum
            git --no-pager diff --quiet go.mod go.sum

  test:
    description: |
      Run tests with gotestsum.
    working_directory: ~/lotus
    parameters: &test-params
      resource_class:
        type: string
        default: medium+
      go-test-flags:
        type: string
        default: "-timeout 20m"
        description: Flags passed to go test.
      target:
        type: string
        default: "./..."
        description: Import paths of packages to be tested.
      proofs-log-test:
        type: string
        default: "0"
      get-params:
        type: boolean
        default: false
      suite:
        type: string
        default: unit
        description: Test suite name to report to CircleCI.
    docker:
      - image: cimg/go:[[ .GoVersion]]
        environment:
          LOTUS_HARMONYDB_HOSTS: yugabyte
      - image: yugabytedb/yugabyte:2.18.0.0-b65
        command: bin/yugabyted start --daemon=false
        name: yugabyte
    resource_class: << parameters.resource_class >>
    steps:
      - install-ubuntu-deps
      - attach_workspace:
          at: ~/
      - when:
          condition: << parameters.get-params >>
          steps:
            - download-params
      - run:
          name: go test
          environment:
            TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >>
            SKIP_CONFORMANCE: "1"
            LOTUS_SRC_DIR: /home/circleci/project
          command: |
            mkdir -p /tmp/test-reports/<< parameters.suite >>
            mkdir -p /tmp/test-artifacts
            dockerize -wait tcp://yugabyte:5433 -timeout 3m
            env
            gotestsum \
              --format standard-verbose \
              --junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
              --jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
              --packages="<< parameters.target >>" \
              -- << parameters.go-test-flags >>
          no_output_timeout: 30m
      - store_test_results:
          path: /tmp/test-reports
      - store_artifacts:
          path: /tmp/test-artifacts/<< parameters.suite >>.json

  test-conformance:
    working_directory: ~/lotus
    description: |
      Run tests using a corpus of interoperable test vectors for Filecoin
      implementations to test their correctness and compliance with the Filecoin
      specifications.
    parameters:
      <<: *test-params
      vectors-branch:
        type: string
        default: ""
        description: |
          Branch on github.com/filecoin-project/test-vectors to checkout and
          test with. If empty (the default) the commit defined by the git
          submodule is used.
    docker:
      - image: cimg/go:[[ .GoVersion]]
    resource_class: << parameters.resource_class >>
    steps:
      - install-ubuntu-deps
      - attach_workspace:
          at: ~/
      - download-params
      - when:
          condition:
            not:
              equal: [ "", << parameters.vectors-branch >> ]
          steps:
            - run:
                name: checkout vectors branch
                command: |
                  cd extern/test-vectors
                  git fetch
                  git checkout origin/<< parameters.vectors-branch >>
      - run:
          name: install statediff globally
          command: |
            ## statediff is optional; we succeed even if compilation fails.
            mkdir -p /tmp/statediff
            git clone https://github.com/filecoin-project/statediff.git /tmp/statediff
            cd /tmp/statediff
            go install ./cmd/statediff || exit 0
      - run:
          name: go test
          environment:
            SKIP_CONFORMANCE: "0"
          command: |
            mkdir -p /tmp/test-reports
            mkdir -p /tmp/test-artifacts
            gotestsum \
              --format pkgname-and-test-fails \
              --junitfile /tmp/test-reports/junit.xml \
              -- \
              -v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/
            go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html
          no_output_timeout: 30m
      - store_test_results:
          path: /tmp/test-reports
      - store_artifacts:
          path: /tmp/test-artifacts/conformance-coverage.html

  build-linux-amd64:
    executor: golang
    steps:
      - build-platform-specific
      - run: make lotus lotus-miner lotus-worker
      - run:
          name: check tag and version output match
          command: ./scripts/version-check.sh ./lotus
      - run: |
          mkdir -p /tmp/workspace/linux_amd64_v1 && \
          mv lotus lotus-miner lotus-worker /tmp/workspace/linux_amd64_v1/
      - persist_to_workspace:
          root: /tmp/workspace
          paths:
            - linux_amd64_v1

  build-darwin-amd64:
    description: build darwin lotus binary
    working_directory: ~/go/src/github.com/filecoin-project/lotus
    macos:
      xcode: "13.4.1"
    steps:
      - build-platform-specific:
          linux: false
          darwin: true
          darwin-architecture: amd64
      - run: make lotus lotus-miner lotus-worker
      - run: otool -hv lotus
      - run:
          name: check tag and version output match
          command: ./scripts/version-check.sh ./lotus
      - run: |
          mkdir -p /tmp/workspace/darwin_amd64_v1 && \
          mv lotus lotus-miner lotus-worker /tmp/workspace/darwin_amd64_v1/
      - persist_to_workspace:
          root: /tmp/workspace
          paths:
            - darwin_amd64_v1

  build-darwin-arm64:
    description: self-hosted m1 runner
    working_directory: ~/go/src/github.com/filecoin-project/lotus
    machine: true
    resource_class: filecoin-project/self-hosted-m1
    steps:
      - run: echo 'export PATH=/opt/homebrew/bin:"$PATH"' >> "$BASH_ENV"
      - build-platform-specific:
          linux: false
          darwin: true
          darwin-architecture: arm64
      - run: |
          export CPATH=$(brew --prefix)/include && export LIBRARY_PATH=$(brew --prefix)/lib && make lotus lotus-miner lotus-worker
      - run: otool -hv lotus
      - run:
          name: check tag and version output match
          command: ./scripts/version-check.sh ./lotus
      - run: |
          mkdir -p /tmp/workspace/darwin_arm64 && \
          mv lotus lotus-miner lotus-worker /tmp/workspace/darwin_arm64/
      - persist_to_workspace:
          root: /tmp/workspace
          paths:
            - darwin_arm64
      - run:
          command: make clean
          when: always
      - run:
          name: cleanup homebrew
          command: HOMEBREW_NO_AUTO_UPDATE=1 brew uninstall pkg-config coreutils jq hwloc
          when: always

  release:
    executor: golang
    parameters:
      dry-run:
        default: false
        description: should this release actually publish it's artifacts?
        type: boolean
    steps:
      - checkout
      - run: |
          echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list
          sudo apt update
          sudo apt install goreleaser-pro
      - install_ipfs
      - attach_workspace:
          at: /tmp/workspace
      - when:
          condition: << parameters.dry-run >>
          steps:
            - run: goreleaser release --rm-dist --snapshot --debug
            - run: ./scripts/generate-checksums.sh
      - when:
          condition:
            not: << parameters.dry-run >>
          steps:
            - run: goreleaser release --rm-dist --debug
            - run: ./scripts/generate-checksums.sh
            - run: ./scripts/publish-checksums.sh

  gofmt:
    executor: golang
    working_directory: ~/lotus
    steps:
      - run:
          command: "! go fmt ./... 2>&1 | read"

  gen-check:
    executor: golang
    working_directory: ~/lotus
    steps:
      - install-ubuntu-deps
      - attach_workspace:
          at: ~/
      - run: go install golang.org/x/tools/cmd/goimports
      - run: go install github.com/hannahhoward/cbor-gen-for
      - run: make gen
      - run: git --no-pager diff && git --no-pager diff --quiet
      - run: make docsgen-cli
      - run: git --no-pager diff && git --no-pager diff --quiet

  docs-check:
    executor: golang
    working_directory: ~/lotus
    steps:
      - install-ubuntu-deps
      - attach_workspace:
          at: ~/
      - run: go install golang.org/x/tools/cmd/goimports
      - run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
      - run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
      - run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
      - run: make docsgen
      - run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
      - run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
      - run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
      - run: diff ../pre-openrpc-full ../post-openrpc-full && diff ../pre-openrpc-miner ../post-openrpc-miner && diff ../pre-openrpc-worker ../post-openrpc-worker && git --no-pager diff && git --no-pager diff --quiet

  lint-all:
    description: |
      Run golangci-lint.
    working_directory: ~/lotus
    parameters:
      args:
        type: string
        default: ''
        description: |
          Arguments to pass to golangci-lint
    docker:
      - image: cimg/go:[[ .GoVersion]]
    resource_class: medium+
    steps:
      - install-ubuntu-deps
      - attach_workspace:
          at: ~/
      - run:
          name: Lint
          command: |
            golangci-lint run -v --timeout 10m \
              --concurrency 4 << parameters.args >>

  build-docker:
    description: >
      Publish to Dockerhub
    executor: docker/docker
    parameters:
      image:
        type: string
        default: lotus
        description: >
          Passed to the docker build process to determine which image in the
          Dockerfile should be built. Expected values are `lotus`,
          `lotus-all-in-one`
      network:
        type: string
        default: "mainnet"
        description: >
          Passed to the docker build process using GOFLAGS+=-tags=<<network>>.
          Expected values are `debug`, `2k`, `calibnet`, `butterflynet`,
          `interopnet`.
      channel:
        type: string
        default: ""
        description: >
          The release channel to use for this image.
      push:
        type: boolean
        default: false
        description: >
          When true, pushes the image to Dockerhub
    steps:
      - setup_remote_docker
      - checkout
      - git_fetch_all_tags
      - run: git submodule sync
      - run: git submodule update --init

      - docker/check:
          docker-username: DOCKERHUB_USERNAME
          docker-password: DOCKERHUB_PASSWORD
      - when:
          condition:
            equal: [ mainnet, <<parameters.network>> ]
          steps:
            - when:
                condition: <<parameters.push>>
                steps:
                  - docker/build:
                      image: filecoin/<<parameters.image>>
                      extra_build_args: --target <<parameters.image>>
                      tag: <<parameters.channel>>
                  - run:
                      name: Docker push
                      command: |
                        docker push filecoin/<<parameters.image>>:<<parameters.channel>>
                        if [["[[ ! -z $CIRCLE_SHA ]]"]]; then
                          docker image tag filecoin/<<parameters.image>>:<<parameters.channel>> filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"
                          docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"
                        fi
                        if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
                          docker image tag filecoin/<<parameters.image>>:<<parameters.channel>> filecoin/<<parameters.image>>:"${CIRCLE_TAG}"
                          docker push filecoin/<<parameters.image>>:"${CIRCLE_TAG}"
                        fi
            - unless:
                condition: <<parameters.push>>
                steps:
                  - docker/build:
                      image: filecoin/<<parameters.image>>
                      extra_build_args: --target <<parameters.image>>
      - when:
          condition:
            not:
              equal: [ mainnet, <<parameters.network>> ]
          steps:
            - when:
                condition: <<parameters.push>>
                steps:
                  - docker/build:
                      image: filecoin/<<parameters.image>>
                      extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>
                      tag: <<parameters.channel>>-<<parameters.network>>
                  - run:
                      name: Docker push
                      command: |
                        docker push filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>>
                        if [["[[ ! -z $CIRCLE_SHA ]]"]]; then
                          docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"-<<parameters.network>>
                          docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"-<<parameters.network>>
                        fi
                        if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
                          docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_TAG}"-<<parameters.network>>
                          docker push filecoin/<<parameters.image>>:"${CIRCLE_TAG}"-<<parameters.network>>
                        fi
            - unless:
                condition: <<parameters.push>>
                steps:
                  - docker/build:
                      image: filecoin/<<parameters.image>>
                      extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>

workflows:
  ci:
    jobs:
      - build
      - lint-all:
          requires:
            - build
      - mod-tidy-check:
          requires:
            - build
      - gofmt:
          requires:
            - build
      - gen-check:
          requires:
            - build
      - docs-check:
          requires:
            - build

      [[- range $file := .ItestFiles -]]
      [[ with $name := $file | stripSuffix ]]
      - test:
          name: test-itest-[[ $name ]]
          requires:
            - build
          suite: itest-[[ $name ]]
          target: "./itests/[[ $file ]]"
          [[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config") (eq $name "sector_pledge")]]
          resource_class: 2xlarge
          [[- end]]
          [[- if or (eq $name "wdpost") (eq $name "sector_pledge")]]
          get-params: true
          [[end]]
      [[- end ]][[- end]]

      [[- range $suite, $pkgs := .UnitSuites]]
      - test:
          name: test-[[ $suite ]]
          requires:
            - build
          suite: utest-[[ $suite ]]
          target: "[[ $pkgs ]]"
          [[- if eq $suite "unit-storage"]]
          get-params: true
          [[- end -]]
          [[- if eq $suite "unit-cli"]]
          resource_class: 2xlarge
          get-params: true
          [[- end -]]
          [[- if eq $suite "unit-rest"]]
          resource_class: 2xlarge
          [[- end -]]
      [[- end]]
      - test:
          go-test-flags: "-run=TestMulticoreSDR"
          requires:
            - build
          suite: multicore-sdr-check
          target: "./storage/sealer/ffiwrapper"
          proofs-log-test: "1"
      - test-conformance:
          requires:
            - build
          suite: conformance
          target: "./conformance"

  release:
    jobs:
      - build-linux-amd64:
          name: "Build ( linux / amd64 )"
          filters:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
                - /^ci\/.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - build-darwin-amd64:
          name: "Build ( darwin / amd64 )"
          filters:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
                - /^ci\/.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - build-darwin-arm64:
          name: "Build ( darwin / arm64 )"
          filters:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
                - /^ci\/.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - release:
          name: "Release"
          requires:
            - "Build ( darwin / amd64 )"
            - "Build ( linux / amd64 )"
            - "Build ( darwin / arm64 )"
          filters:
            branches:
              ignore:
                - /^.*$/
            tags:
              only:
                - /^v\d+\.\d+\.\d+(-rc\d+)?$/
      - release:
          name: "Release (dry-run)"
          dry-run: true
          requires:
            - "Build ( darwin / amd64 )"
            - "Build ( linux / amd64 )"
            - "Build ( darwin / arm64 )"
          filters:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
                - /^ci\/.*$/
      [[- range .Networks]]
      - build-docker:
          name: "Docker push (lotus-all-in-one / stable / [[.]])"
          image: lotus-all-in-one
          channel: stable
          network: [[.]]
          push: true
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+$/
      - build-docker:
          name: "Docker push (lotus-all-in-one / candidate / [[.]])"
          image: lotus-all-in-one
          channel: candidate
          network: [[.]]
          push: true
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+-rc\d+$/
      - build-docker:
          name: "Docker push (lotus-all-in-one / edge / [[.]])"
          image: lotus-all-in-one
          channel: master
          network: [[.]]
          push: true
          filters:
            branches:
              only:
                - master
      - build-docker:
          name: "Docker build (lotus-all-in-one / [[.]])"
          image: lotus-all-in-one
          network: [[.]]
          push: false
          filters:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
      [[- end]]
      - build-docker:
          name: "Docker push (lotus / stable / mainnet)"
          image: lotus
          channel: stable
          network: mainnet
          push: true
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+$/
      - build-docker:
          name: "Docker push (lotus / candidate / mainnet)"
          image: lotus
          channel: candidate
          network: mainnet
          push: true
          filters:
            branches:
              ignore:
                - /.*/
            tags:
              only:
                - /^v\d+\.\d+\.\d+-rc\d+$/
      - build-docker:
          name: "Docker push (lotus / master / mainnet)"
          image: lotus
          channel: master
          network: mainnet
          push: true
          filters:
            branches:
              only:
                - master
      - build-docker:
          name: "Docker build (lotus / mainnet)"
          image: lotus
          network: mainnet
          push: false
          filters:
            branches:
              only:
                - /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/

  nightly:
    triggers:
      - schedule:
          cron: "0 0 * * *"
          filters:
            branches:
              only:
                - master
    jobs:
      [[- range .Networks]]
      - build-docker:
          name: "Docker (lotus-all-in-one / nightly / [[.]])"
          image: lotus-all-in-one
          channel: nightly
          network: [[.]]
          push: true
      [[- end]]

.github/CODEOWNERS (3 lines changed)
@@ -1,3 +1,6 @@
 # Reference
 # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners
+
+# Global owners
+# Ensure maintainers team is a requested reviewer for non-draft PRs
 * @filecoin-project/lotus-maintainers

.github/actions/install-go/action.yml (deleted, 23 lines)
@@ -1,23 +0,0 @@
name: Install Go
description: Install Go for Filecoin Lotus

inputs:
  working-directory:
    description: Specifies the working directory where the command is run.
    required: false

runs:
  using: composite
  steps:
    - uses: actions/setup-go@v5
      with:
        go-version: stable
        cache: false
    - id: go-mod
      uses: ipdxco/unified-github-workflows/.github/actions/read-go-mod@main
      with:
        working-directory: ${{ inputs.working-directory || github.workspace }}
    - uses: actions/setup-go@v5
      with:
        go-version: ${{ fromJSON(steps.go-mod.outputs.json).Go }}.x
        cache: false

.github/actions/install-system-dependencies/action.yml (deleted, 21 lines)
@@ -1,21 +0,0 @@
name: Install System Dependencies
description: Install System dependencies for Filecoin Lotus

runs:
  using: composite
  steps:
    - if: runner.os == 'Linux'
      run: |
        # List processes to enable debugging in case /var/lib/apt/lists/ is locked
        ps aux
        sudo apt-get update -y
        sudo apt-get install -y ocl-icd-opencl-dev libhwloc-dev pkg-config
      shell: bash
    - if: runner.os == 'macOS'
      env:
        HOMEBREW_NO_AUTO_UPDATE: '1'
      run: |
        brew install hwloc pkg-config
        echo "CPATH=$(brew --prefix)/include" | tee -a $GITHUB_ENV
        echo "LIBRARY_PATH=$(brew --prefix)/lib" | tee -a $GITHUB_ENV
      shell: bash

.github/actions/start-yugabytedb/action.yml (deleted, 16 lines)
@@ -1,16 +0,0 @@
name: Start YugabyteDB
description: Install Yugabyte Database for Filecoin Lotus

runs:
  using: composite
  steps:
    - run: docker run --rm --name yugabyte -d -p 5433:5433 yugabytedb/yugabyte:2.21.0.1-b1 bin/yugabyted start --daemon=false
      shell: bash
    - run: |
        while true; do
          status=$(docker exec yugabyte bin/yugabyted status);
          echo $status;
          echo $status | grep Running && break;
          sleep 1;
        done
      shell: bash

.github/labels.yml (new file, 248 lines)
@@ -0,0 +1,248 @@
###
### Special magic GitHub labels
### https://help.github.com/en/github/building-a-strong-community/encouraging-helpful-contributions-to-your-project-with-labels
#
- name: "good first issue"
  color: 7057ff
  description: "Good for newcomers"
- name: "help wanted"
  color: 008672
  description: "Extra attention is needed"

###
### Goals
#
- name: goal/incentives
  color: ff004d
  description: "Incentinet"

###
### Areas
#
- name: area/ux
  color: 00A4E0
  description: "Area: UX"
- name: area/chain/vm
  color: 00A4E2
  description: "Area: Chain/VM"
- name: area/chain/sync
  color: 00A4E4
  description: "Area: Chain/Sync"
- name: area/chain/misc
  color: 00A4E6
  description: "Area: Chain/Misc"
- name: area/markets
  color: 00A4E8
  description: "Area: Markets"
- name: area/sealing/fsm
  color: 0bb1ed
  description: "Area: Sealing/FSM"
- name: area/sealing/storage
  color: 0EB4F0
  description: "Area: Sealing/Storage"
- name: area/proving
  color: 0EB4F0
  description: "Area: Proving"
- name: area/mining
  color: 10B6F2
  description: "Area: Mining"
- name: area/client/storage
  color: 13B9F5
  description: "Area: Client/Storage"
- name: area/client/retrieval
  color: 15BBF7
  description: "Area: Client/Retrieval"
- name: area/wallet
  color: 15BBF7
  description: "Area: Wallet"
- name: area/payment-channel
  color: ff6767
  description: "Area: Payment Channel"
- name: area/multisig
  color: fff0ff
  description: "Area: Multisig"
- name: area/networking
  color: 273f8a
  description: "Area: Networking"

###
### Kinds
#
- name: kind/bug
  color: c92712
  description: "Kind: Bug"
- name: kind/chore
  color: fcf0b5
  description: "Kind: Chore"
- name: kind/feature
  color: FFF3B8
  description: "Kind: Feature"
- name: kind/improvement
  color: FFF5BA
  description: "Kind: Improvement"
- name: kind/test
  color: FFF8BD
  description: "Kind: Test"
- name: kind/question
  color: FFFDC2
  description: "Kind: Question"
- name: kind/enhancement
  color: FFFFC5
  description: "Kind: Enhancement"
- name: kind/discussion
  color: FFFFC7
  description: "Kind: Discussion"

###
### Difficulties
#
- name: dif/trivial
  color: b2b7ff
  description: "Can be confidently tackled by newcomers, who are widely unfamiliar with lotus"
- name: dif/easy
  color: 7886d7
  description: "An existing lotus user should be able to pick this up"
- name: dif/medium
  color: 6574cd
  description: "Prior development experience with lotus is likely helpful"
- name: dif/hard
  color: 5661b3
  description: "Suggests that having worked on the specific component affected by this issue is important"
- name: dif/expert
  color: 2f365f
  description: "Requires extensive knowledge of the history, implications, ramifications of the issue"

###
### Efforts
#
- name: effort/minutes
  color: e8fffe
  description: "Effort: Minutes"
- name: effort/hours
  color: a0f0ed
  description: "Effort: Hours"
- name: effort/day
  color: 64d5ca
  description: "Effort: One Day"
- name: effort/days
  color: 4dc0b5
  description: "Effort: Multiple Days"
- name: effort/week
  color: 38a89d
  description: "Effort: One Week"
- name: effort/weeks
  color: 20504f
  description: "Effort: Multiple Weeks"

###
### Impacts
#
- name: impact/regression
  color: f1f5f8
  description: "Impact: Regression"
- name: impact/api-breakage
  color: ECF0F3
  description: "Impact: API Breakage"
- name: impact/quality
  color: E7EBEE
  description: "Impact: Quality"
- name: impact/dx
  color: E2E6E9
  description: "Impact: Developer Experience"
- name: impact/test-flakiness
  color: DDE1E4
  description: "Impact: Test Flakiness"
- name: impact/consensus
  color: b20014
  description: "Impact: Consensus"

###
### Topics
#
- name: topic/interoperability
  color: bf0f73
  description: "Topic: Interoperability"
- name: topic/specs
  color: CC1C80
  description: "Topic: Specs"
- name: topic/docs
  color: D9298D
  description: "Topic: Documentation"
- name: topic/architecture
  color: E53599
  description: "Topic: Architecture"

###
### Priorities
###
- name: P0
  color: dd362a
  description: "P0: Critical Blocker"
- name: P1
  color: ce8048
  description: "P1: Must be resolved"
- name: P2
  color: dbd81a
  description: "P2: Should be resolved"
- name: P3
  color: 9fea8f
  description: "P3: Might get resolved"

###
### Hints
#
#- name: hint/good-first-issue
#  color: 7057ff
#  description: "Hint: Good First Issue"
#- name: hint/help-wanted
#  color: 008672
#  description: "Hint: Help Wanted"
- name: hint/needs-decision
  color: 33B9A5
  description: "Hint: Needs Decision"
- name: hint/needs-triage
  color: 1AA08C
  description: "Hint: Needs Triage"
- name: hint/needs-analysis
  color: 26AC98
  description: "Hint: Needs Analysis"
- name: hint/needs-author-input
  color: 33B9A5
  description: "Hint: Needs Author Input"
- name: hint/needs-team-input
  color: 40C6B2
  description: "Hint: Needs Team Input"
- name: hint/needs-community-input
  color: 4DD3BF
  description: "Hint: Needs Community Input"
- name: hint/needs-review
  color: 5AE0CC
  description: "Hint: Needs Review"

###
### Statuses
#
- name: status/done
  color: edb3a6
  description: "Status: Done"
- name: status/deferred
  color: E0A699
  description: "Status: Deferred"
- name: status/in-progress
  color: D49A8D
  description: "Status: In Progress"
- name: status/blocked
  color: C78D80
  description: "Status: Blocked"
- name: status/inactive
  color: BA8073
  description: "Status: Inactive"
- name: status/waiting
  color: AD7366
  description: "Status: Waiting"
- name: status/rotten
  color: 7A4033
  description: "Status: Rotten"
- name: status/discarded
  color: 6D3326
  description: "Status: Discarded / Won't fix"

.github/pull_request_template.md (2 lines changed)
@@ -15,7 +15,7 @@ Before you mark the PR ready for review, please make sure that:
 - [ ] PR title is in the form of of `<PR type>: <area>: <change being made>`
   - example: ` fix: mempool: Introduce a cache for valid signatures`
   - `PR type`: fix, feat, build, chore, ci, docs, perf, refactor, revert, style, test
-  - `area`, e.g. api, chain, state, mempool, multisig, networking, paych, proving, sealing, wallet, deps
+  - `area`, e.g. api, chain, state, market, mempool, multisig, networking, paych, proving, sealing, wallet, deps
 - [ ] If the PR affects users (e.g., new feature, bug fix, system requirements change), update the CHANGELOG.md and add details to the UNRELEASED section.
 - [ ] New features have usage guidelines and / or documentation updates in
   - [ ] [Lotus Documentation](https://lotus.filecoin.io)

.github/workflows/build.yml (deleted, 32 lines)
@@ -1,32 +0,0 @@
name: Build

on:
  pull_request:
  push:
    branches:
      - master
      - release/*
  workflow_dispatch:

defaults:
  run:
    shell: bash

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

permissions:
  contents: read

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
      - uses: ./.github/actions/install-system-dependencies
      - uses: ./.github/actions/install-go
      - run: make deps lotus

.github/workflows/builtin-actor-tests.yml (deleted, 23 lines)
@@ -1,23 +0,0 @@
name: Built-in Actors

on:
  push:
    paths:
      - build/actors
      - build/builtin_actors_gen.go
    branches:
      - release/*

permissions:
  contents: read

jobs:
  release:
    name: Release Tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: 1.21
      - run: go test -tags=release ./build

.github/workflows/check.yml (deleted, 82 lines)
@@ -1,82 +0,0 @@
name: Check

on:
  pull_request:
  push:
    branches:
      - master
      - release/*
  workflow_dispatch:

defaults:
  run:
    shell: bash

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

permissions:
  contents: read

jobs:
  check-docsgen:
    name: Check (docs-check)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
      - uses: ./.github/actions/install-system-dependencies
      - uses: ./.github/actions/install-go
      - run: go install golang.org/x/tools/cmd/goimports
      - run: make deps
      - run: make docsgen
      - run: git diff --exit-code
  check-gen:
    name: Check (gen-check)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
      - uses: ./.github/actions/install-system-dependencies
      - uses: ./.github/actions/install-go
      - run: make deps lotus
      - run: go install golang.org/x/tools/cmd/goimports
      - run: make gen
      - run: git diff --exit-code
      - run: make docsgen-cli
      - run: git diff --exit-code
  check-lint:
    name: Check (lint-all)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
      - uses: ./.github/actions/install-system-dependencies
      - uses: ./.github/actions/install-go
      - run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.59.0
      - run: make deps
      - run: golangci-lint run -v --timeout 10m --concurrency 4
  check-fmt:
    name: Check (gofmt)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
      - uses: ./.github/actions/install-go
      - run: go fmt ./...
      - run: git diff --exit-code
  check-mod-tidy:
    name: Check (mod-tidy-check)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
      - uses: ./.github/actions/install-go
      - run: go mod tidy -v
      - run: git diff --exit-code

.github/workflows/codeql-analysis.yml (new file, 73 lines)
@@ -0,0 +1,73 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches:
      - master
      - 'release/*'
  pull_request:
    # The branches below must be a subset of the branches above
    branches:
      - master
      - 'release/*'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: [ 'go' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - uses: actions/setup-go@v3
        with:
          go-version: '1.18.8'

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: go
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v2

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl

      # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
      #    and modify them (or add more) to build your code if your project
      #    uses a compiled language

      #- run: |
      #   make bootstrap
      #   make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2

.github/workflows/docker.yml (deleted, 108 lines)
@@ -1,108 +0,0 @@
name: Docker

on:
  push:
    branches:
      - master
      - release/*
    tags:
      - v*
  schedule:
    - cron: '0 0 * * *'
  workflow_dispatch:
    inputs:
      ref:
        description: The GitHub ref (e.g. refs/tags/v1.0.0) to release
        required: false

defaults:
  run:
    shell: bash

permissions:
  contents: read

jobs:
  docker:
    name: Docker (${{ matrix.image }} / ${{ matrix.network }}) [publish=${{ (inputs.ref || github.ref) == 'refs/heads/master' || startsWith(inputs.ref || github.ref, 'refs/tags/') }}]
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        image:
          - lotus-all-in-one
        network:
          - mainnet
          - butterflynet
          - calibnet
          - debug
        include:
          - image: lotus
            network: mainnet
    env:
      PUBLISH: ${{ github.ref == 'refs/heads/master' || startsWith(inputs.ref || github.ref, 'refs/tags/') }}
    steps:
      - id: channel
        env:
          IS_MASTER: ${{ (inputs.ref || github.ref) == 'refs/heads/master' }}
          IS_TAG: ${{ startsWith(inputs.ref || github.ref, 'refs/tags/') }}
          IS_RC: ${{ contains(inputs.ref || github.ref, '-rc') }}
          IS_SCHEDULED: ${{ github.event_name == 'schedule' }}
        run: |
          channel=''
          if [[ "$IS_MASTER" == 'true' ]]; then
            if [[ "$IS_SCHEDULED" == 'true' ]]; then
              channel=nightly
            else
              channel=master
            fi
          elif [[ "$IS_TAG" == 'true' ]]; then
            if [[ "$IS_RC" == 'true' ]]; then
              channel=candidate
            else
              channel=stable
            fi
          fi
          echo "channel=$channel" | tee -a $GITHUB_OUTPUT
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
          ref: ${{ inputs.ref || github.ref }}
      - id: git
        env:
          REF: ${{ inputs.ref || github.ref }}
        run: |
          ref="${REF#refs/heads/}"
          ref="${ref#refs/tags/}"
          sha="$(git rev-parse --short HEAD)"
          echo "ref=$ref" | tee -a "$GITHUB_OUTPUT"
          echo "sha=$sha" | tee -a "$GITHUB_OUTPUT"
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: filecoin/${{ matrix.image }}
          tags: |
            type=raw,enable=${{ steps.channel.outputs.channel != '' }},value=${{ steps.channel.outputs.channel }}
            type=raw,enable=${{ startsWith(inputs.ref || github.ref, 'refs/tags/') }},value=${{ steps.git.outputs.ref }}
            type=raw,value=${{ steps.git.outputs.sha }}
          flavor: |
            latest=false
            suffix=${{ matrix.network != 'mainnet' && format('-{0}', matrix.network) || '' }}
      - if: env.PUBLISH == 'true'
        name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push if channel is set (channel=${{ steps.channel.outputs.channel }})
        uses: docker/build-push-action@v5
        with:
          context: .
          push: ${{ env.PUBLISH == 'true' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            ${{ matrix.network != 'mainnet' && format('GOFLAGS=-tags={0}', matrix.network) || ''}}

.github/workflows/label-syncer.yml (new file, 17 lines)
@@ -0,0 +1,17 @@
name: Label syncer
on:
  push:
    paths:
      - '.github/labels.yml'
    branches:
      - master
jobs:
  build:
    name: Sync labels
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@1.0.0
      - uses: micnncim/action-label-syncer@v1.0.0
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/release.yml (vendored, 141 lines)

@@ -1,141 +0,0 @@

```yaml
name: Release

on:
  push:
    branches:
      - ci/*
      - release/*
    tags:
      - v*
  workflow_dispatch:
    inputs:
      ref:
        description: The GitHub ref (e.g. refs/tags/v1.0.0) to release
        required: false

defaults:
  run:
    shell: bash

permissions:
  contents: read

jobs:
  build:
    name: Build (${{ matrix.os }}/${{ matrix.arch }})
    runs-on: ${{ matrix.runner }}
    strategy:
      fail-fast: false
      matrix:
        include:
          - runner: ubuntu-latest
            os: Linux
            arch: X64
          - runner: macos-13
            os: macOS
            arch: X64
          - runner: macos-14
            os: macOS
            arch: ARM64
    steps:
      - env:
          OS: ${{ matrix.os }}
          ARCH: ${{ matrix.arch }}
        run: |
          if [[ "$OS" != "$RUNNER_OS" || "$ARCH" != "$RUNNER_ARCH" ]]; then
            echo "::error title=Unexpected Runner::Expected $OS/$ARCH, got $RUNNER_OS/$RUNNER_ARCH"
            exit 1
          fi
      - uses: actions/checkout@v4
        with:
          path: actions
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
          ref: ${{ inputs.ref || github.ref }}
          path: lotus
      - uses: ./actions/.github/actions/install-system-dependencies
      - uses: ./actions/.github/actions/install-go
        with:
          working-directory: lotus
      - env:
          GITHUB_TOKEN: ${{ github.token }}
        run: make deps lotus lotus-miner lotus-worker
        working-directory: lotus
      - if: runner.os == 'macOS'
        run: otool -hv lotus
        working-directory: lotus
      - env:
          INPUTS_REF: ${{ inputs.ref }}
        run: |
          export GITHUB_REF=${INPUTS_REF:-$GITHUB_REF}
          ../actions/scripts/version-check.sh ./lotus
        working-directory: lotus
      - uses: actions/upload-artifact@v4
        with:
          name: lotus-${{ matrix.os }}-${{ matrix.arch }}
          path: |
            lotus/lotus
            lotus/lotus-miner
            lotus/lotus-worker
  release:
    name: Release [publish=${{ startsWith(inputs.ref || github.ref, 'refs/tags/') }}]
    permissions:
      # This enables the job to create and/or update GitHub releases
      contents: write
    runs-on: ubuntu-latest
    needs: [build]
    env:
      PUBLISH: ${{ startsWith(inputs.ref || github.ref, 'refs/tags/') }}
    steps:
      - uses: actions/checkout@v4
        with:
          path: actions
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
          fetch-depth: 0
          path: lotus
          ref: ${{ inputs.ref || github.ref }}
      - uses: actions/download-artifact@v4
        with:
          name: lotus-Linux-X64
          path: linux_amd64_v1
      - uses: actions/download-artifact@v4
        with:
          name: lotus-macOS-X64
          path: darwin_amd64_v1
      - uses: actions/download-artifact@v4
        with:
          name: lotus-macOS-ARM64
          path: darwin_arm64
      - uses: ./actions/.github/actions/install-go
        with:
          working-directory: lotus
      - uses: ipfs/download-ipfs-distribution-action@v1
        with:
          name: kubo
          version: v0.16.0
      - uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0
        with:
          distribution: goreleaser-pro
          version: 2.0.1
          args: release --clean ${{ env.PUBLISH == 'false' && '--snapshot' || '' }}
          workdir: lotus
        env:
          GITHUB_TOKEN: ${{ env.PUBLISH == 'true' && secrets.GORELEASER_GITUB_TOKEN || github.token || '' }}
          GORELEASER_KEY: ${{ env.PUBLISH == 'true' && secrets.GORELEASER_KEY || '' }}
      - env:
          INPUTS_REF: ${{ inputs.ref }}
        run: |
          export GITHUB_REF=${INPUTS_REF:-$GITHUB_REF}
          ../actions/scripts/generate-checksums.sh
        working-directory: lotus
      - if: env.PUBLISH == 'true'
        env:
          GITHUB_TOKEN: ${{ github.token }}
          INPUTS_REF: ${{ inputs.ref }}
        run: |
          export GITHUB_REF=${INPUTS_REF:-$GITHUB_REF}
          ../actions/scripts/publish-checksums.sh
        working-directory: lotus
```
.github/workflows/sorted-pr-checks.yml (vendored, 34 lines)

@@ -1,34 +0,0 @@

```yaml
name: Comment with sorted PR checks

on:
  workflow_dispatch:
    inputs:
      pull_number:
        description: 'Pull request number'
        required: true
  workflow_run:
    workflows:
      - Build
      - Check
      - CodeQL
      - Test
    types:
      - requested
      - completed

permissions:
  actions: read
  checks: read
  pull-requests: write

concurrency:
  group: ${{ github.workflow }}-${{ github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0].number }}
  cancel-in-progress: true

jobs:
  comment:
    if: github.event.inputs.pull_number || github.event.workflow_run.event == 'pull_request'
    uses: ipdxco/sorted-pr-checks/.github/workflows/comment.yml@v1
    with:
      pull_number: ${{ github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0].number }}
      template: unsuccessful_only
```
.github/workflows/stale.yml (vendored, 13 lines changed)

```diff
@@ -4,19 +4,18 @@ on:
   schedule:
     - cron: '0 12 * * *'
 
-permissions:
-  contents: read
-
 jobs:
   stale:
+    runs-on: ubuntu-latest
     permissions:
       issues: write
       pull-requests: write
-    runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v9
+      - uses: actions/stale@v3
         with:
-          repo-token: ${{ github.token }}
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
           stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 24 hours.'
           close-issue-message: 'This issue was closed because it is missing author input.'
           stale-pr-message: 'Thank you for submitting the PR and contributing to lotus! Lotus maintainers need more of your input before merging it, please address the suggested changes or reply to the comments or this PR will be closed in 48 hours. You are always more than welcome to reopen the PR later as well!'
@@ -30,3 +29,5 @@ jobs:
           days-before-pr-close: 2
           remove-stale-when-updated: true
           enable-statistics: true
+
+
```
.github/workflows/sync-master-main.yaml (vendored, 9 lines changed)

```diff
@@ -1,20 +1,13 @@
 name: sync-master-main
 
 on:
   push:
     branches:
       - master
 
-permissions:
-  contents: read
-
 jobs:
   sync:
-    permissions:
-      contents: write
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
       - name: update remote branch main
         run: |
           # overrides the remote branch (origin:github) `main`
```
.github/workflows/test.yml (vendored, 310 lines)

@@ -1,310 +0,0 @@

```yaml
name: Test

on:
  pull_request:
  push:
    branches:
      - master
      - release/*
  workflow_dispatch:

defaults:
  run:
    shell: bash

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: ${{ github.event_name == 'pull_request' }}

permissions:
  contents: read

jobs:
  discover:
    name: Discover Test Groups
    runs-on: ubuntu-latest
    outputs:
      groups: ${{ steps.test.outputs.groups }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
      - id: test
        env:
          # Unit test groups other than unit-rest
          utests: |
            [
              {"name": "unit-cli", "packages": ["./cli/...", "./cmd/...", "./api/..."]},
              {"name": "unit-storage", "packages": ["./storage/...", "./extern/..."]},
              {"name": "unit-node", "packages": ["./node/..."]}
            ]
          # Other tests that require special configuration
          otests: |
            [
              {
                "name": "multicore-sdr",
                "packages": ["./storage/sealer/ffiwrapper"],
                "go_test_flags": "-run=TestMulticoreSDR",
                "test_rustproofs_logs": "1"
              }, {
                "name": "conformance",
                "packages": ["./conformance"],
                "go_test_flags": "-run=TestConformance",
                "skip_conformance": "0"
              }
            ]
          # Mapping from test group names to custom runner labels
          # The jobs default to running on the default hosted runners (4 CPU, 16 RAM).
          # We use self-hosted xlarge (4 CPU, 8 RAM; and large - 2 CPU, 4 RAM) runners
          # to extend the available runner capacity (60 default hosted runners).
          # We use self-hosted 4xlarge (16 CPU, 32 RAM; and 2xlarge - 8 CPU, 16 RAM) self-hosted
          # to support resource intensive jobs.
          runners: |
            {
              "itest-deals_concurrent": ["self-hosted", "linux", "x64", "4xlarge"],
              "itest-sector_pledge": ["self-hosted", "linux", "x64", "4xlarge"],
              "itest-worker": ["self-hosted", "linux", "x64", "4xlarge"],

              "itest-gateway": ["self-hosted", "linux", "x64", "2xlarge"],
              "itest-sector_import_full": ["self-hosted", "linux", "x64", "2xlarge"],
              "itest-sector_import_simple": ["self-hosted", "linux", "x64", "2xlarge"],
              "itest-wdpost": ["self-hosted", "linux", "x64", "2xlarge"],
              "unit-storage": ["self-hosted", "linux", "x64", "2xlarge"],

              "itest-batch_deal": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-cli": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-deals_512mb": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-deals_anycid": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-deals_invalid_utf8_label": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-deals_max_staging_deals": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-deals_partial_retrieval": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-deals_publish": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-deals_remote_retrieval": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-decode_params": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-dup_mpool_messages": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-eth_account_abstraction": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-eth_api": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-eth_balance": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-eth_bytecode": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-eth_config": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-eth_conformance": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-eth_deploy": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-eth_fee_history": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-eth_transactions": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-fevm_address": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-fevm_events": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-gas_estimation": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-get_messages_in_ts": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-lite_migration": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-lookup_robust_address": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-manual_onboarding": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-mempool": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-mpool_msg_uuid": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-mpool_push_with_uuid": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-msgindex": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-multisig": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-net": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-nonce": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-path_detach_redeclare": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-pending_deal_allocation": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-remove_verifreg_datacap": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-sector_miner_collateral": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-sector_numassign": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-self_sent_txn": ["self-hosted", "linux", "x64", "xlarge"],
              "itest-verifreg": ["self-hosted", "linux", "x64", "xlarge"],
              "multicore-sdr": ["self-hosted", "linux", "x64", "xlarge"],
              "unit-node": ["self-hosted", "linux", "x64", "xlarge"]
            }
          # A list of test groups that require YugabyteDB to be running
          yugabytedb: |
            ["itest-harmonydb"]
          # A list of test groups that require Proof Parameters to be fetched
          parameters: |
            [
              "conformance",
              "itest-api",
              "itest-deals_offline",
              "itest-deals_padding",
              "itest-deals_partial_retrieval_dm-level",
              "itest-deals_pricing",
              "itest-deals",
              "itest-direct_data_onboard_verified",
              "itest-direct_data_onboard",
              "itest-manual_onboarding",
              "itest-net",
              "itest-path_detach_redeclare",
              "itest-path_type_filters",
              "itest-sealing_resources",
              "itest-sector_finalize_early",
              "itest-sector_import_full",
              "itest-sector_import_simple",
              "itest-sector_pledge",
              "itest-sector_unseal",
              "itest-wdpost_no_miner_storage",
              "itest-wdpost_worker_config",
              "itest-wdpost",
              "itest-worker_upgrade",
              "itest-worker",
              "multicore-sdr",
              "unit-cli",
              "unit-storage",
              "itest-curio"
            ]
        run: |
          # Create a list of integration test groups
          itests="$(
            find ./itests -name "*_test.go" | \
              jq -R '{
                "name": "itest-\(. | split("/") | .[2] | sub("_test.go$";""))",
                "packages": [.]
              }' | jq -s
          )"

          # Create a list of packages that are covered by the integration and unit tests
          packages="$(jq -n --argjson utests "$utests" '$utests | map(.packages) | flatten | . + ["./itests/..."]')"

          # Create a new group for the unit tests that are not yet covered
          rest="$(
            find . -name "*_test.go" | cut -d/ -f2 | sort | uniq | \
              jq -R '"./\(.)/..."' | \
              jq -s --argjson p "$packages" '{"name": "unit-rest", "packages": (. - $p)}'
          )"

          # Combine the groups for integration tests, unit tests, the new unit-rest group, and the other tests
          groups="$(jq -n --argjson i "$itests" --argjson u "$utests" --argjson r "$rest" --argjson o "$otests" '$i + $u + [$r] + $o')"

          # Apply custom runner labels to the groups
          groups="$(jq -n --argjson g "$groups" --argjson r "$runners" '$g | map(. + {"runner": (.name as $n | $r | .[$n]) })')"

          # Apply the needs_yugabytedb flag to the groups
          groups="$(jq -n --argjson g "$groups" --argjson y "$yugabytedb" '$g | map(. + {"needs_yugabytedb": ([.name] | inside($y)) })')"

          # Apply the needs_parameters flag to the groups
          groups="$(jq -n --argjson g "$groups" --argjson p "$parameters" '$g | map(. + {"needs_parameters": ([.name] | inside($p)) })')"

          # Output the groups
          echo "groups=$groups"
          echo "groups=$(jq -nc --argjson g "$groups" '$g')" >> $GITHUB_OUTPUT
  cache:
    name: Cache Dependencies
    runs-on: ubuntu-latest
    outputs:
      fetch_params_key: ${{ steps.fetch_params.outputs.key }}
      fetch_params_path: ${{ steps.fetch_params.outputs.path }}
      make_deps_key: ${{ steps.make_deps.outputs.key }}
      make_deps_path: ${{ steps.make_deps.outputs.path }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
      - id: fetch_params
        env:
          CACHE_KEY: fetch-params-${{ hashFiles('./extern/filecoin-ffi/parameters.json') }}
          CACHE_PATH: |
            /var/tmp/filecoin-proof-parameters/
        run: |
          echo -e "key=$CACHE_KEY" | tee -a $GITHUB_OUTPUT
          echo -e "path<<EOF\n$CACHE_PATH\nEOF" | tee -a $GITHUB_OUTPUT
      - id: make_deps
        env:
          CACHE_KEY: ${{ runner.os }}-${{ runner.arch }}-make-deps-${{ hashFiles('./.git/modules/extern/filecoin-ffi/HEAD') }}
          CACHE_PATH: |
            ./extern/filecoin-ffi/filcrypto.h
            ./extern/filecoin-ffi/libfilcrypto.a
            ./extern/filecoin-ffi/filcrypto.pc
        run: |
          echo -e "key=$CACHE_KEY" | tee -a $GITHUB_OUTPUT
          echo -e "path<<EOF\n$CACHE_PATH\nEOF" | tee -a $GITHUB_OUTPUT
      - id: restore_fetch_params
        uses: actions/cache/restore@v4
        with:
          key: ${{ steps.fetch_params.outputs.key }}
          path: ${{ steps.fetch_params.outputs.path }}
          lookup-only: true
      - id: restore_make_deps
        uses: actions/cache/restore@v4
        with:
          key: ${{ steps.make_deps.outputs.key }}
          path: ${{ steps.make_deps.outputs.path }}
          lookup-only: true
      - if: steps.restore_fetch_params.outputs.cache-hit != 'true'
        uses: ./.github/actions/install-system-dependencies
      - if: steps.restore_fetch_params.outputs.cache-hit != 'true'
        uses: ./.github/actions/install-go
      - if: steps.restore_fetch_params.outputs.cache-hit != 'true' || steps.restore_make_deps.outputs.cache-hit != 'true'
        env:
          GITHUB_TOKEN: ${{ github.token }}
        run: make deps
      - if: steps.restore_fetch_params.outputs.cache-hit != 'true'
        run: make lotus
      - if: steps.restore_fetch_params.outputs.cache-hit != 'true'
        run: ./lotus fetch-params 2048
      - if: steps.restore_fetch_params.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
        with:
          key: ${{ steps.fetch_params.outputs.key }}
          path: ${{ steps.fetch_params.outputs.path }}
      - if: steps.restore_make_deps.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
        with:
          key: ${{ steps.make_deps.outputs.key }}
          path: ${{ steps.make_deps.outputs.path }}
  test:
    needs: [discover, cache]
    name: Test (${{ matrix.name }})
    runs-on: ${{ github.repository == 'filecoin-project/lotus' && matrix.runner || 'ubuntu-latest' }}
    strategy:
      fail-fast: false
      matrix:
        include: ${{ fromJson(needs.discover.outputs.groups) }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: 'recursive'
      - uses: ./.github/actions/install-system-dependencies
      - uses: ./.github/actions/install-go
      - run: go install gotest.tools/gotestsum@latest
      - name: Restore cached make deps outputs
        uses: actions/cache/restore@v4
        with:
          key: ${{ needs.cache.outputs.make_deps_key }}
          path: ${{ needs.cache.outputs.make_deps_path }}
          fail-on-cache-miss: true
      - if: ${{ matrix.needs_parameters }}
        name: Restore cached fetch params outputs
        uses: actions/cache/restore@v4
        with:
          key: ${{ needs.cache.outputs.fetch_params_key }}
          path: ${{ needs.cache.outputs.fetch_params_path }}
          fail-on-cache-miss: true
      - if: ${{ matrix.needs_yugabytedb }}
        uses: ./.github/actions/start-yugabytedb
        timeout-minutes: 3
      # TODO: Install statediff (used to be used for conformance)
      - id: reports
        run: mktemp -d | xargs -0 -I{} echo "path={}" | tee -a $GITHUB_OUTPUT
      # TODO: Track coverage (used to be tracked for conformance)
      - env:
          NAME: ${{ matrix.name }}
          LOTUS_SRC_DIR: ${{ github.workspace }}
          LOTUS_HARMONYDB_HOSTS: 127.0.0.1
          REPORTS_PATH: ${{ steps.reports.outputs.path }}
          SKIP_CONFORMANCE: ${{ matrix.skip_conformance || '1' }}
          TEST_RUSTPROOFS_LOGS: ${{ matrix.test_rustproofs_logs || '0' }}
          FORMAT: ${{ matrix.format || 'standard-verbose' }}
          PACKAGES: ${{ join(matrix.packages, ' ') }}
        run: |
          gotestsum \
            --format "$FORMAT" \
            --junitfile "$REPORTS_PATH/$NAME.xml" \
            --jsonfile "$REPORTS_PATH/$NAME.json" \
            --packages="$PACKAGES" \
            -- ${{ matrix.go_test_flags || '' }}
      - if: success() || failure()
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.name }}
          path: |
            ${{ steps.reports.outputs.path }}/${{ matrix.name }}.xml
            ${{ steps.reports.outputs.path }}/${{ matrix.name }}.json
        continue-on-error: true
```
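The `discover` job above builds the entire test matrix with `jq`: one group per integration-test file plus the predefined unit and special groups. A minimal sketch of the same composition, run against a throwaway directory (the file and group names here are illustrative, not taken from the repository):

```bash
#!/usr/bin/env bash
# Sketch of the jq-based group discovery used by the workflow above.
set -euo pipefail
cd "$(mktemp -d)"

# Toy integration-test files standing in for ./itests/*_test.go.
mkdir -p itests
touch itests/api_test.go itests/worker_test.go

# One group per itest file: {"name": "itest-<file>", "packages": ["<path>"]}.
itests="$(
  find ./itests -name "*_test.go" | \
    jq -R '{
      "name": "itest-\(. | split("/") | .[2] | sub("_test.go$";""))",
      "packages": [.]
    }' | jq -s
)"

# A hand-written unit-test group, mirroring one entry of the utests env variable.
utests='[{"name": "unit-node", "packages": ["./node/..."]}]'

# Concatenate both lists into the JSON consumed by the test job matrix.
jq -n --argjson i "$itests" --argjson u "$utests" '$i + $u'
```

Each resulting object carries a `name` and a `packages` list; the real workflow then layers on `runner`, `needs_yugabytedb`, and `needs_parameters` with further `jq` passes before writing the result to `$GITHUB_OUTPUT`.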
.github/workflows/testground-on-push.yml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@

```yaml
---
name: Testground PR Checker

on: [push]

jobs:
  testground:
    runs-on: ubuntu-latest
    name: ${{ matrix.composition_file }}
    strategy:
      matrix:
        include:
          - backend_addr: ci.testground.ipfs.team
            backend_proto: https
            plan_directory: testplans/lotus-soup
            composition_file: testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml
          - backend_addr: ci.testground.ipfs.team
            backend_proto: https
            plan_directory: testplans/lotus-soup
            composition_file: testplans/lotus-soup/_compositions/paych-stress-k8s.toml
    steps:
      - uses: actions/checkout@v2
      - name: testground run
        uses: testground/testground-github-action@v1
        with:
          backend_addr: ${{ matrix.backend_addr }}
          backend_proto: ${{ matrix.backend_proto }}
          plan_directory: ${{ matrix.plan_directory }}
          composition_file: ${{ matrix.composition_file }}
```
.gitignore (vendored, 7 lines changed)

```diff
@@ -1,14 +1,12 @@
 /lotus
 /lotus-miner
 /lotus-worker
-/lotus-provider
 /lotus-seed
 /lotus-health
 /lotus-chainwatch
 /lotus-shed
 /lotus-sim
-/curio
-/sptool
+/lotus-provider
 /lotus-townhall
 /lotus-fountain
 /lotus-stats
@@ -38,9 +36,6 @@ build/paramfetch.sh
 /darwin
 /linux
 *.snap
-devgen.car
-localnet.json
-/*.ndjson
 
 *-fuzz.zip
 /chain/types/work_msg/
```
.golangci.yml

```diff
@@ -5,12 +5,15 @@ linters:
     - govet
     - goimports
     - misspell
-    - revive
+    - goconst
+    - golint
     - errcheck
     - gosec
     - unconvert
     - staticcheck
-    - exportloopref
+    - varcheck
+    - deadcode
+    - scopelint
     - unused
 
 # We don't want to skip builtin/
@@ -22,36 +25,37 @@ skip-dirs:
 
 issues:
   exclude:
-    # gosec
-    - "^G101: Potential hardcoded credentials"
-    - "^G108: Profiling endpoint is automatically exposed on /debug/pprof"
-    - "^G204: Subprocess launched with (variable|a potential tainted input or cmd arguments)"
-    - "^G301: Expect directory permissions to be 0750 or less"
-    - "^G302: Expect file permissions to be 0600 or less"
-    - "^G304: Potential file inclusion via variable"
-    - "^G306: Expect WriteFile permissions to be 0600 or less"
-    - "^G404: Use of weak random number generator"
-    # staticcheck
-    - "^SA1019: xerrors.* is deprecated: As of Go 1.13, use errors"
-    # revive
-    - "^blank-imports: a blank import should be only in a main or test package, or have a comment justifying it"
-    - "^dot-imports: should not use dot imports"
-    - "^exported: (func|type) name will be used as [^\\s]+ by other packages, and that stutters; consider calling this \\w+"
-    - "^exported: comment on exported (const|function|method|type|var) [^\\s]+ should be of the form \"\\w+ ...\""
-    - "^exported: exported (const|function|method|type|var) [^\\s]+ should have comment (\\(or a comment on this block\\) )?or be unexported"
-    - "^indent-error-flow: if block ends with a return statement, so drop this else and outdent its block \\(move short variable declaration to its own line if necessary\\)"
-    - "^package-comments: package comment should be of the form \"Package \\w+ ...\""
-    - "^package-comments: should have a package comment"
-    - "^unexported-return: exported func \\w+ returns unexported type [^\\s]+, which can be annoying to use"
-    - "^unused-parameter: parameter '\\w+' seems to be unused, consider removing or renaming it as _"
-    - "^var-naming: (const|func|type|var|struct field|(method|func|interface method) parameter) [A-Z]\\w+ should be"
-    - "^var-naming: (method|range var) \\w*(Api|Http|Id|Rpc|Url)[^\\s]* should be \\w*(API|HTTP|ID|RPC|URL)"
-    - "^var-naming: don't use underscores in Go names"
-    - "^var-naming: don't use ALL_CAPS in Go names; use CamelCase"
+    - "by other packages, and that stutters; consider calling this"
+    - "Potential file inclusion via variable"
+    - "should have( a package)? comment"
+    - "Error return value of `logging.SetLogLevel` is not checked"
+    - "comment on exported"
+    - "(func|method) \\w+ should be \\w+"
+    - "(type|var|struct field|(method|func) parameter) `\\w+` should be `\\w+`"
+    - "(G306|G301|G307|G108|G302|G204|G104)"
+    - "don't use ALL_CAPS in Go names"
+    - "string .* has .* occurrences, make it a constant"
+    - "a blank import should be only in a main or test package, or have a comment justifying it"
+    - "package comment should be of the form"
+    - "Potential hardcoded credentials"
+    - "Use of weak random number generator"
+    - "xerrors.* is deprecated"
 
   exclude-use-default: false
   exclude-rules:
 
+    - path: node/modules/lp2p
+      linters:
+        - golint
+
+    - path: build/params_.*\.go
+      linters:
+        - golint
+
+    - path: api/apistruct/struct.go
+      linters:
+        - golint
+
     - path: .*_test.go
       linters:
         - gosec
@@ -63,3 +67,12 @@ issues:
     - path: cmd/lotus-bench/.*
       linters:
         - gosec
+
+    - path: api/test/.*
+      text: "context.Context should be the first parameter"
+      linters:
+        - golint
+
+linters-settings:
+  goconst:
+    min-occurrences: 6
```
.goreleaser.yaml

```diff
@@ -27,7 +27,7 @@ builds:
       - goos: linux
         goarch: arm64
     prebuilt:
-      path: '{{ .Env.GITHUB_WORKSPACE }}/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus'
+      path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus
   - id: lotus-miner
     binary: lotus-miner
     builder: prebuilt
@@ -43,7 +43,7 @@ builds:
       - goos: linux
         goarch: arm64
     prebuilt:
-      path: '{{ .Env.GITHUB_WORKSPACE }}/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-miner'
+      path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-miner
   - id: lotus-worker
     binary: lotus-worker
     builder: prebuilt
@@ -59,7 +59,7 @@ builds:
       - goos: linux
         goarch: arm64
     prebuilt:
-      path: '{{ .Env.GITHUB_WORKSPACE }}/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-worker'
+      path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-worker
 
 archives:
   - id: primary
```
CHANGELOG.md (364 lines)

@@ -6,364 +6,6 @@

## Improvements

# v1.27.1-rc2 / 2024-06-17

This is the second release candidate of the upcoming optional release of Lotus v1.27.1

## ☢️ Upgrade Warnings ☢️

- This Lotus release completely removes the Legacy Lotus/Lotus-Miner Markets sub-system from the codebase, which was announced to reach EOL on January 31, 2023.
- The **Curio Storage** software, designed to simplify the setup and operation of storage providers, has moved to their own Github-repository: https://github.com/filecoin-project/curio.

### JSON-RPC 2.0 Specification Conformance

The JSON-RPC 2.0 specification requires that a `"result"` property be present in the case of no error from an API call. This release ensures that all API calls that return a result have a `"result"` property in the response. This is a behaviour change over Lotus v1.26 and will impact any API call that only has a single error return value, where no error has occurred.

For example, a successful `WalletSetDefault` in v1.26 would return:

```json
{
  "jsonrpc": "2.0",
  "id": 1
}
```

As of this change, in conformance with the JSON-RPC 2.0 specification it will return:

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "result": null
}
```

There is no change in the behaviour when a call returns an error, as the error object will still be present in the response.
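For comparison, a failed call keeps the shape it already had, with the `error` member that the JSON-RPC 2.0 specification requires in place of `result` (the code and message below are illustrative):

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "error": {
    "code": 1,
    "message": "example error"
  }
}
```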
|
|
||||||
## New features
|
|
||||||
|
|
||||||
- feat: Add trace transaction API supporting RPC method `trace_transaction` ([filecoin-project/lotus#12068](https://github.com/filecoin-project/lotus/pull/12068))
|
|
||||||
- feat: Skeleton for nv23 (#11964) ([filecoin-project/lotus#11964](https://github.com/filecoin-project/lotus/pull/11964))
|
|
||||||
- feat: state: Ignore market balance after nv23 (#11976) ([filecoin-project/lotus#11976](https://github.com/filecoin-project/lotus/pull/11976))
|
|
||||||
- feat: ETH compatibility in Filecoin : Support Homestead and EIP-155 Ethereum transactions("legacy" transactions) in Filecoin after NV23 (#11969) ([filecoin-project/lotus#11969](https://github.com/filecoin-project/lotus/pull/11969))
|
|
||||||
- fix: hello: avoid dialing when fetching hello tipset (#12032) ([filecoin-project/lotus#12032](https://github.com/filecoin-project/lotus/pull/12032))
|
|
||||||
- feat: cli,events: speed up backfill with temporary index (#11953) ([filecoin-project/lotus#11953](https://github.com/filecoin-project/lotus/pull/11953))
|
|
||||||
|
|
||||||
## Improvements
|
|
||||||
- Event index should be unique for tipsets (#11952) ([filecoin-project/lotus#11952](https://github.com/filecoin-project/lotus/pull/11952))
|
|
||||||
- cleanup: Lotus client: Remove markets and deal-making from Lotus Client (#11999) ([filecoin-project/lotus#11999](https://github.com/filecoin-project/lotus/pull/11999))
|
|
||||||
- fix: ci: use filecoin-ffi hash to cache make deps outputs (#11961) ([filecoin-project/lotus#11961](https://github.com/filecoin-project/lotus/pull/11961))
|
|
||||||
- add ETH addrs API to Gateway (#11979) ([filecoin-project/lotus#11979](https://github.com/filecoin-project/lotus/pull/11979))
|
|
||||||
- chore: remove unmaintained bootstrappers (#11983) ([filecoin-project/lotus#11983](https://github.com/filecoin-project/lotus/pull/11983))
|
|
||||||
- feat: api: add SectorNumber to MarketDealState (nv22)
|
|
||||||
- fix: copy Flags field from SectorOnChainInfo
|
|
||||||
- fix: ETH RPC API: ETH Call should use the parent state root of the subsequent tipset ([filecoin-project/lotus#11905](https://github.com/filecoin-project/lotus/pull/11905))
|
|
||||||
- fix: events: sqlite db improvements ([filecoin-project/lotus#12090](https://github.com/filecoin-project/lotus/pull/12090))
|
|
||||||
|
|
||||||
## Dependencies
|
|
||||||
|
|
||||||
- chore: libp2p: update to v0.34.1 (#12027) ([filecoin-project/lotus#12027](https://github.com/filecoin-project/lotus/pull/12027))
|
|
||||||
- chore: update drand (#12021) ([filecoin-project/lotus#12021](https://github.com/filecoin-project/lotus/pull/12021))
|
|
||||||
- Bump pubsub-dep (#11966) ([filecoin-project/lotus#11966](https://github.com/filecoin-project/lotus/pull/11966))
|
|
||||||
- fix: update go-jsonrpc to v0.3.2
|
|
||||||
- Bump go-jsonrpc to v0.4.0 (#12034) ([filecoin-project/lotus#12034](https://github.com/filecoin-project/lotus/pull/12034))
|
|
||||||
- docs: rpc: document go-jsonrpc behaviour change
|
|
||||||
- chore: update go-data-transfer and go-graphsync
|
|
||||||
- github.com/filecoin-project/go-jsonrpc (v0.3.1 -> v0.3.2)
|
|
||||||
- github.com/filecoin-project/go-state-types (v0.13.3 -> v0.14.0-dev)
|
|
||||||
|
|
||||||
## Lotus-Miner / Curio related changes
|
|
||||||
|
|
||||||
- fix logs (#12036) ([filecoin-project/lotus#12036](https://github.com/filecoin-project/lotus/pull/12036))
|
|
||||||
- feat: curioweb: Improve task_history indexes (#11911) ([filecoin-project/lotus#11911](https://github.com/filecoin-project/lotus/pull/11911))
|
|
||||||
- fix: curio taskstorage: Don't try to free reservations by nulled TaskID (#12018) ([filecoin-project/lotus#12018](https://github.com/filecoin-project/lotus/pull/12018))
|
|
||||||
- fix actor string (#12019) ([filecoin-project/lotus#12019](https://github.com/filecoin-project/lotus/pull/12019))
|
|
||||||
- fix: curio: Update pgx imports, fix db_storage alloc
|
|
||||||
- feat: curioweb: Show piece info on the sector page (#11955) ([filecoin-project/lotus#11955](https://github.com/filecoin-project/lotus/pull/11955))
|
|
||||||
- curio: feat: break trees task into TreeD(prefetch) and TreeRC (#11895) ([filecoin-project/lotus#11895](https://github.com/filecoin-project/lotus/pull/11895))
|
|
||||||
- fix: curio: node UI & darwin gpu count (#11950) ([filecoin-project/lotus#11950](https://github.com/filecoin-project/lotus/pull/11950))
|
|
||||||
- feat: curio: Keep more sector metadata in the DB long-term (#11933) ([filecoin-project/lotus#11933](https://github.com/filecoin-project/lotus/pull/11933))
|
|
||||||
- fix: curio/lmrpc: Check ParkPiece success before creating sectors (#11975) ([filecoin-project/lotus#11975](https://github.com/filecoin-project/lotus/pull/11975))
|
|
||||||
- feat: curio: docker devnet (#11954) ([filecoin-project/lotus#11954](https://github.com/filecoin-project/lotus/pull/11954))
|
|
||||||
- feat: curio: alertManager (#11926) ([filecoin-project/lotus#11926](https://github.com/filecoin-project/lotus/pull/11926))
|
|
||||||
- curio cfg edit: ux cleanups (#11985) ([filecoin-project/lotus#11985](https://github.com/filecoin-project/lotus/pull/11985))
|
|
||||||
- fix: curio: Drop FKs from pipeline to fix retry loops (#11973) ([filecoin-project/lotus#11973](https://github.com/filecoin-project/lotus/pull/11973))
|
|
||||||
- Produce DEB files for amd64 for openCL and cuda (#11885) ([filecoin-project/lotus#11885](https://github.com/filecoin-project/lotus/pull/11885))
|
|
||||||
- gui-listen fix (#12013) ([filecoin-project/lotus#12013](https://github.com/filecoin-project/lotus/pull/12013))
|
|
||||||
- feat: curio: allow multiple pieces per sector (#11935) ([filecoin-project/lotus#11935](https://github.com/filecoin-project/lotus/pull/11935))
|
|
||||||
- chore: update yugabyte deps (#12022) ([filecoin-project/lotus#12022](https://github.com/filecoin-project/lotus/pull/12022))
|
|
||||||
- fix: harmonydb: Use timestampz instead of timestamp across the schema (#12030) ([filecoin-project/lotus#12030](https://github.com/filecoin-project/lotus/pull/12030))
|
|
||||||
- cleanup: miner: remove markets and deal-making from Lotus Miner (#12005) ([filecoin-project/lotus#12005](https://github.com/filecoin-project/lotus/pull/12005))
|
|
||||||
- fix non existing sector (#12012) ([filecoin-project/lotus#12012](https://github.com/filecoin-project/lotus/pull/12012))
|
|
||||||
- feat: curio ffiselect: Isolate gpu calls in a subprocess (#11994) ([filecoin-project/lotus#11994](https://github.com/filecoin-project/lotus/pull/11994))
|
|
||||||
- feat: curio: jsonrpc in webui (#11904) ([filecoin-project/lotus#11904](https://github.com/filecoin-project/lotus/pull/11904))
|
|
||||||
- fix: itests: Fix flaky curio itest (#12037) ([filecoin-project/lotus#12037](https://github.com/filecoin-project/lotus/pull/12037))
|
|
||||||
- feat: curio: wdPost and wnPost alerts (#12029) ([filecoin-project/lotus#12029](https://github.com/filecoin-project/lotus/pull/12029))
|
|
||||||
- fix: storage: Fix a race in GenerateWindowPoStAdv (#12064) ([filecoin-project/lotus#12064](https://github.com/filecoin-project/lotus/pull/12064))
|
|
||||||
- Remove "provider" relics (#11992) ([filecoin-project/lotus#11992](https://github.com/filecoin-project/lotus/pull/11992))
|
|
||||||
- fix sector UI (#12016) ([filecoin-project/lotus#12016](https://github.com/filecoin-project/lotus/pull/12016))
|
|
||||||
|
|
||||||
## Others
|
|
||||||
- ci: deprecate circle ci in favour of github actions (#11786) ([filecoin-project/lotus#11786](https://github.com/filecoin-project/lotus/pull/11786))
|
|
||||||
- src: chain: remove C dependency from builtin types (#12015) ([filecoin-project/lotus#12015](https://github.com/filecoin-project/lotus/pull/12015))
|
|
||||||
- chore: fix function names (#12043) ([filecoin-project/lotus#12043](https://github.com/filecoin-project/lotus/pull/12043))
|
|
||||||
- chore: bump build version in master (#11946) ([filecoin-project/lotus#11946](https://github.com/filecoin-project/lotus/pull/11946))
|
|
||||||
- fix: test: no snap deals in immutable deadlines (#12071) ([filecoin-project/lotus#12071](https://github.com/filecoin-project/lotus/pull/12071))
|
|
||||||
- test: actors: manual CC onboarding and proving integration test (#12017) ([filecoin-project/lotus#12017](https://github.com/filecoin-project/lotus/pull/12017))
|
|
||||||
- fix: ci: keep lotus checkout clean in the release workflow (#12028) ([filecoin-project/lotus#12028](https://github.com/filecoin-project/lotus/pull/12028))
|
|
||||||
- feat!: build: separate miner and node version strings
|
|
||||||
- chore: lint: address feedback from reviews
|
|
||||||
- chore: lint: fix lint errors with new linting config
|
|
||||||
- chore: lint: update golangci lint config
|
|
||||||
- ci: fix when sorted pr checks workflow is executed
|
|
||||||
- doc: eth: restore comment lost in linter cleanup
|
|
||||||
- fix: ci: publish correct docker tags on workflow dispatch (#12060) ([filecoin-project/lotus#12060](https://github.com/filecoin-project/lotus/pull/12060))
|
|
||||||
- feat: libp2p: Lotus stream cleanup (#11993) ([filecoin-project/lotus#11993](https://github.com/filecoin-project/lotus/pull/11993))
|
|
||||||
- Update SupportedProofTypes (#11988) ([filecoin-project/lotus#11988](https://github.com/filecoin-project/lotus/pull/11988))
|
|
||||||
- Revert "Update SupportedProofTypes (#11988)" (#11990) ([filecoin-project/lotus#11990](https://github.com/filecoin-project/lotus/pull/11990))
|
|
||||||
- chore: docs: Update skeleton guide (#11960) ([filecoin-project/lotus#11960](https://github.com/filecoin-project/lotus/pull/11960))
|
|
||||||
- chore: ci: request contents read permissions explicitly in gha (#12055) ([filecoin-project/lotus#12055](https://github.com/filecoin-project/lotus/pull/12055))
|
|
||||||
- fix: ci: use custom GITHUB_TOKEN for GoReleaser (#12059) ([filecoin-project/lotus#12059](https://github.com/filecoin-project/lotus/pull/12059))
|
|
||||||
- chore: pin golanglint-ci to v1.58.2 (#12054) ([filecoin-project/lotus#12054](https://github.com/filecoin-project/lotus/pull/12054))
|
|
||||||
- chore: fix some function names (#12031) ([filecoin-project/lotus#12031](https://github.com/filecoin-project/lotus/pull/12031))
|
|
||||||
- src: lint: bump golangci-lint to 1.59, address unchecked fmt.Fprint*
|
|
||||||
- fix: ci: do not use deprecated --debug goreleaser flag ([filecoin-project/lotus#12086](https://github.com/filecoin-project/lotus/pull/12086))
|
|
||||||
- chore: Remove forgotten graphsync references ([filecoin-project/lotus#12084](https://github.com/filecoin-project/lotus/pull/12084))
|
|
||||||
- chore: types: remove more items forgotten after markets ([filecoin-project/lotus#12095](https://github.com/filecoin-project/lotus/pull/12095))
|
|
||||||
- chore: api: the Net API/CLI now remains only on daemon ([filecoin-project/lotus#12100](https://github.com/filecoin-project/lotus/pull/12100))
|
|
||||||
|
|
||||||
## Contributors
|
|
||||||
|
|
||||||
| Contributor | Commits | Lines ± | Files Changed |
|
|
||||||
|-------------|---------|---------|---------------|
|
|
||||||
| Aarsh Shah | 9 | +5710/-35899 | 201 |
|
|
||||||
| Łukasz Magiera | 21 | +1891/-33776 | 335 |
|
|
||||||
| LexLuthr | 9 | +4916/-1637 | 107 |
|
|
||||||
| Phi-rjan | 9 | +3544/-187 | 92 |
|
|
||||||
| Rod Vagg | 15 | +2183/-479 | 164 |
|
|
||||||
| Piotr Galar | 6 | +130/-2386 | 30 |
|
|
||||||
| Andrew Jackson (Ajax) | 6 | +1072/-533 | 63 |
|
|
||||||
| ZenGround0 | 1 | +235/-13 | 3 |
|
|
||||||
| Hubert Bugaj | 3 | +57/-37 | 5 |
|
|
||||||
| Steven Allen | 3 | +25/-15 | 6 |
|
|
||||||
| Peter Rabbitson | 1 | +16/-8 | 4 |
|
|
||||||
| tomfees | 1 | +6/-6 | 5 |
|
|
||||||
| imxyb | 1 | +6/-0 | 1 |
|
|
||||||
| yumeiyin | 1 | +2/-2 | 2 |
|
|
||||||
| galargh | 1 | +2/-2 | 1 |
|
|
||||||
|
|
||||||
# v1.27.0 / 2024-05-27
|
|
||||||
|
|
||||||
This is an optional feature release of Lotus. Lotus v1.27.0 includes numerous improvements, bugfixes and enhancements for node operators, RPC- and ETH RPC-providers. This feature release also introduces Curio in a Beta release. Check out the Curio Beta release section for how you can get started with Curio.
|
|
||||||
|
|
||||||
## ☢️ Upgrade Warnings ☢️
|
|
||||||
|
|
||||||
- This feature release drops the Raft cluster code experiment from the codebase. This Raft cluster never graduated beyond an experiment, had poor UX (e.g. no way to manage a running cluster, so it didn't provide High Availability), and pulled in a lot of heavy dependencies. We keep the multi-node RPC feature, it is not perfect, but it is useful.
|
|
||||||
- Event Database: Two sequential migrations will adjust indexes without altering data or columns, ensuring minimal invasiveness when upgrading to this release. However, these migrations may be time-consuming for nodes with extensive event databases.
|
|
||||||
|
|
||||||
## Indexers, RPC- and ETH RPC-providers improvements
|
|
||||||
|
|
||||||
This release includes a lot of improvements and fixes for indexers, RPC- and ETH RPC-providers. Specifically these PRs:
|
|
||||||
|
|
||||||
- [Significant performance improvements of eth_getLog](https://github.com/filecoin-project/lotus/pull/11477)
|
|
||||||
- [Return the correct block gas limit in the EthAP](https://github.com/filecoin-project/lotus/pull/11747)
|
|
||||||
- [Accept input data in call arguments under field 'input'](https://github.com/filecoin-project/lotus/pull/11505)
|
|
||||||
- [Length check the array sent to eth_feeHistory RPC](https://github.com/filecoin-project/lotus/pull/11696)
|
|
||||||
- [ETH subscribe tipsets API should only return tipsets that have been executed](https://github.com/filecoin-project/lotus/pull/11858)
|
|
||||||
- [Adjust indexes in event index db to match query patterns](https://github.com/filecoin-project/lotus/pull/111934)
|
|
||||||
|
|
||||||
## ⭐️ Curio Beta Release ⭐️
|
|
||||||
|
|
||||||
**Curio**, the next generation of Lotus-Miner, also referred to as MinerV2! This release officially transitions Curio into beta and introduces a suite of powerful features designed to enhance your storage operations.
|
|
||||||
|
|
||||||
### Highlights
|
|
||||||
|
|
||||||
- **Curio as MinerV2**: Embrace the revolutionary upgrade from Lotus-Miner to Curio. This transition is not just a rebranding—it's an upgrade to a more robust, scalable, and user-friendly version.
|
|
||||||
- **High Availability**: Curio is designed for high availability. You can run multiple instances of Curio nodes to handle similar type of tasks. The distributed scheduler and greedy worker design will ensure that tasks are completed on time despite most partial outages. You can safely update one of your Curio machines without disrupting the operation of the others.
|
|
||||||
- **Node Heartbeat**: Each Curio node in a cluster must post a heartbeat message every 10 minutes in HarmonyDB updating its status. If a heartbeat is missed, the node is considered lost and all tasks can now be scheduled on remaining nodes.
|
|
||||||
- **Task Retry**: Each task in Curio has a limit on how many times it should be tried before being declared lost. This ensures that Curio does not keep retrying bad tasks indefinitely. This safeguards against lost computation time and storage.
|
|
||||||
- **Polling**: Curio avoids overloading nodes with a polling system. Nodes check for tasks they can handle, prioritizing idle nodes for even workload distribution.
|
|
||||||
- **Simple Configuration Management**: The configuration is stored in the database in the forms of layers. These layers can be stacked on top of each other to create a final configuration. Users can reuse these layers to control the behavior of multiple machines without needing to maintain the configuration of each node. Start the binary with the appropriate flags to connect with YugabyteDB and specify which configuration layers to use to get desired behaviour.
|
|
||||||
|
|
||||||
### Getting Started with Curio
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cd lotus
|
|
||||||
git pull
|
|
||||||
make clean deps all
|
|
||||||
sudo make install
|
|
||||||
```
|
|
||||||
|
|
||||||
On your local-dev-net or calibrationnet lotus-miner machine, initiate:
|
|
||||||
|
|
||||||
`curio guided-setup`
|
|
||||||
|
|
||||||
### Need More Info?
|
|
||||||
|
|
||||||
For detailed documentation and additional information on Curio:
|
|
||||||
|
|
||||||
Curio Overview <- insert link
|
|
||||||
Visit the Curio Official Website insert link
|
|
||||||
|
|
||||||
❗Curio is in Beta state, and we recommend our users to run Curio in a testing environment or on the Calibration network for the time being.
|
|
||||||
|
|
||||||
## New features
|
|
||||||
|
|
||||||
- feat: exchange: change GetBlocks to always fetch the requested number of tipsets ([filecoin-project/lotus#11565](https://github.com/filecoin-project/lotus/pull/11565))
|
|
||||||
- feat: syncer: optimize syncFork for one-epoch forks ([filecoin-project/lotus#11533](https://github.com/filecoin-project/lotus/pull/11533))
|
|
||||||
- feat: api: improve the correctness of Eth's trace_block (#11609) ([filecoin-project/lotus#11609](https://github.com/filecoin-project/lotus/pull/11609))
|
|
||||||
- perf: api: add indexes to event topics and emitter addr (#11477) ([filecoin-project/lotus#11477](https://github.com/filecoin-project/lotus/pull/11477))
|
|
||||||
- feat: drand: refactor round verification ([filecoin-project/lotus#11598](https://github.com/filecoin-project/lotus/pull/11598))
|
|
||||||
- feat: add Forest bootstrap nodes (#11636) ([filecoin-project/lotus#11636](https://github.com/filecoin-project/lotus/pull/11636))
|
|
||||||
- feat: curio: add miner init (#11775) ([filecoin-project/lotus#11775](https://github.com/filecoin-project/lotus/pull/11775))
|
|
||||||
- feat: curio: sectors UI (#11869) ([filecoin-project/lotus#11869](https://github.com/filecoin-project/lotus/pull/11869))
|
|
||||||
- feat: curio: storage index gc task (#11884) ([filecoin-project/lotus#11884](https://github.com/filecoin-project/lotus/pull/11884))
|
|
||||||
- feat: curio: web based config edit (#11822) ([filecoin-project/lotus#11822](https://github.com/filecoin-project/lotus/pull/11822))
|
|
||||||
- feat: spcli: sectors extend improvements (#11798) ([filecoin-project/lotus#11798](https://github.com/filecoin-project/lotus/pull/11798))
|
|
||||||
- feat: curio: Add schemas for DDO deal support (#11805) ([filecoin-project/lotus#11805](https://github.com/filecoin-project/lotus/pull/11805))
|
|
||||||
- feat: curioweb: add favicon (#11804) ([filecoin-project/lotus#11804](https://github.com/filecoin-project/lotus/pull/11804))
|
|
||||||
- feat: lotus-provider: Fetch params on startup when needed ([filecoin-project/lotus#11650](https://github.com/filecoin-project/lotus/pull/11650))
|
|
||||||
- feat: mpool: Cache actors in lite mode (#11668) ([filecoin-project/lotus#11668](https://github.com/filecoin-project/lotus/pull/11668))
|
|
||||||
- feat: curio: simpler reservation release logic (#11900) ([filecoin-project/lotus#11900](https://github.com/filecoin-project/lotus/pull/11900))
|
|
||||||
- feat: curio: add StorageInit api (#11918) ([filecoin-project/lotus#11918](https://github.com/filecoin-project/lotus/pull/11918))
|
|
||||||
- feat: lotus-provider: SDR Sealing pipeline ([filecoin-project/lotus#11534](https://github.com/filecoin-project/lotus/pull/11534))
|
|
||||||
- feat: curioweb: Sector info page (#11846) ([filecoin-project/lotus#11846](https://github.com/filecoin-project/lotus/pull/11846))
|
|
||||||
- feat: curio web: node info page (#11745) ([filecoin-project/lotus#11745](https://github.com/filecoin-project/lotus/pull/11745))
|
|
||||||
- feat: fvm: optimize FVM lanes a bit (#11875) ([filecoin-project/lotus#11875](https://github.com/filecoin-project/lotus/pull/11875))
|
|
||||||
- feat: Gateway API: Add ETH -> FIL and FIL -> ETH address conversion APIs to the Gateway (#11979) ([filecoin-project/lotus#11979](https://github.com/filecoin-project/lotus/pull/11979))
|
|
||||||
|
|
||||||
## Improvements
|
|
||||||
|
|
||||||
- fix: api: return the correct block gas limit in the EthAPI (#11747) ([filecoin-project/lotus#11747](https://github.com/filecoin-project/lotus/pull/11747))
|
|
||||||
- fix: exchange: explicitly cast the block message limit const (#11511) ([filecoin-project/lotus#11511](https://github.com/filecoin-project/lotus/pull/11511))
- fix: Eth API: accept input data in call arguments under field 'input' (#11505) ([filecoin-project/lotus#11505](https://github.com/filecoin-project/lotus/pull/11505))
- fix: api: Length check the array sent to eth_feeHistory RPC (#11696) ([filecoin-project/lotus#11696](https://github.com/filecoin-project/lotus/pull/11696))
- fix: api: fix EthSubscribe tipsets off by one (#11858) ([filecoin-project/lotus#11858](https://github.com/filecoin-project/lotus/pull/11858))
- fix: lotus-provider: Fix log output format in wdPostTaskCmd ([filecoin-project/lotus#11504](https://github.com/filecoin-project/lotus/pull/11504))
- fix: lmcli: make 'sectors list' DDO-aware (#11839) ([filecoin-project/lotus#11839](https://github.com/filecoin-project/lotus/pull/11839))
- fix: lpwinning: Fix MiningBase.afterPropDelay ([filecoin-project/lotus#11654](https://github.com/filecoin-project/lotus/pull/11654))
- fix: exchange: allow up to 10k messages per block ([filecoin-project/lotus#11506](https://github.com/filecoin-project/lotus/pull/11506))
- fix: harmony: Fix task reclaim on restart ([filecoin-project/lotus#11498](https://github.com/filecoin-project/lotus/pull/11498))
- fix: lotus-provider: Wait for the correct taskID ([filecoin-project/lotus#11493](https://github.com/filecoin-project/lotus/pull/11493))
- fix: lotus-provider: show addresses in log ([filecoin-project/lotus#11490](https://github.com/filecoin-project/lotus/pull/11490))
- fix: sql Scan cannot write to an object ([filecoin-project/lotus#11485](https://github.com/filecoin-project/lotus/pull/11485))
- fix: lotus-provider: Fix winning PoSt ([filecoin-project/lotus#11482](https://github.com/filecoin-project/lotus/pull/11482))
- fix: lotus-provider: lotus-provider msg sending ([filecoin-project/lotus#11480](https://github.com/filecoin-project/lotus/pull/11480))
- fix: chain: use latest go-state-types types for miner UI ([filecoin-project/lotus#11566](https://github.com/filecoin-project/lotus/pull/11566))
- fix: Dockerfile non-interactive snapshot import (#11579) ([filecoin-project/lotus#11579](https://github.com/filecoin-project/lotus/pull/11579))
- fix: daemon: avoid prompting to remove chain when noninteractive (#11582) ([filecoin-project/lotus#11582](https://github.com/filecoin-project/lotus/pull/11582))
- fix: (events): check for sync-in-progress (#11932) ([filecoin-project/lotus#11932](https://github.com/filecoin-project/lotus/pull/11932))
- fix: curio: common commands (#11879) ([filecoin-project/lotus#11879](https://github.com/filecoin-project/lotus/pull/11879))
- fix: curio: fix incorrect null check for varchar column (#11881) ([filecoin-project/lotus#11881](https://github.com/filecoin-project/lotus/pull/11881))
- fix: local storage reservations fixes (#11866) ([filecoin-project/lotus#11866](https://github.com/filecoin-project/lotus/pull/11866))
- fix: curio: Check deal start epoch passed in PrecommitSubmit (#11873) ([filecoin-project/lotus#11873](https://github.com/filecoin-project/lotus/pull/11873))
- fix: curio: base config by default (#11676) ([filecoin-project/lotus#11676](https://github.com/filecoin-project/lotus/pull/11676))
- fix: curio: Start BoostAdapters before blocking rpc serve (#11871) ([filecoin-project/lotus#11871](https://github.com/filecoin-project/lotus/pull/11871))
- fix: cli: json flag (#11868) ([filecoin-project/lotus#11868](https://github.com/filecoin-project/lotus/pull/11868))
- feat: curio/lmrpc: Ingest backpressure (#11865) ([filecoin-project/lotus#11865](https://github.com/filecoin-project/lotus/pull/11865))
- feat: curio: Cleanup data copies after seal ops (#11847) ([filecoin-project/lotus#11847](https://github.com/filecoin-project/lotus/pull/11847))
- fix: spcli: add reference to the terminate command (#11851) ([filecoin-project/lotus#11851](https://github.com/filecoin-project/lotus/pull/11851))
- fix: sealing: improve gasEstimate logging (#11840) ([filecoin-project/lotus#11840](https://github.com/filecoin-project/lotus/pull/11840))
- fix: harmony: Try other tasks when storage claim fails
- fix: test: TestForkPreMigration hanging when env-var is set (#11838) ([filecoin-project/lotus#11838](https://github.com/filecoin-project/lotus/pull/11838))
- fix: piece: Don't return StartEport in PieceDealInfo.EndEpoch (#11832) ([filecoin-project/lotus#11832](https://github.com/filecoin-project/lotus/pull/11832))
- fix: paths/local: Fix on-disk storage accounting in new reservations (#11825) ([filecoin-project/lotus#11825](https://github.com/filecoin-project/lotus/pull/11825))
- fix: sealing pipeline: Fix panic on padding pieces in WaitDeals (#11708) ([filecoin-project/lotus#11708](https://github.com/filecoin-project/lotus/pull/11708))
- feat: ipfs: remove IPFS client backend (#11661) ([filecoin-project/lotus#11661](https://github.com/filecoin-project/lotus/pull/11661))
- fix: docs: Modify generate-lotus-cli.py to ignore aliases. ([filecoin-project/lotus#11535](https://github.com/filecoin-project/lotus/pull/11535))
- fix: eth: decode as actor creation iff "to" is the EAM (#11520) ([filecoin-project/lotus#11520](https://github.com/filecoin-project/lotus/pull/11520))
- fix(events): properly decorate events db errors (#11856) ([filecoin-project/lotus#11856](https://github.com/filecoin-project/lotus/pull/11856))
- fix: CLI: adjust TermMax for extend-claim used by a different client (#11764) ([filecoin-project/lotus#11764](https://github.com/filecoin-project/lotus/pull/11764))
- fix: copy Flags field from SectorOnChainInfo (#11963) ([filecoin-project/lotus#11963](https://github.com/filecoin-project/lotus/pull/11963))
- feat: libp2p: Lotus stream cleanup (#11993) ([filecoin-project/lotus#11993](https://github.com/filecoin-project/lotus/pull/11993))

## Dependencies

- chore: update deps (#11819) ([filecoin-project/lotus#11819](https://github.com/filecoin-project/lotus/pull/11819))
- chore: mod: use upstream poseidon ([filecoin-project/lotus#11557](https://github.com/filecoin-project/lotus/pull/11557))
- deps: multiaddress ([filecoin-project/lotus#11558](https://github.com/filecoin-project/lotus/pull/11558))
- chore: libp2p: update libp2p deps in master ([filecoin-project/lotus#11522](https://github.com/filecoin-project/lotus/pull/11522))
- dep: go-multi-address ([filecoin-project/lotus#11563](https://github.com/filecoin-project/lotus/pull/11563))
- chore: update go-data-transfer and go-graphsync (#12000) ([filecoin-project/lotus#12000](https://github.com/filecoin-project/lotus/pull/12000))
- chore: update drand (#12021) ([filecoin-project/lotus#12021](https://github.com/filecoin-project/lotus/pull/12021))
- chore: libp2p: update to v0.34.1 (#12027) ([filecoin-project/lotus#12027](https://github.com/filecoin-project/lotus/pull/12027))
- github.com/filecoin-project/go-amt-ipld/ (v4.2.0 -> v4.3.0)
- github.com/filecoin-project/go-state-types (v0.13.1 -> v0.13.3)
- github.com/libp2p/go-libp2p-pubsub (v0.10.0 -> v0.10.1)
- github.com/libp2p/go-libp2p (v0.33.2 -> v0.34.1)

## Others

- ci: ci: create gh workflow that runs go checks (#11761) ([filecoin-project/lotus#11761](https://github.com/filecoin-project/lotus/pull/11761))
- ci: ci: create gh workflow that runs go build (#11760) ([filecoin-project/lotus#11760](https://github.com/filecoin-project/lotus/pull/11760))
- ci: cancel in progress runs on pull requests only (#11842) ([filecoin-project/lotus#11842](https://github.com/filecoin-project/lotus/pull/11842))
- ci: ci: list processes before calling apt-get to enable debugging (#11815) ([filecoin-project/lotus#11815](https://github.com/filecoin-project/lotus/pull/11815))
- ci: ci: allow master main sync to write to the repository (#11784) ([filecoin-project/lotus#11784](https://github.com/filecoin-project/lotus/pull/11784))
- ci: ci: create gh workflow that runs go tests (#11762) ([filecoin-project/lotus#11762](https://github.com/filecoin-project/lotus/pull/11762))
- ci: ci: deprecate circle ci in favour of github actions (#11786) ([filecoin-project/lotus#11786](https://github.com/filecoin-project/lotus/pull/11786))
- misc: Drop the raft-cluster experiment ([filecoin-project/lotus#11468](https://github.com/filecoin-project/lotus/pull/11468))
- chore: fix some typos in comments (#11892) ([filecoin-project/lotus#11892](https://github.com/filecoin-project/lotus/pull/11892))
- chore: fix typos (#11848) ([filecoin-project/lotus#11848](https://github.com/filecoin-project/lotus/pull/11848))
- chore: fix typo (#11697) ([filecoin-project/lotus#11697](https://github.com/filecoin-project/lotus/pull/11697))
- chore: fix 2 typo's (#11542) ([filecoin-project/lotus#11542](https://github.com/filecoin-project/lotus/pull/11542))
- chore: calibnet: Update bootstrap peer list ([filecoin-project/lotus#11672](https://github.com/filecoin-project/lotus/pull/11672))
- chore: build: Bump version in master ([filecoin-project/lotus#11475](https://github.com/filecoin-project/lotus/pull/11475))
- chore: releases: merge releases branch to master ([filecoin-project/lotus#11578](https://github.com/filecoin-project/lotus/pull/11578))
- chore: Add systemd memory note on install and in config (#11641) ([filecoin-project/lotus#11641](https://github.com/filecoin-project/lotus/pull/11641))
- chore: switch back to upstream ledger library (#11651) ([filecoin-project/lotus#11651](https://github.com/filecoin-project/lotus/pull/11651))
- chore: build: update minimum go version to 1.21.7 (#11652) ([filecoin-project/lotus#11652](https://github.com/filecoin-project/lotus/pull/11652))
- chore: docs: nv-skeleton documentation (#11065) ([filecoin-project/lotus#11065](https://github.com/filecoin-project/lotus/pull/11065))
- chore: Add v13 support to invariants-checker (#11931) ([filecoin-project/lotus#11931](https://github.com/filecoin-project/lotus/pull/11931))
- chore: remove unmaintained bootstrappers (#11983) ([filecoin-project/lotus#11983](https://github.com/filecoin-project/lotus/pull/11983))
- chore: go mod: revert go version change as it breaks Docker build (#12050) ([filecoin-project/lotus#12050](https://github.com/filecoin-project/lotus/pull/12050))
- chore: pin golanglint-ci to v1.58.2 ([filecoin-project/lotus#12054](https://github.com/filecoin-project/lotus/pull/12054))

## Contributors

| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Rod Vagg | 20 | +55315/-204 | 58 |
| Łukasz Magiera | 201 | +16244/-6541 | 647 |
| Andrew Jackson (Ajax) | 53 | +15293/-6764 | 394 |
| Phi-rjan | 6 | +12669/-4521 | 221 |
| LexLuthr | 20 | +5972/-2815 | 120 |
| Steven Allen | 22 | +1626/-1264 | 77 |
| Piotr Galar | 9 | +790/-412 | 33 |
| Aayush Rajasekaran | 4 | +642/-509 | 12 |
| Lee | 1 | +601/-533 | 9 |
| qwdsds | 3 | +617/-510 | 11 |
| Phi | 11 | +551/-83 | 32 |
| Jiaying Wang | 5 | +433/-20 | 13 |
| Masih H. Derkani | 4 | +350/-101 | 18 |
| Aayush | 4 | +143/-76 | 17 |
| Aarsh Shah | 3 | +63/-11 | 5 |
| jennijuju | 3 | +22/-22 | 12 |
| hunjixin | 1 | +21/-14 | 4 |
| beck | 2 | +17/-17 | 2 |
| tom123222 | 2 | +28/-4 | 2 |
| Ian Norden | 1 | +21/-1 | 1 |
| ZenGround0 | 1 | +3/-15 | 1 |
| shuangcui | 1 | +7/-7 | 6 |
| Vid Bregar | 1 | +7/-4 | 2 |
| writegr | 1 | +5/-5 | 5 |
| Nagaprasad V R | 1 | +9/-0 | 1 |
| forcedebug | 1 | +4/-4 | 4 |
| parthshah1 | 2 | +6/-1 | 2 |
| fuyangpengqi | 1 | +3/-3 | 3 |
| Samuel Arogbonlo | 1 | +6/-0 | 2 |
| GlacierWalrus | 1 | +0/-6 | 1 |
| Aloxaf | 1 | +6/-0 | 2 |
| Rob Quist | 2 | +2/-3 | 3 |
| wersfeds | 1 | +2/-2 | 1 |
| Jon | 1 | +2/-0 | 1 |
| 0x5459 | 1 | +1/-0 | 1 |

# v1.26.3 / 2024-04-22

**This is a patch release that addresses high memory load concerns for the Lotus daemon in the coming network migration for network version 22, scheduled on epoch `3855360 - 2024-04-24 - 14:00:00Z`.**

If your Lotus daemon is running on a machine with less than 160GB of combined memory and swap, you should upgrade to this patch release to ensure you do not encounter any Out-Of-Memory issues during the pre-migration.
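As a rough pre-flight check before the pre-migration, you can verify how much combined RAM and swap the machine has. This is an illustrative snippet, not part of the official upgrade instructions:

```shell
# Sum RAM and swap (in GiB) and compare against the 160GB guidance above.
total=$(free -g | awk '/^Mem:|^Swap:/ { sum += $2 } END { print sum }')
echo "RAM + swap: ${total} GiB"
if [ "${total}" -lt 160 ]; then
  echo "Below the recommended threshold: consider upgrading to v1.26.3 before epoch 3855360."
fi
```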
# v1.26.2 / 2024-04-08

**This is a mandatory patch release for the Filecoin network version 22 mainnet upgrade, for all node operators.**

@@ -432,7 +74,6 @@ The Filecoin network version 22 delivers the following FIPs:

lotus state actor-cids --network-version=22
Network Version: 22
Actor Version: 13

Manifest CID: bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e

Actor CID
@@ -4110,7 +3751,7 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd

- Config for deal publishing control addresses ([filecoin-project/lotus#6697](https://github.com/filecoin-project/lotus/pull/6697))
  - Set `DealPublishControl` to set the wallet used for sending `PublishStorageDeals` messages, instructions [here](https://lotus.filecoin.io/storage-providers/operate/addresses/#control-addresses).
- Config UX improvements ([filecoin-project/lotus#6848](https://github.com/filecoin-project/lotus/pull/6848))
  - You can now preview the default and updated node config by running `lotus/lotus-miner config default/updated`

## New Features

- ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356))

@@ -4217,7 +3858,6 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd

- Release Template: remove binary validation step ([filecoin-project/lotus#6709](https://github.com/filecoin-project/lotus/pull/6709))
- Reset of the interop network ([filecoin-project/lotus#6689](https://github.com/filecoin-project/lotus/pull/6689))
- Update version.go to 1.11.1 ([filecoin-project/lotus#6621](https://github.com/filecoin-project/lotus/pull/6621))
- chore: pin golanglint-ci to v1.58.2 ([filecoin-project/lotus#12054](https://github.com/filecoin-project/lotus/pull/12054))

## Contributors

@@ -5558,7 +5198,7 @@ This consensus-breaking release of Lotus upgrades the actors version to v2.0.0.

#### Mining

- Increased ExpectedSealDuration and WaitDealsDelay (https://github.com/filecoin-project/lotus/pull/3743)
- Miner backup/restore commands (https://github.com/filecoin-project/lotus/pull/4133)
- lotus-miner: add more help text to storage / attach (https://github.com/filecoin-project/lotus/pull/3961)
- Reject deals that are > 7 days in the future in the BasicDealFilter (https://github.com/filecoin-project/lotus/pull/4173)

Dockerfile (58 changed lines)

@@ -33,18 +33,19 @@ RUN set -eux; \
COPY ./ /opt/filecoin
WORKDIR /opt/filecoin

-RUN scripts/docker-git-state-check.sh
-
-### make configurable filecoin-ffi build
ARG FFI_BUILD_FROM_SOURCE=0
ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE}

-RUN make clean deps
+#RUN make clean deps

ARG RUSTFLAGS=""
ARG GOFLAGS=""

-RUN make buildall
+RUN make clean deps && \
+    make lotus lotus-shed lotus-stats && \
+    install -C ./lotus /usr/local/bin/lotus && \
+    install -C ./lotus-shed /usr/local/bin/lotus-shed && \
+    install -C ./lotus-stats /usr/local/bin/lotus-stats

#####################################
FROM ubuntu:20.04 AS lotus-base

@@ -69,12 +70,11 @@ RUN useradd -r -u 532 -U fc \
FROM lotus-base AS lotus
MAINTAINER Lotus Development Team

-COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/
-COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/
-COPY scripts/docker-lotus-entrypoint.sh /
+COPY --from=lotus-builder \
+    /usr/local/bin/lotus \
+    /usr/local/bin/lotus-shed \
+    /usr/local/bin/

-ARG DOCKER_LOTUS_IMPORT_SNAPSHOT=https://forest-archive.chainsafe.dev/latest/mainnet/
-ENV DOCKER_LOTUS_IMPORT_SNAPSHOT ${DOCKER_LOTUS_IMPORT_SNAPSHOT}
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_PATH /var/lib/lotus
ENV DOCKER_LOTUS_IMPORT_WALLET ""

@@ -89,48 +89,42 @@ USER fc

EXPOSE 1234

-ENTRYPOINT ["/docker-lotus-entrypoint.sh"]

CMD ["-help"]

#####################################
FROM lotus-base AS lotus-all-in-one

+# Install netcat for healthcheck
+RUN apt-get update && apt-get install -y netcat && apt-get install -y iproute2

ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
-ENV LOTUS_MINER_PATH /var/lib/lotus-miner
ENV LOTUS_PATH /var/lib/lotus
-ENV LOTUS_WORKER_PATH /var/lib/lotus-worker
-ENV WALLET_PATH /var/lib/lotus-wallet
+ARG DOCKER_LOTUS_IMPORT_SNAPSHOT=https://forest-archive.chainsafe.dev/latest/mainnet/
+ENV DOCKER_LOTUS_IMPORT_SNAPSHOT=${DOCKER_LOTUS_IMPORT_SNAPSHOT}

COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/
-COPY --from=lotus-builder /opt/filecoin/lotus-seed /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/
-COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/
-COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/
-COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/
-COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/
-COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/
+COPY scripts/docker-lotus-entrypoint.sh /docker-lotus-entrypoint.sh
+RUN chmod +x /docker-lotus-entrypoint.sh

RUN mkdir /var/tmp/filecoin-proof-parameters
RUN mkdir /var/lib/lotus
-RUN mkdir /var/lib/lotus-miner
-RUN mkdir /var/lib/lotus-worker
-RUN mkdir /var/lib/lotus-wallet
RUN chown fc: /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus
-RUN chown fc: /var/lib/lotus-miner
-RUN chown fc: /var/lib/lotus-worker
-RUN chown fc: /var/lib/lotus-wallet


VOLUME /var/tmp/filecoin-proof-parameters
VOLUME /var/lib/lotus
-VOLUME /var/lib/lotus-miner
-VOLUME /var/lib/lotus-worker
-VOLUME /var/lib/lotus-wallet
+#VOLUME /var/lib/lotus-miner
+#VOLUME /var/lib/lotus-worker
+#VOLUME /var/lib/lotus-wallet


EXPOSE 1234
-EXPOSE 2345
-EXPOSE 3456
-EXPOSE 1777
+EXPOSE 1235
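For reference, the build arguments and stages touched above can be exercised directly with `docker build`; the image tags below are illustrative placeholders, while both build args appear in the Dockerfile diff:

```shell
# Build the all-in-one image, compiling filecoin-ffi from source instead of
# using prebuilt assets (FFI_BUILD_FROM_SOURCE is declared in the Dockerfile).
docker build --build-arg FFI_BUILD_FROM_SOURCE=1 --target lotus-all-in-one -t lotus-all-in-one:dev .

# Point the build at a different snapshot for the initial chain import.
docker build --build-arg DOCKER_LOTUS_IMPORT_SNAPSHOT=https://forest-archive.chainsafe.dev/latest/mainnet/ \
  --target lotus-all-in-one -t lotus-all-in-one:dev .
```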
Makefile (74 changed lines)

@@ -66,7 +66,7 @@ CLEAN+=build/.update-modules
deps: $(BUILD_DEPS)
.PHONY: deps

-build-devnets: build lotus-seed lotus-shed
+build-devnets: build lotus-seed lotus-shed lotus-provider
.PHONY: build-devnets

debug: GOFLAGS+=-tags=debug
@@ -97,6 +97,15 @@ lotus-miner: $(BUILD_DEPS)
.PHONY: lotus-miner
BINS+=lotus-miner

+lotus-provider: $(BUILD_DEPS)
+    rm -f lotus-provider
+    $(GOCC) build $(GOFLAGS) -o lotus-provider ./cmd/lotus-provider
+.PHONY: lotus-provider
+BINS+=lotus-provider
+
+lp2k: GOFLAGS+=-tags=2k
+lp2k: lotus-provider
+
lotus-worker: $(BUILD_DEPS)
    rm -f lotus-worker
    $(GOCC) build $(GOFLAGS) -o lotus-worker ./cmd/lotus-worker
@@ -115,13 +124,13 @@ lotus-gateway: $(BUILD_DEPS)
.PHONY: lotus-gateway
BINS+=lotus-gateway

-build: lotus lotus-miner lotus-worker
+build: lotus lotus-miner lotus-worker lotus-provider
    @[[ $$(type -P "lotus") ]] && echo "Caution: you have \
an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true

.PHONY: build

-install: install-daemon install-miner install-worker
+install: install-daemon install-miner install-worker install-provider

install-daemon:
    install -C ./lotus /usr/local/bin/lotus
@@ -129,6 +138,9 @@ install-daemon:
install-miner:
    install -C ./lotus-miner /usr/local/bin/lotus-miner

+install-provider:
+    install -C ./lotus-provider /usr/local/bin/lotus-provider
+
install-worker:
    install -C ./lotus-worker /usr/local/bin/lotus-worker

@@ -144,6 +156,9 @@ uninstall-daemon:
uninstall-miner:
    rm -f /usr/local/bin/lotus-miner

+uninstall-provider:
+    rm -f /usr/local/bin/lotus-provider
+
uninstall-worker:
    rm -f /usr/local/bin/lotus-worker

@@ -231,9 +246,7 @@ install-daemon-service: install-daemon
    install -C -m 0644 ./scripts/lotus-daemon.service /etc/systemd/system/lotus-daemon.service
    systemctl daemon-reload
    @echo
-    @echo "lotus-daemon service installed."
-    @echo "To start the service, run: 'sudo systemctl start lotus-daemon'"
-    @echo "To enable the service on startup, run: 'sudo systemctl enable lotus-daemon'"
+    @echo "lotus-daemon service installed. Don't forget to run 'sudo systemctl start lotus-daemon' to start it and 'sudo systemctl enable lotus-daemon' for it to be enabled on startup."

install-miner-service: install-miner install-daemon-service
    mkdir -p /etc/systemd/system
@@ -241,9 +254,15 @@ install-miner-service: install-miner install-daemon-service
    install -C -m 0644 ./scripts/lotus-miner.service /etc/systemd/system/lotus-miner.service
    systemctl daemon-reload
    @echo
-    @echo "lotus-miner service installed."
-    @echo "To start the service, run: 'sudo systemctl start lotus-miner'"
-    @echo "To enable the service on startup, run: 'sudo systemctl enable lotus-miner'"
+    @echo "lotus-miner service installed. Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup."
+
+install-provider-service: install-provider install-daemon-service
+    mkdir -p /etc/systemd/system
+    mkdir -p /var/log/lotus
+    install -C -m 0644 ./scripts/lotus-provider.service /etc/systemd/system/lotus-provider.service
+    systemctl daemon-reload
+    @echo
+    @echo "lotus-provider service installed. Don't forget to run 'sudo systemctl start lotus-provider' to start it and 'sudo systemctl enable lotus-provider' for it to be enabled on startup."

install-main-services: install-miner-service

@@ -263,6 +282,12 @@ clean-miner-service:
    rm -f /etc/systemd/system/lotus-miner.service
    systemctl daemon-reload

+clean-provider-service:
+    -systemctl stop lotus-provider
+    -systemctl disable lotus-provider
+    rm -f /etc/systemd/system/lotus-provider.service
+    systemctl daemon-reload
+
clean-main-services: clean-daemon-service

clean-all-services: clean-main-services
@@ -278,10 +303,6 @@ install-completions:
    install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus
    install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus

-unittests:
-    @$(GOCC) test $(shell go list ./... | grep -v /lotus/itests)
-.PHONY: unittests
-
clean:
    rm -rf $(CLEAN) $(BINS)
    -$(MAKE) -C $(FFI_PATH) clean
@@ -303,7 +324,7 @@ actors-code-gen:
    $(GOCC) fmt ./...

actors-gen: actors-code-gen
-    $(GOCC) run ./scripts/fiximports
+    ./scripts/fiximports
.PHONY: actors-gen

bundle-gen:
@@ -337,7 +358,7 @@ docsgen-md-bin: api-gen actors-gen
docsgen-openrpc-bin: api-gen actors-gen
    $(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd

-docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker
+docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-provider

docsgen-md-full: docsgen-md-bin
    ./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
@@ -346,41 +367,46 @@ docsgen-md-storage: docsgen-md-bin
    ./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
docsgen-md-worker: docsgen-md-bin
    ./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
+docsgen-md-provider: docsgen-md-bin
+    ./docgen-md "api/api_lp.go" "Provider" "api" "./api" > documentation/en/api-v0-methods-provider.md

docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway

docsgen-openrpc-full: docsgen-openrpc-bin
-    ./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" > build/openrpc/full.json
+    ./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" -gzip > build/openrpc/full.json.gz
docsgen-openrpc-storage: docsgen-openrpc-bin
-    ./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" > build/openrpc/miner.json
+    ./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" -gzip > build/openrpc/miner.json.gz
docsgen-openrpc-worker: docsgen-openrpc-bin
-    ./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" > build/openrpc/worker.json
+    ./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" -gzip > build/openrpc/worker.json.gz
docsgen-openrpc-gateway: docsgen-openrpc-bin
-    ./docgen-openrpc "api/api_gateway.go" "Gateway" "api" "./api" > build/openrpc/gateway.json
+    ./docgen-openrpc "api/api_gateway.go" "Gateway" "api" "./api" -gzip > build/openrpc/gateway.json.gz

.PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin

fiximports:
-    $(GOCC) run ./scripts/fiximports
+    ./scripts/fiximports

-gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen
-    $(GOCC) run ./scripts/fiximports
+gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci
+    ./scripts/fiximports
    @echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'"
.PHONY: gen

jen: gen

-snap: lotus lotus-miner lotus-worker
+snap: lotus lotus-miner lotus-worker lotus-provider
    snapcraft
    # snapcraft upload ./lotus_*.snap

# separate from gen because it needs binaries
-docsgen-cli: lotus lotus-miner lotus-worker
+docsgen-cli: lotus lotus-miner lotus-worker lotus-provider
    python3 ./scripts/generate-lotus-cli.py
    ./lotus config default > documentation/en/default-lotus-config.toml
    ./lotus-miner config default > documentation/en/default-lotus-miner-config.toml
+    ./lotus-provider config default > documentation/en/default-lotus-provider-config.toml
.PHONY: docsgen-cli

print-%:
    @echo $*=$($*)

+circleci:
+    go generate -x ./.circleci
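Taken together, the provider-related targets in this diff can be driven roughly as follows; this is an illustrative sequence based only on the targets shown above, not documented procedure:

```shell
make clean deps                      # fetch submodules and build dependencies
make lotus-provider                  # build the new binary from ./cmd/lotus-provider
sudo make install-provider           # install to /usr/local/bin/lotus-provider
sudo make install-provider-service   # install and register the lotus-provider systemd unit
make docsgen-cli                     # regenerate CLI docs, including default-lotus-provider-config.toml
```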
@@ -7,12 +7,10 @@
<h1 align="center">Project Lotus - 莲</h1>

<p align="center">
-  ![example workflow](https://github.com/github/docs/actions/workflows/main.yml/badge.svg)
-  <a href="https://github.com/filecoin-project/lotus/actions/workflows/build.yml"><img src="https://github.com/filecoin-project/lotus/actions/workflows/build.yml/badge.svg"></a>
-  <a href="https://github.com/filecoin-project/lotus/actions/workflows/check.yml"><img src="https://github.com/filecoin-project/lotus/actions/workflows/check.yml/badge.svg"></a>
-  <a href="https://github.com/filecoin-project/lotus/actions/workflows/test.yml"><img src="https://github.com/filecoin-project/lotus/actions/workflows/test.yml/badge.svg"></a>
+  <a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
+  <a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
  <a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
-  <a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.21.7-blue.svg" /></a>
+  <a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.18.8-blue.svg" /></a>
  <br>
</p>

api/api_full.go (255 changed lines)

@@ -6,11 +6,16 @@ import (
    "fmt"
    "time"

+   "github.com/google/uuid"
    blocks "github.com/ipfs/go-block-format"
    "github.com/ipfs/go-cid"
+   "github.com/libp2p/go-libp2p/core/peer"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-bitfield"
+   datatransfer "github.com/filecoin-project/go-data-transfer/v2"
+   "github.com/filecoin-project/go-fil-markets/retrievalmarket"
+   "github.com/filecoin-project/go-fil-markets/storagemarket"
    "github.com/filecoin-project/go-jsonrpc"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
@@ -29,6 +34,7 @@ import (
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/chain/types/ethtypes"
    "github.com/filecoin-project/lotus/node/modules/dtypes"
+   "github.com/filecoin-project/lotus/node/repo/imports"
)

//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_full.go -package=mocks . FullNode
@@ -329,7 +335,7 @@ type FullNode interface {
    WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
    // WalletDefaultAddress returns the address marked as default in the wallet.
    WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
-   // WalletSetDefault marks the given address as the default one.
+   // WalletSetDefault marks the given address as as the default one.
    WalletSetDefault(context.Context, address.Address) error //perm:write
    // WalletExport returns the private key of an address in the wallet.
    WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
@@ -342,6 +348,74 @@ type FullNode interface {

    // Other

+   // MethodGroup: Client
+   // The Client methods all have to do with interacting with the storage and
+   // retrieval markets as a client
+
+   // ClientImport imports file under the specified path into filestore.
+   ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error) //perm:admin
+   // ClientRemoveImport removes file import
+   ClientRemoveImport(ctx context.Context, importID imports.ID) error //perm:admin
+   // ClientStartDeal proposes a deal with a miner.
+   ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin
+   // ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
+   ClientStatelessDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:write
+   // ClientGetDealInfo returns the latest information about a given deal.
+   ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read
+   // ClientListDeals returns information about the deals made by the local client.
+   ClientListDeals(ctx context.Context) ([]DealInfo, error) //perm:write
+   // ClientGetDealUpdates returns the status of updated deals
+   ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error) //perm:write
+   // ClientGetDealStatus returns status given a code
+   ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
+   // ClientHasLocal indicates whether a certain CID is locally stored.
+   ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
+   // ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
+   ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error) //perm:read
+   // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
+   ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read
+   // ClientRetrieve initiates the retrieval of a file, as specified in the order.
+   ClientRetrieve(ctx context.Context, params RetrievalOrder) (*RestrievalRes, error) //perm:admin
+   // ClientRetrieveWait waits for retrieval to be complete
+   ClientRetrieveWait(ctx context.Context, deal retrievalmarket.DealID) error //perm:admin
+   // ClientExport exports a file stored in the local filestore to a system file
+   ClientExport(ctx context.Context, exportRef ExportRef, fileRef FileRef) error //perm:admin
+   // ClientListRetrievals returns information about retrievals made by the local client
+   ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write
+   // ClientGetRetrievalUpdates returns status of updated retrieval deals
+   ClientGetRetrievalUpdates(ctx context.Context) (<-chan RetrievalInfo, error) //perm:write
+   // ClientQueryAsk returns a signed StorageAsk from the specified miner.
+   ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*StorageAsk, error) //perm:read
+   // ClientCalcCommP calculates the CommP and data size of the specified CID
+   ClientDealPieceCID(ctx context.Context, root cid.Cid) (DataCIDSize, error) //perm:read
+   // ClientCalcCommP calculates the CommP for a specified file
+   ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error) //perm:write
+   // ClientGenCar generates a CAR file for the specified file.
+   ClientGenCar(ctx context.Context, ref FileRef, outpath string) error //perm:write
+   // ClientDealSize calculates real deal data size
+   ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error) //perm:read
+   // ClientListTransfers returns the status of all ongoing transfers of data
+   ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
+   ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
+   // ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
+   ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
+   // ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
+   ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
+   // ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
+   // which are stuck due to insufficient funds
+   ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write
+
+   // ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
+   ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write
+
+   // ClientUnimport removes references to the specified file from filestore
+   // ClientUnimport(path string)
+
+   // ClientListImports lists imported files and their root CIDs
+   ClientListImports(ctx context.Context) ([]Import, error) //perm:write
+
+   // ClientListAsks() []Ask
+
    // MethodGroup: State
    // The State methods are used to query, inspect, and interact with chain state.
    // Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
@@ -824,15 +898,15 @@ type FullNode interface {
    // Replays all transactions in a block returning the requested traces for each transaction
    EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error) //perm:read

-   // Implmements OpenEthereum-compatible API method trace_transaction
-   EthTraceTransaction(ctx context.Context, txHash string) ([]*ethtypes.EthTraceTransaction, error) //perm:read

    // CreateBackup creates node backup onder the specified file name. The
    // method requires that the lotus daemon is running with the
    // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
    // the path specified when calling CreateBackup is within the base path
    CreateBackup(ctx context.Context, fpath string) error //perm:admin

+   RaftState(ctx context.Context) (*RaftStateData, error) //perm:read
+   RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
+
    // Actor events

    // GetActorEventsRaw returns all user-programmed and built-in actor events that match the given
@@ -867,6 +941,17 @@ type EthSubscriber interface {
    EthSubscription(ctx context.Context, r jsonrpc.RawParams) error // rpc_method:eth_subscription notify:true
}

+type StorageAsk struct {
+   Response *storagemarket.StorageAsk
+
+   DealProtocols []string
+}
+
+type FileRef struct {
+   Path  string
+   IsCAR bool
+}
+
type MinerSectors struct {
    // Live sectors that should be proven.
    Live uint64
@@ -876,6 +961,55 @@ type MinerSectors struct {
    Faulty uint64
}

+type ImportRes struct {
+   Root     cid.Cid
+   ImportID imports.ID
+}
+
+type Import struct {
+   Key imports.ID
+   Err string
+
+   Root *cid.Cid
+
+   // Source is the provenance of the import, e.g. "import", "unknown", else.
+   // Currently useless but may be used in the future.
+   Source string
+
+   // FilePath is the path of the original file. It is important that the file
+   // is retained at this path, because it will be referenced during
+   // the transfer (when we do the UnixFS chunking, we don't duplicate the
+   // leaves, but rather point to chunks of the original data through
+   // positional references).
+   FilePath string
+
+   // CARPath is the path of the CAR file containing the DAG for this import.
+   CARPath string
+}
+
+type DealInfo struct {
+   ProposalCid cid.Cid
+   State       storagemarket.StorageDealStatus
+   Message     string // more information about deal state, particularly errors
+   DealStages  *storagemarket.DealStages
+   Provider    address.Address
+
+   DataRef  *storagemarket.DataRef
+   PieceCID cid.Cid
+   Size     uint64
+
+   PricePerEpoch types.BigInt
+   Duration      uint64
+
+   DealID abi.DealID
+
+   CreationTime time.Time
+   Verified     bool
+
+   TransferChannelID *datatransfer.ChannelID
+   DataTransfer      *DataTransferChannel
+}
+
type MsgLookup struct {
    Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed
    Receipt types.MessageReceipt
@@ -997,21 +1131,51 @@ type MinerPower struct {
    HasMinPower bool
}

+type QueryOffer struct {
+   Err string
+
+   Root  cid.Cid
+   Piece *cid.Cid
+
+   Size                    uint64
+   MinPrice                types.BigInt
+   UnsealPrice             types.BigInt
+   PricePerByte            abi.TokenAmount
+   PaymentInterval         uint64
+   PaymentIntervalIncrease uint64
+   Miner                   address.Address
+   MinerPeer               retrievalmarket.RetrievalPeer
+}
+
+func (o *QueryOffer) Order(client address.Address) RetrievalOrder {
+   return RetrievalOrder{
+       Root:                    o.Root,
+       Piece:                   o.Piece,
+       Size:                    o.Size,
+       Total:                   o.MinPrice,
+       UnsealPrice:             o.UnsealPrice,
+       PaymentInterval:         o.PaymentInterval,
+       PaymentIntervalIncrease: o.PaymentIntervalIncrease,
+       Client:                  client,
+
+       Miner:     o.Miner,
+       MinerPeer: &o.MinerPeer,
+   }
+}
+
type MarketBalance struct {
    Escrow big.Int
    Locked big.Int
}

type MarketDealState struct {
-   SectorNumber     abi.SectorNumber // 0 if not yet included in proven sector (0 is also a valid sector number).
-   SectorStartEpoch abi.ChainEpoch   // -1 if not yet included in proven sector
-   LastUpdatedEpoch abi.ChainEpoch   // -1 if deal state never updated
-   SlashEpoch       abi.ChainEpoch   // -1 if deal never slashed
+   SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector
+   LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated
+   SlashEpoch       abi.ChainEpoch // -1 if deal never slashed
}

func MakeDealState(mds market.DealState) MarketDealState {
    return MarketDealState{
-       SectorNumber:     mds.SectorNumber(),
        SectorStartEpoch: mds.SectorStartEpoch(),
        LastUpdatedEpoch: mds.LastUpdatedEpoch(),
        SlashEpoch:       mds.SlashEpoch(),
@@ -1022,10 +1186,6 @@ type mstate struct {
    s MarketDealState
}

-func (m mstate) SectorNumber() abi.SectorNumber {
-   return m.s.SectorNumber
-}
-
func (m mstate) SectorStartEpoch() abi.ChainEpoch {
    return m.s.SectorStartEpoch
}
@@ -1051,6 +1211,27 @@ type MarketDeal struct {
    State MarketDealState
}

+type RetrievalOrder struct {
+   Root         cid.Cid
+   Piece        *cid.Cid
+   DataSelector *Selector
+
+   // todo: Size/Total are only used for calculating price per byte; we should let users just pass that
+   Size  uint64
+   Total types.BigInt
+
+   UnsealPrice             types.BigInt
+   PaymentInterval         uint64
+   PaymentIntervalIncrease uint64
+   Client                  address.Address
+   Miner                   address.Address
+   MinerPeer               *retrievalmarket.RetrievalPeer
+
+   RemoteStore *RemoteStoreID `json:"RemoteStore,omitempty"`
+}
+
+type RemoteStoreID = uuid.UUID
+
type InvocResult struct {
    MsgCid cid.Cid
    Msg    *types.Message
@@ -1061,6 +1242,39 @@ type InvocResult struct {
    Duration time.Duration
}

+type MethodCall struct {
+   types.MessageReceipt
+   Error string
+}
+
+type StartDealParams struct {
+   Data               *storagemarket.DataRef
+   Wallet             address.Address
+   Miner              address.Address
+   EpochPrice         types.BigInt
+   MinBlocksDuration  uint64
+   ProviderCollateral big.Int
+   DealStartEpoch     abi.ChainEpoch
+   FastRetrieval      bool
+   VerifiedDeal       bool
+}
+
+func (s *StartDealParams) UnmarshalJSON(raw []byte) (err error) {
+   type sdpAlias StartDealParams
+
+   sdp := sdpAlias{
+       FastRetrieval: true,
+   }
+
+   if err := json.Unmarshal(raw, &sdp); err != nil {
+       return err
+   }
+
+   *s = StartDealParams(sdp)
+
+   return nil
+}
+
type IpldObject struct {
    Cid cid.Cid
    Obj interface{}
@@ -1172,6 +1386,21 @@ type BlockTemplate struct {
    WinningPoStProof []builtin.PoStProof
}

+type DataSize struct {
+   PayloadSize int64
+   PieceSize   abi.PaddedPieceSize
+}
+
+type DataCIDSize struct {
+   PayloadSize int64
+   PieceSize   abi.PaddedPieceSize
+   PieceCID    cid.Cid
+}
+
+type CommPRet struct {
+   Root cid.Cid
+   Size abi.UnpaddedPieceSize
+}
type HeadChange struct {
    Type string
    Val  *types.TipSet

@@ -77,7 +77,6 @@ type Gateway interface {
    StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)
    StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*MarketDeal, error)
    StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (MinerInfo, error)
-   StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error)
    StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
    StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
    StateNetworkName(context.Context) (dtypes.NetworkName, error)
@@ -91,8 +90,6 @@ type Gateway interface {
    Version(context.Context) (APIVersion, error)
    Discover(context.Context) (apitypes.OpenRPCDocument, error)

-   EthAddressToFilecoinAddress(ctx context.Context, ethAddress ethtypes.EthAddress) (address.Address, error)
-   FilecoinAddressToEthAddress(ctx context.Context, filecoinAddress address.Address) (ethtypes.EthAddress, error)
    EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error)
    EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error)
    EthGetBlockTransactionCountByNumber(ctx context.Context, blkNum ethtypes.EthUint64) (ethtypes.EthUint64, error)
@@ -132,7 +129,6 @@ type Gateway interface {
    Web3ClientVersion(ctx context.Context) (string, error)
    EthTraceBlock(ctx context.Context, blkNum string) ([]*ethtypes.EthTraceBlock, error)
    EthTraceReplayBlockTransactions(ctx context.Context, blkNum string, traceTypes []string) ([]*ethtypes.EthTraceReplayBlockTransaction, error)
-   EthTraceTransaction(ctx context.Context, txHash string) ([]*ethtypes.EthTraceTransaction, error)

    GetActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) ([]*types.ActorEvent, error)
    SubscribeActorEventsRaw(ctx context.Context, filter *types.ActorEventFilter) (<-chan *types.ActorEvent, error)
10
api/api_lp.go
Normal file
@@ -0,0 +1,10 @@
+package api
+
+import "context"
+
+type LotusProvider interface {
+	Version(context.Context) (Version, error) //perm:admin
+
+	// Trigger shutdown
+	Shutdown(context.Context) error //perm:admin
+}
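The new file above is the entire surface of the provider API. As a minimal sketch only (the package, struct, and field names below are illustrative assumptions, not part of this change), a type satisfying the contract could look like this:

package providerexample

import (
	"context"

	"github.com/filecoin-project/lotus/api"
)

// toyProvider is a hypothetical in-process implementation used only to show
// how small the LotusProvider contract is; a real provider wires these calls
// into its runtime.
type toyProvider struct {
	version  api.Version
	shutdown chan struct{}
}

func (p *toyProvider) Version(ctx context.Context) (api.Version, error) {
	return p.version, nil
}

func (p *toyProvider) Shutdown(ctx context.Context) error {
	close(p.shutdown) // signal the owning process to exit
	return nil
}

// Compile-time check that toyProvider really satisfies api.LotusProvider.
var _ api.LotusProvider = (*toyProvider)(nil)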
@@ -66,6 +66,11 @@ type Net interface {
 	ID(context.Context) (peer.ID, error) //perm:read
 }
 
+type CommonNet interface {
+	Common
+	Net
+}
+
 type NatInfo struct {
 	Reachability network.Reachability
 	PublicAddrs  []string
@@ -7,9 +7,14 @@ import (
 
 	"github.com/google/uuid"
 	"github.com/ipfs/go-cid"
+	"github.com/libp2p/go-libp2p/core/peer"
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-bitfield"
+	datatransfer "github.com/filecoin-project/go-data-transfer/v2"
+	"github.com/filecoin-project/go-fil-markets/piecestore"
+	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
+	"github.com/filecoin-project/go-fil-markets/storagemarket"
 	"github.com/filecoin-project/go-jsonrpc/auth"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/big"
@@ -39,6 +44,7 @@ import (
 // StorageMiner is a low-level interface to the Filecoin network storage miner node
 type StorageMiner interface {
 	Common
+	Net
 
 	ActorAddress(context.Context) (address.Address, error) //perm:read
 
@@ -194,11 +200,11 @@ type StorageMiner interface {
 	// StorageBestAlloc returns list of paths where sector files of the specified type can be allocated, ordered by preference.
 	// Paths with more weight and more % of free space are preferred.
 	// Note: This method doesn't filter paths based on AllowTypes/DenyTypes.
-	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType, miner abi.ActorID) ([]storiface.StorageInfo, error) //perm:admin
+	StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]storiface.StorageInfo, error) //perm:admin
 	StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
 	StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
 	StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin
 	StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) //perm:admin
 
 	StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
 	StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) //perm:admin
|
|||||||
StorageDetachLocal(ctx context.Context, path string) error //perm:admin
|
StorageDetachLocal(ctx context.Context, path string) error //perm:admin
|
||||||
StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin
|
StorageRedeclareLocal(ctx context.Context, id *storiface.ID, dropMissing bool) error //perm:admin
|
||||||
|
|
||||||
MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read
|
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error //perm:write
|
||||||
|
MarketListDeals(ctx context.Context) ([]*MarketDeal, error) //perm:read
|
||||||
|
|
||||||
|
// MarketListRetrievalDeals is deprecated, returns empty list
|
||||||
|
MarketListRetrievalDeals(ctx context.Context) ([]struct{}, error) //perm:read
|
||||||
|
MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error) //perm:read
|
||||||
|
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) //perm:read
|
||||||
|
MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error //perm:admin
|
||||||
|
MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) //perm:read
|
||||||
|
MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error //perm:admin
|
||||||
|
MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) //perm:read
|
||||||
|
MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write
|
||||||
|
MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write
|
||||||
|
// MarketDataTransferDiagnostics generates debugging information about current data transfers over graphsync
|
||||||
|
MarketDataTransferDiagnostics(ctx context.Context, p peer.ID) (*TransferDiagnostics, error) //perm:write
|
||||||
|
// MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
|
||||||
|
MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
|
||||||
|
// MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
|
||||||
|
MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
|
||||||
|
MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write
|
||||||
|
MarketPublishPendingDeals(ctx context.Context) error //perm:admin
|
||||||
|
MarketRetryPublishDeal(ctx context.Context, propcid cid.Cid) error //perm:admin
|
||||||
|
|
||||||
|
// DagstoreListShards returns information about all shards known to the
|
||||||
|
// DAG store. Only available on nodes running the markets subsystem.
|
||||||
|
DagstoreListShards(ctx context.Context) ([]DagstoreShardInfo, error) //perm:read
|
||||||
|
|
||||||
|
// DagstoreInitializeShard initializes an uninitialized shard.
|
||||||
|
//
|
||||||
|
// Initialization consists of fetching the shard's data (deal payload) from
|
||||||
|
// the storage subsystem, generating an index, and persisting the index
|
||||||
|
// to facilitate later retrievals, and/or to publish to external sources.
|
||||||
|
//
|
||||||
|
// This operation is intended to complement the initial migration. The
|
||||||
|
// migration registers a shard for every unique piece CID, with lazy
|
||||||
|
// initialization. Thus, shards are not initialized immediately to avoid
|
||||||
|
// IO activity competing with proving. Instead, shard are initialized
|
||||||
|
// when first accessed. This method forces the initialization of a shard by
|
||||||
|
// accessing it and immediately releasing it. This is useful to warm up the
|
||||||
|
// cache to facilitate subsequent retrievals, and to generate the indexes
|
||||||
|
// to publish them externally.
|
||||||
|
//
|
||||||
|
// This operation fails if the shard is not in ShardStateNew state.
|
||||||
|
// It blocks until initialization finishes.
|
||||||
|
DagstoreInitializeShard(ctx context.Context, key string) error //perm:write
|
||||||
|
|
||||||
|
// DagstoreRecoverShard attempts to recover a failed shard.
|
||||||
|
//
|
||||||
|
// This operation fails if the shard is not in ShardStateErrored state.
|
||||||
|
// It blocks until recovery finishes. If recovery failed, it returns the
|
||||||
|
// error.
|
||||||
|
DagstoreRecoverShard(ctx context.Context, key string) error //perm:write
|
||||||
|
|
||||||
|
// DagstoreInitializeAll initializes all uninitialized shards in bulk,
|
||||||
|
// according to the policy passed in the parameters.
|
||||||
|
//
|
||||||
|
// It is recommended to set a maximum concurrency to avoid extreme
|
||||||
|
// IO pressure if the storage subsystem has a large amount of deals.
|
||||||
|
//
|
||||||
|
// It returns a stream of events to report progress.
|
||||||
|
DagstoreInitializeAll(ctx context.Context, params DagstoreInitializeAllParams) (<-chan DagstoreInitializeAllEvent, error) //perm:write
|
||||||
|
|
||||||
|
// DagstoreGC runs garbage collection on the DAG store.
|
||||||
|
DagstoreGC(ctx context.Context) ([]DagstoreShardResult, error) //perm:admin
|
||||||
|
|
||||||
|
// DagstoreRegisterShard registers a shard manually with dagstore with given pieceCID
|
||||||
|
DagstoreRegisterShard(ctx context.Context, key string) error //perm:admin
|
||||||
|
|
||||||
|
// IndexerAnnounceDeal informs indexer nodes that a new deal was received,
|
||||||
|
// so they can download its index
|
||||||
|
IndexerAnnounceDeal(ctx context.Context, proposalCid cid.Cid) error //perm:admin
|
||||||
|
|
||||||
|
// IndexerAnnounceAllDeals informs the indexer nodes aboutall active deals.
|
||||||
|
IndexerAnnounceAllDeals(ctx context.Context) error //perm:admin
|
||||||
|
|
||||||
|
// DagstoreLookupPieces returns information about shards that contain the given CID.
|
||||||
|
DagstoreLookupPieces(ctx context.Context, cid cid.Cid) ([]DagstoreShardInfo, error) //perm:admin
|
||||||
|
|
||||||
// RuntimeSubsystems returns the subsystems that are enabled
|
// RuntimeSubsystems returns the subsystems that are enabled
|
||||||
// in this instance.
|
// in this instance.
|
||||||
RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read
|
RuntimeSubsystems(ctx context.Context) (MinerSubsystems, error) //perm:read
|
||||||
|
|
||||||
|
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error //perm:admin
|
||||||
|
DealsList(ctx context.Context) ([]*MarketDeal, error) //perm:admin
|
||||||
|
DealsConsiderOnlineStorageDeals(context.Context) (bool, error) //perm:admin
|
||||||
|
DealsSetConsiderOnlineStorageDeals(context.Context, bool) error //perm:admin
|
||||||
|
DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) //perm:admin
|
||||||
|
DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error //perm:admin
|
||||||
|
DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) //perm:admin
|
||||||
|
DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error //perm:admin
|
||||||
|
DealsConsiderOfflineStorageDeals(context.Context) (bool, error) //perm:admin
|
||||||
|
DealsSetConsiderOfflineStorageDeals(context.Context, bool) error //perm:admin
|
||||||
|
DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) //perm:admin
|
||||||
|
DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error //perm:admin
|
||||||
|
DealsConsiderVerifiedStorageDeals(context.Context) (bool, error) //perm:admin
|
||||||
|
DealsSetConsiderVerifiedStorageDeals(context.Context, bool) error //perm:admin
|
||||||
|
DealsConsiderUnverifiedStorageDeals(context.Context) (bool, error) //perm:admin
|
||||||
|
DealsSetConsiderUnverifiedStorageDeals(context.Context, bool) error //perm:admin
|
||||||
|
|
||||||
|
PiecesListPieces(ctx context.Context) ([]cid.Cid, error) //perm:read
|
||||||
|
PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error) //perm:read
|
||||||
|
PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error) //perm:read
|
||||||
|
PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error) //perm:read
|
||||||
|
|
||||||
// CreateBackup creates node backup onder the specified file name. The
|
// CreateBackup creates node backup onder the specified file name. The
|
||||||
// method requires that the lotus-miner is running with the
|
// method requires that the lotus-miner is running with the
|
||||||
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
|
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
|
||||||
@ -367,6 +471,37 @@ type SectorOffset struct {
|
|||||||
Offset abi.PaddedPieceSize
|
Offset abi.PaddedPieceSize
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DagstoreShardInfo is the serialized form of dagstore.DagstoreShardInfo that
|
||||||
|
// we expose through JSON-RPC to avoid clients having to depend on the
|
||||||
|
// dagstore lib.
|
||||||
|
type DagstoreShardInfo struct {
|
||||||
|
Key string
|
||||||
|
State string
|
||||||
|
Error string
|
||||||
|
}
|
||||||
|
|
||||||
|
// DagstoreShardResult enumerates results per shard.
|
||||||
|
type DagstoreShardResult struct {
|
||||||
|
Key string
|
||||||
|
Success bool
|
||||||
|
Error string
|
||||||
|
}
|
||||||
|
|
||||||
|
type DagstoreInitializeAllParams struct {
|
||||||
|
MaxConcurrency int
|
||||||
|
IncludeSealed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// DagstoreInitializeAllEvent represents an initialization event.
|
||||||
|
type DagstoreInitializeAllEvent struct {
|
||||||
|
Key string
|
||||||
|
Event string // "start", "end"
|
||||||
|
Success bool
|
||||||
|
Error string
|
||||||
|
Total int
|
||||||
|
Current int
|
||||||
|
}
|
||||||
|
|
||||||
type NumAssignerMeta struct {
|
type NumAssignerMeta struct {
|
||||||
Reserved bitfield.BitField
|
Reserved bitfield.BitField
|
||||||
Allocated bitfield.BitField
|
Allocated bitfield.BitField
|
||||||
|
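DagstoreInitializeAll reports progress through the DagstoreInitializeAllEvent channel defined above, which the node closes once the bulk run finishes. A minimal consumer sketch, assuming an already-connected api.StorageMiner client named miner (the concurrency value and the logging are illustrative, not taken from this change):

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

// initializeAllShards drains the event stream returned by DagstoreInitializeAll.
func initializeAllShards(ctx context.Context, miner api.StorageMiner) error {
	events, err := miner.DagstoreInitializeAll(ctx, api.DagstoreInitializeAllParams{
		MaxConcurrency: 4,     // bound IO pressure, as the method's doc comment recommends
		IncludeSealed:  false, // skip shards whose payload only exists in sealed sectors
	})
	if err != nil {
		return err
	}
	for ev := range events { // channel closes when the bulk initialization completes
		if ev.Event == "end" && !ev.Success {
			fmt.Printf("shard %s failed: %s (%d/%d)\n", ev.Key, ev.Error, ev.Current, ev.Total)
			continue
		}
		fmt.Printf("shard %s: %s (%d/%d)\n", ev.Key, ev.Event, ev.Current, ev.Total)
	}
	return nil
}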
@@ -15,9 +15,19 @@ import (
 	"github.com/filecoin-project/lotus/lib/rpcenc"
 )
 
+// NewProviderRpc creates a new http jsonrpc client.
+func NewProviderRpc(ctx context.Context, addr string, requestHeader http.Header) (api.LotusProvider, jsonrpc.ClientCloser, error) {
+	var res v1api.LotusProviderStruct
+
+	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
+		api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
+
+	return &res, closer, err
+}
+
 // NewCommonRPCV0 creates a new http jsonrpc client.
-func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
-	var res v0api.CommonStruct
+func NewCommonRPCV0(ctx context.Context, addr string, requestHeader http.Header) (api.CommonNet, jsonrpc.ClientCloser, error) {
+	var res v0api.CommonNetStruct
 	closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
 		api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
 
@@ -106,7 +106,7 @@ func NewLotusOpenRPCDocument(Comments, GroupDocs map[string]string) *go_openrpc_
 		title := "Lotus RPC API"
 		info.Title = (*meta_schema.InfoObjectProperties)(&title)
 
-		version := build.NodeBuildVersion
+		version := build.BuildVersion
 		info.Version = (*meta_schema.InfoObjectVersion)(&version)
 		return info
 	},
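A short usage sketch for the new constructor, assuming the client package import path github.com/filecoin-project/lotus/api/client and placeholder endpoint/token values (not part of this change):

package example

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func dialProvider(ctx context.Context) error {
	// Placeholder address and token; real values come from the provider's config/repo.
	addr := "ws://127.0.0.1:12300/rpc/v0"
	header := http.Header{"Authorization": []string{"Bearer <admin-token>"}}

	prov, closer, err := client.NewProviderRpc(ctx, addr, header)
	if err != nil {
		return err
	}
	defer closer()

	v, err := prov.Version(ctx) // requires admin permission per the //perm:admin tag
	if err != nil {
		return err
	}
	fmt.Println("lotus-provider version:", v)
	return nil
}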
@@ -16,6 +16,8 @@ import (
 	"github.com/google/uuid"
 	blocks "github.com/ipfs/go-block-format"
 	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-graphsync"
+	textselector "github.com/ipld/go-ipld-selector-text-lite"
 	pubsub "github.com/libp2p/go-libp2p-pubsub"
 	"github.com/libp2p/go-libp2p/core/metrics"
 	"github.com/libp2p/go-libp2p/core/network"
@@ -25,6 +27,9 @@ import (
 
 	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-bitfield"
+	datatransfer "github.com/filecoin-project/go-data-transfer/v2"
+	"github.com/filecoin-project/go-fil-markets/filestore"
+	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
 	"github.com/filecoin-project/go-jsonrpc/auth"
 	"github.com/filecoin-project/go-state-types/abi"
 	"github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
@@ -39,6 +44,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/types/ethtypes"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
+	"github.com/filecoin-project/lotus/node/repo/imports"
 	sealing "github.com/filecoin-project/lotus/storage/pipeline"
 	"github.com/filecoin-project/lotus/storage/sealer/sealtasks"
 	"github.com/filecoin-project/lotus/storage/sealer/storiface"
@@ -90,6 +96,11 @@ func init() {
 	addExample(pid)
 	addExample(&pid)
 
+	storeIDExample := imports.ID(50)
+	textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash")
+	apiSelExample := api.Selector("Links/21/Hash/Links/42/Hash")
+	clientEvent := retrievalmarket.ClientEventDealAccepted
+
 	block := blocks.Block(&blocks.BasicBlock{})
 	ExampleValues[reflect.TypeOf(&block).Elem()] = block
 
@@ -119,13 +130,22 @@ func init() {
 	addExample(api.FullAPIVersion1)
 	addExample(api.PCHInbound)
 	addExample(time.Minute)
+	addExample(graphsync.NewRequestID())
+	addExample(datatransfer.TransferID(3))
+	addExample(datatransfer.Ongoing)
+	addExample(storeIDExample)
+	addExample(&storeIDExample)
+	addExample(clientEvent)
+	addExample(&clientEvent)
+	addExample(retrievalmarket.ClientEventDealAccepted)
+	addExample(retrievalmarket.DealStatusNew)
+	addExample(&textSelExample)
+	addExample(&apiSelExample)
 	addExample(network.ReachabilityPublic)
 	addExample(build.TestNetworkVersion)
 	allocationId := verifreg.AllocationId(0)
 	addExample(allocationId)
 	addExample(&allocationId)
-	addExample(miner.SectorOnChainInfoFlags(0))
 	addExample(map[verifreg.AllocationId]verifreg.Allocation{})
 	claimId := verifreg.ClaimId(0)
 	addExample(claimId)
@@ -185,9 +205,10 @@ func init() {
 	ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr
 
 	// miner specific
+	addExample(filestore.Path(".lotusminer/fstmp123"))
 	si := uint64(12)
 	addExample(&si)
+	addExample(retrievalmarket.DealID(5))
 	addExample(map[string]cid.Cid{})
 	addExample(map[string][]api.SealedRef{
 		"98000": {
@@ -291,8 +312,17 @@ func init() {
 		api.SubsystemMining,
 		api.SubsystemSealing,
 		api.SubsystemSectorStorage,
+		api.SubsystemMarkets,
+	})
+	addExample(api.DagstoreShardResult{
+		Key:   "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq",
+		Error: "<error>",
+	})
+	addExample(api.DagstoreShardInfo{
+		Key:   "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq",
+		State: "ShardStateAvailable",
+		Error: "<error>",
 	})
 
 	addExample(storiface.ResourceTable)
 	addExample(network.ScopeStat{
 		Memory: 123,
@@ -326,6 +356,10 @@ func init() {
 	addExample(map[string]bitfield.BitField{
 		"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
 	})
+	addExample(&api.RaftStateData{
+		NonceMap: make(map[address.Address]uint64),
+		MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
+	})
 
 	addExample(http.Header{
 		"Authorization": []string{"Bearer ey.."},
@@ -425,6 +459,10 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []r
 		i = &api.GatewayStruct{}
 		t = reflect.TypeOf(new(struct{ api.Gateway })).Elem()
 		permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal))
+	case "Provider":
+		i = &api.LotusProviderStruct{}
+		t = reflect.TypeOf(new(struct{ api.LotusProvider })).Elem()
+		permStruct = append(permStruct, reflect.TypeOf(api.LotusProviderStruct{}.Internal))
 	default:
 		panic("unknown type")
 	}
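The docgen init blocks above work by registering one concrete example value per Go type so the generator can render request/response samples. Purely as an illustration, inside that same docgen package a hypothetical new type (FakeStatus is not part of this change) would be registered the same way, reusing the addExample helper shown above:

// Hypothetical registration for a made-up type, mirroring the pattern above.
type FakeStatus struct {
	Healthy bool
}

func registerFakeStatusExample() {
	addExample(FakeStatus{Healthy: true})
	addExample(&FakeStatus{Healthy: true}) // pointer types need their own example value
}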
@@ -42,7 +42,6 @@ func CreateEthRPCAliases(as apitypes.Aliaser) {
 
 	as.AliasMethod("trace_block", "Filecoin.EthTraceBlock")
 	as.AliasMethod("trace_replayBlockTransactions", "Filecoin.EthTraceReplayBlockTransactions")
-	as.AliasMethod("trace_transaction", "Filecoin.EthTraceTransaction")
 
 	as.AliasMethod("net_version", "Filecoin.NetVersion")
 	as.AliasMethod("net_listening", "Filecoin.NetListening")
@@ -13,6 +13,9 @@ const (
 	// SubsystemUnknown is a placeholder for the zero value. It should never
 	// be used.
 	SubsystemUnknown MinerSubsystem = iota
+	// SubsystemMarkets signifies the storage and retrieval
+	// deal-making subsystem.
+	SubsystemMarkets
 	// SubsystemMining signifies the mining subsystem.
 	SubsystemMining
 	// SubsystemSealing signifies the sealing subsystem.
@@ -23,6 +26,7 @@ const (
 
 var MinerSubsystemToString = map[MinerSubsystem]string{
 	SubsystemUnknown:       "Unknown",
+	SubsystemMarkets:       "Markets",
 	SubsystemMining:        "Mining",
 	SubsystemSealing:       "Sealing",
 	SubsystemSectorStorage: "SectorStorage",
@@ -30,6 +34,7 @@ var MinerSubsystemToString = map[MinerSubsystem]string{
 
 var MinerSubsystemToID = map[string]MinerSubsystem{
 	"Unknown":       SubsystemUnknown,
+	"Markets":       SubsystemMarkets,
 	"Mining":        SubsystemMining,
 	"Sealing":       SubsystemSealing,
 	"SectorStorage": SubsystemSectorStorage,
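The two maps above are meant to be used as a pair for turning subsystem values into names and back. A small sketch of that round trip (the helper names are illustrative assumptions, not part of this change):

// Illustrative helpers built on the lookup tables above.
func subsystemName(ss MinerSubsystem) string {
	if name, ok := MinerSubsystemToString[ss]; ok {
		return name
	}
	return MinerSubsystemToString[SubsystemUnknown]
}

func parseSubsystem(name string) MinerSubsystem {
	if ss, ok := MinerSubsystemToID[name]; ok {
		return ss
	}
	return SubsystemUnknown // unrecognized input falls back to the zero value
}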
@@ -21,13 +21,14 @@ import (
 
 	address "github.com/filecoin-project/go-address"
 	bitfield "github.com/filecoin-project/go-bitfield"
+	datatransfer "github.com/filecoin-project/go-data-transfer/v2"
+	retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
 	jsonrpc "github.com/filecoin-project/go-jsonrpc"
 	auth "github.com/filecoin-project/go-jsonrpc/auth"
 	abi "github.com/filecoin-project/go-state-types/abi"
 	big "github.com/filecoin-project/go-state-types/big"
-	miner "github.com/filecoin-project/go-state-types/builtin/v13/miner"
 	paych "github.com/filecoin-project/go-state-types/builtin/v8/paych"
-	miner0 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
+	miner "github.com/filecoin-project/go-state-types/builtin/v9/miner"
 	verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
 	crypto "github.com/filecoin-project/go-state-types/crypto"
 	dline "github.com/filecoin-project/go-state-types/dline"
@@ -35,11 +36,12 @@ import (
 
 	api "github.com/filecoin-project/lotus/api"
 	apitypes "github.com/filecoin-project/lotus/api/types"
-	miner1 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+	miner0 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
 	types "github.com/filecoin-project/lotus/chain/types"
 	ethtypes "github.com/filecoin-project/lotus/chain/types/ethtypes"
 	alerting "github.com/filecoin-project/lotus/journal/alerting"
 	dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
+	imports "github.com/filecoin-project/lotus/node/repo/imports"
 )
 
 // MockFullNode is a mock of FullNode interface.
@ -508,6 +510,418 @@ func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *g
|
|||||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1)
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ClientCalcCommP mocks base method.
|
||||||
|
func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(*api.CommPRet)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientCalcCommP indicates an expected call of ClientCalcCommP.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientCancelDataTransfer mocks base method.
|
||||||
|
func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientCancelRetrievalDeal mocks base method.
|
||||||
|
func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientDataTransferUpdates mocks base method.
|
||||||
|
func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0)
|
||||||
|
ret0, _ := ret[0].(<-chan api.DataTransferChannel)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientDealPieceCID mocks base method.
|
||||||
|
func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(api.DataCIDSize)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientDealPieceCID indicates an expected call of ClientDealPieceCID.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientDealSize mocks base method.
|
||||||
|
func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(api.DataSize)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientDealSize indicates an expected call of ClientDealSize.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientExport mocks base method.
|
||||||
|
func (m *MockFullNode) ClientExport(arg0 context.Context, arg1 api.ExportRef, arg2 api.FileRef) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientExport", arg0, arg1, arg2)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientExport indicates an expected call of ClientExport.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientExport(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientExport", reflect.TypeOf((*MockFullNode)(nil).ClientExport), arg0, arg1, arg2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientFindData mocks base method.
|
||||||
|
func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2)
|
||||||
|
ret0, _ := ret[0].([]api.QueryOffer)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientFindData indicates an expected call of ClientFindData.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientGenCar mocks base method.
|
||||||
|
func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientGenCar indicates an expected call of ClientGenCar.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientGetDealInfo mocks base method.
|
||||||
|
func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(*api.DealInfo)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientGetDealInfo indicates an expected call of ClientGetDealInfo.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientGetDealStatus mocks base method.
|
||||||
|
func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(string)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientGetDealStatus indicates an expected call of ClientGetDealStatus.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientGetDealUpdates mocks base method.
|
||||||
|
func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0)
|
||||||
|
ret0, _ := ret[0].(<-chan api.DealInfo)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientGetRetrievalUpdates mocks base method.
|
||||||
|
func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0)
|
||||||
|
ret0, _ := ret[0].(<-chan api.RetrievalInfo)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientHasLocal mocks base method.
|
||||||
|
func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(bool)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientHasLocal indicates an expected call of ClientHasLocal.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientImport mocks base method.
|
||||||
|
func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientImport", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(*api.ImportRes)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientImport indicates an expected call of ClientImport.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListDataTransfers mocks base method.
|
||||||
|
func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0)
|
||||||
|
ret0, _ := ret[0].([]api.DataTransferChannel)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListDataTransfers indicates an expected call of ClientListDataTransfers.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListDeals mocks base method.
|
||||||
|
func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientListDeals", arg0)
|
||||||
|
ret0, _ := ret[0].([]api.DealInfo)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListDeals indicates an expected call of ClientListDeals.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListImports mocks base method.
|
||||||
|
func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientListImports", arg0)
|
||||||
|
ret0, _ := ret[0].([]api.Import)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListImports indicates an expected call of ClientListImports.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListRetrievals mocks base method.
|
||||||
|
func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientListRetrievals", arg0)
|
||||||
|
ret0, _ := ret[0].([]api.RetrievalInfo)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListRetrievals indicates an expected call of ClientListRetrievals.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientMinerQueryOffer mocks base method.
|
||||||
|
func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3)
|
||||||
|
ret0, _ := ret[0].(api.QueryOffer)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientQueryAsk mocks base method.
|
||||||
|
func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*api.StorageAsk, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2)
|
||||||
|
ret0, _ := ret[0].(*api.StorageAsk)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientQueryAsk indicates an expected call of ClientQueryAsk.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRemoveImport mocks base method.
|
||||||
|
func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 imports.ID) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRemoveImport indicates an expected call of ClientRemoveImport.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRestartDataTransfer mocks base method.
|
||||||
|
func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieve mocks base method.
|
||||||
|
func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder) (*api.RestrievalRes, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(*api.RestrievalRes)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieve indicates an expected call of ClientRetrieve.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieveTryRestartInsufficientFunds mocks base method.
|
||||||
|
func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieveWait mocks base method.
|
||||||
|
func (m *MockFullNode) ClientRetrieveWait(arg0 context.Context, arg1 retrievalmarket.DealID) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientRetrieveWait", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieveWait indicates an expected call of ClientRetrieveWait.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientRetrieveWait(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWait", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWait), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientStartDeal mocks base method.
|
||||||
|
func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(*cid.Cid)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientStartDeal indicates an expected call of ClientStartDeal.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientStatelessDeal mocks base method.
|
||||||
|
func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(*cid.Cid)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientStatelessDeal indicates an expected call of ClientStatelessDeal.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
 // Closing mocks base method.
 func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) {
 	m.ctrl.T.Helper()
@@ -1107,21 +1521,6 @@ func (mr *MockFullNodeMockRecorder) EthTraceReplayBlockTransactions(arg0, arg1,
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceReplayBlockTransactions", reflect.TypeOf((*MockFullNode)(nil).EthTraceReplayBlockTransactions), arg0, arg1, arg2)
 }
 
-// EthTraceTransaction mocks base method.
-func (m *MockFullNode) EthTraceTransaction(arg0 context.Context, arg1 string) ([]*ethtypes.EthTraceTransaction, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "EthTraceTransaction", arg0, arg1)
-	ret0, _ := ret[0].([]*ethtypes.EthTraceTransaction)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// EthTraceTransaction indicates an expected call of EthTraceTransaction.
-func (mr *MockFullNodeMockRecorder) EthTraceTransaction(arg0, arg1 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthTraceTransaction", reflect.TypeOf((*MockFullNode)(nil).EthTraceTransaction), arg0, arg1)
-}
-
 // EthUninstallFilter mocks base method.
 func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) {
 	m.ctrl.T.Helper()
@@ -2535,6 +2934,36 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, a
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
 }
 
+// RaftLeader mocks base method.
+func (m *MockFullNode) RaftLeader(arg0 context.Context) (peer.ID, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "RaftLeader", arg0)
+	ret0, _ := ret[0].(peer.ID)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// RaftLeader indicates an expected call of RaftLeader.
+func (mr *MockFullNodeMockRecorder) RaftLeader(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftLeader", reflect.TypeOf((*MockFullNode)(nil).RaftLeader), arg0)
+}
+
+// RaftState mocks base method.
+func (m *MockFullNode) RaftState(arg0 context.Context) (*api.RaftStateData, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "RaftState", arg0)
+	ret0, _ := ret[0].(*api.RaftStateData)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// RaftState indicates an expected call of RaftState.
+func (mr *MockFullNodeMockRecorder) RaftState(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftState", reflect.TypeOf((*MockFullNode)(nil).RaftState), arg0)
+}
+
|
// Session mocks base method.
|
||||||
func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
|
func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
@ -3210,7 +3639,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{})
|
|||||||
}
|
}
|
||||||
|
|
||||||
// StateMinerInitialPledgeCollateral mocks base method.
|
// StateMinerInitialPledgeCollateral mocks base method.
|
||||||
func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
|
func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
|
ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
|
||||||
ret0, _ := ret[0].(big.Int)
|
ret0, _ := ret[0].(big.Int)
|
||||||
@ -3255,7 +3684,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}
|
|||||||
}
|
}
|
||||||
|
|
||||||
// StateMinerPreCommitDepositForPower mocks base method.
|
// StateMinerPreCommitDepositForPower mocks base method.
|
||||||
func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
|
func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3)
|
ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3)
|
||||||
ret0, _ := ret[0].(big.Int)
|
ret0, _ := ret[0].(big.Int)
|
||||||
@ -3420,10 +3849,10 @@ func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1, arg2, arg3, arg4
|
|||||||
}
|
}
|
||||||
|
|
||||||
// StateSectorExpiration mocks base method.
|
// StateSectorExpiration mocks base method.
|
||||||
func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorExpiration, error) {
|
func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorExpiration, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3)
|
ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3)
|
||||||
ret0, _ := ret[0].(*miner1.SectorExpiration)
|
ret0, _ := ret[0].(*miner0.SectorExpiration)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
@ -3450,10 +3879,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 in
|
|||||||
}
|
}
|
||||||
|
|
||||||
// StateSectorPartition mocks base method.
|
// StateSectorPartition mocks base method.
|
||||||
func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorLocation, error) {
|
func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorLocation, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3)
|
ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3)
|
||||||
ret0, _ := ret[0].(*miner1.SectorLocation)
|
ret0, _ := ret[0].(*miner0.SectorLocation)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
@ -3465,10 +3894,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3
|
|||||||
}
|
}
|
||||||
|
|
||||||
// StateSectorPreCommitInfo mocks base method.
|
// StateSectorPreCommitInfo mocks base method.
|
||||||
func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorPreCommitOnChainInfo, error) {
|
func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) {
|
||||||
m.ctrl.T.Helper()
|
m.ctrl.T.Helper()
|
||||||
ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3)
|
ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3)
|
||||||
ret0, _ := ret[0].(*miner0.SectorPreCommitOnChainInfo)
|
ret0, _ := ret[0].(*miner.SectorPreCommitOnChainInfo)
|
||||||
ret1, _ := ret[1].(error)
|
ret1, _ := ret[1].(error)
|
||||||
return ret0, ret1
|
return ret0, ret1
|
||||||
}
|
}
|
||||||
|
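The RaftLeader and RaftState mocks added above follow the usual gomock shape (a method on MockFullNode plus a matching recorder), so a test can stub them like any other FullNode method. A minimal sketch, assuming the generated package is imported as `mocks`; the peer ID value is a placeholder:

```go
package example

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/libp2p/go-libp2p/core/peer"

	mocks "github.com/filecoin-project/lotus/api/mocks"
)

func TestRaftLeaderMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	node := mocks.NewMockFullNode(ctrl)

	// Expect exactly one RaftLeader call and stub its result.
	want := peer.ID("placeholder-peer-id") // illustrative value, not a real peer ID
	node.EXPECT().RaftLeader(gomock.Any()).Return(want, nil)

	got, err := node.RaftLeader(context.Background())
	if err != nil || got != want {
		t.Fatalf("unexpected result: %v, %v", got, err)
	}
}
```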
1111 api/proxy_gen.go
File diff suppressed because it is too large Load Diff
219 api/types.go
@@ -1,15 +1,23 @@
package api

import (
+	"encoding/json"
+	"fmt"
	"time"

	"github.com/google/uuid"
	"github.com/ipfs/go-cid"
+	"github.com/ipfs/go-graphsync"
+	"github.com/ipld/go-ipld-prime"
+	"github.com/ipld/go-ipld-prime/codec/dagjson"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
+	ma "github.com/multiformats/go-multiaddr"

	"github.com/filecoin-project/go-address"
+	datatransfer "github.com/filecoin-project/go-data-transfer/v2"
+	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
@@ -17,6 +25,27 @@ import (
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

+type MultiaddrSlice []ma.Multiaddr
+
+func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) {
+	var temp []string
+	if err := json.Unmarshal(raw, &temp); err != nil {
+		return err
+	}
+
+	res := make([]ma.Multiaddr, len(temp))
+	for i, str := range temp {
+		res[i], err = ma.NewMultiaddr(str)
+		if err != nil {
+			return err
+		}
+	}
+	*m = res
+	return nil
+}
+
+var _ json.Unmarshaler = new(MultiaddrSlice)
+
type ObjStat struct {
	Size  uint64
	Links uint64
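The MultiaddrSlice unmarshaller above accepts a plain JSON array of multiaddr strings and fails on the first malformed entry. A small usage sketch (the sample addresses are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	raw := []byte(`["/ip4/127.0.0.1/tcp/1234", "/ip4/10.0.0.2/tcp/5678"]`)

	var addrs api.MultiaddrSlice
	if err := json.Unmarshal(raw, &addrs); err != nil {
		panic(err) // a string that is not a valid multiaddr surfaces here
	}
	for _, a := range addrs {
		fmt.Println(a.String())
	}
}
```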
@@ -40,6 +69,76 @@ type MessageSendSpec struct {
	MaximizeFeeCap bool
}

+type MpoolMessageWhole struct {
+	Msg  *types.Message
+	Spec *MessageSendSpec
+}
+
+// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
+type GraphSyncDataTransfer struct {
+	// GraphSync request id for this transfer
+	RequestID *graphsync.RequestID
+	// Graphsync state for this transfer
+	RequestState string
+	// If a channel ID is present, indicates whether this is the current graphsync request for this channel
+	// (could have changed in a restart)
+	IsCurrentChannelRequest bool
+	// Data transfer channel ID for this transfer
+	ChannelID *datatransfer.ChannelID
+	// Data transfer state for this transfer
+	ChannelState *DataTransferChannel
+	// Diagnostic information about this request -- and unexpected inconsistencies in
+	// request state
+	Diagnostics []string
+}
+
+// TransferDiagnostics give current information about transfers going over graphsync that may be helpful for debugging
+type TransferDiagnostics struct {
+	ReceivingTransfers []*GraphSyncDataTransfer
+	SendingTransfers   []*GraphSyncDataTransfer
+}
+
+type DataTransferChannel struct {
+	TransferID  datatransfer.TransferID
+	Status      datatransfer.Status
+	BaseCID     cid.Cid
+	IsInitiator bool
+	IsSender    bool
+	Voucher     string
+	Message     string
+	OtherPeer   peer.ID
+	Transferred uint64
+	Stages      *datatransfer.ChannelStages
+}
+
+// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id
+func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelState) DataTransferChannel {
+	channel := DataTransferChannel{
+		TransferID: channelState.TransferID(),
+		Status:     channelState.Status(),
+		BaseCID:    channelState.BaseCID(),
+		IsSender:   channelState.Sender() == hostID,
+		Message:    channelState.Message(),
+	}
+	voucher := channelState.Voucher()
+	voucherJSON, err := ipld.Encode(voucher.Voucher, dagjson.Encode)
+	if err != nil {
+		channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error()
+	} else {
+		channel.Voucher = string(voucherJSON)
+	}
+	if channel.IsSender {
+		channel.IsInitiator = !channelState.IsPull()
+		channel.Transferred = channelState.Sent()
+		channel.OtherPeer = channelState.Recipient()
+	} else {
+		channel.IsInitiator = channelState.IsPull()
+		channel.Transferred = channelState.Received()
+		channel.OtherPeer = channelState.Sender()
+	}
+	return channel
+}
+
type NetStat struct {
	System    *network.ScopeStat `json:",omitempty"`
	Transient *network.ScopeStat `json:",omitempty"`
@@ -135,6 +234,67 @@ type MessagePrototype struct {
	ValidNonce bool
}

+type RetrievalInfo struct {
+	PayloadCID   cid.Cid
+	ID           retrievalmarket.DealID
+	PieceCID     *cid.Cid
+	PricePerByte abi.TokenAmount
+	UnsealPrice  abi.TokenAmount
+
+	Status        retrievalmarket.DealStatus
+	Message       string // more information about deal state, particularly errors
+	Provider      peer.ID
+	BytesReceived uint64
+	BytesPaidFor  uint64
+	TotalPaid     abi.TokenAmount
+
+	TransferChannelID *datatransfer.ChannelID
+	DataTransfer      *DataTransferChannel
+
+	// optional event if part of ClientGetRetrievalUpdates
+	Event *retrievalmarket.ClientEvent
+}
+
+type RestrievalRes struct {
+	DealID retrievalmarket.DealID
+}
+
+// Selector specifies ipld selector string
+// - if the string starts with '{', it's interpreted as json selector string
+//   see https://ipld.io/specs/selectors/ and https://ipld.io/specs/selectors/fixtures/selector-fixtures-1/
+// - otherwise the string is interpreted as ipld-selector-text-lite (simple ipld path)
+//   see https://github.com/ipld/go-ipld-selector-text-lite
+type Selector string
+
+type DagSpec struct {
+	// DataSelector matches data to be retrieved
+	// - when using textselector, the path specifies subtree
+	// - the matched graph must have a single root
+	DataSelector *Selector
+
+	// ExportMerkleProof is applicable only when exporting to a CAR file via a path textselector
+	// When true, in addition to the selection target, the resulting CAR will contain every block along the
+	// path back to, and including the original root
+	// When false the resulting CAR contains only the blocks of the target subdag
+	ExportMerkleProof bool
+}
+
+type ExportRef struct {
+	Root cid.Cid
+
+	// DAGs array specifies a list of DAGs to export
+	// - If exporting into unixfs files, only one DAG is supported, DataSelector is only used to find the targeted root node
+	// - If exporting into a car file
+	//   - When exactly one text-path DataSelector is specified exports the subgraph and its full merkle-path from the original root
+	//   - Otherwise ( multiple paths and/or JSON selector specs) determines each individual subroot and exports the subtrees as a multi-root car
+	// - When not specified defaults to a single DAG:
+	//   - Data - the entire DAG: `{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}`
+	DAGs []DagSpec
+
+	FromLocalCAR string // if specified, get data from a local CARv2 file.
+	DealID       retrievalmarket.DealID
+}
+
type MinerInfo struct {
	Owner  address.Address // Must be an ID-address.
	Worker address.Address // Must be an ID-address.
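The Selector, DagSpec, and ExportRef types above drive CAR and unixfs exports: a selector is either a JSON selector spec (when it starts with '{') or an ipld-selector-text-lite path. A hedged sketch of building an ExportRef that selects a sub-DAG by text path; the path and flag values are placeholders:

```go
package example

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
)

// exportRefForPath selects a sub-DAG under root by a simple ipld path.
func exportRefForPath(root cid.Cid) api.ExportRef {
	sel := api.Selector("Links/0/Hash") // ipld-selector-text-lite path (placeholder)
	return api.ExportRef{
		Root: root,
		DAGs: []api.DagSpec{{
			DataSelector:      &sel,
			ExportMerkleProof: true, // also keep the blocks on the path back to the root
		}},
	}
}
```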
@@ -191,7 +351,64 @@ type ForkUpgradeParams struct {
	UpgradeWatermelonHeight abi.ChainEpoch
	UpgradeDragonHeight     abi.ChainEpoch
	UpgradePhoenixHeight    abi.ChainEpoch
-	UpgradeAussieHeight     abi.ChainEpoch
+}
+
+type NonceMapType map[address.Address]uint64
+type MsgUuidMapType map[uuid.UUID]*types.SignedMessage
+
+type RaftStateData struct {
+	NonceMap NonceMapType
+	MsgUuids MsgUuidMapType
+}
+
+func (n *NonceMapType) MarshalJSON() ([]byte, error) {
+	marshalled := make(map[string]uint64)
+	for a, n := range *n {
+		marshalled[a.String()] = n
+	}
+	return json.Marshal(marshalled)
+}
+
+func (n *NonceMapType) UnmarshalJSON(b []byte) error {
+	unmarshalled := make(map[string]uint64)
+	err := json.Unmarshal(b, &unmarshalled)
+	if err != nil {
+		return err
+	}
+	*n = make(map[address.Address]uint64)
+	for saddr, nonce := range unmarshalled {
+		a, err := address.NewFromString(saddr)
+		if err != nil {
+			return err
+		}
+		(*n)[a] = nonce
+	}
+	return nil
+}
+
+func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) {
+	marshalled := make(map[string]*types.SignedMessage)
+	for u, msg := range *m {
+		marshalled[u.String()] = msg
+	}
+	return json.Marshal(marshalled)
+}
+
+func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
+	unmarshalled := make(map[string]*types.SignedMessage)
+	err := json.Unmarshal(b, &unmarshalled)
+	if err != nil {
+		return err
+	}
+	*m = make(map[uuid.UUID]*types.SignedMessage)
+	for suid, msg := range unmarshalled {
+		u, err := uuid.Parse(suid)
+		if err != nil {
+			return err
+		}
+		(*m)[u] = msg
+	}
+	return nil
}

// ChainExportConfig holds configuration for chain ranged exports.
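The custom marshalling above exists because JSON object keys must be strings: NonceMapType keys (addresses) and MsgUuidMapType keys (UUIDs) are serialised in their string form and parsed back on decode. A round-trip sketch (the address literal is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	a, err := address.NewFromString("t01000") // sample ID address
	if err != nil {
		panic(err)
	}

	in := api.NonceMapType{a: 42}
	b, err := json.Marshal(&in) // MarshalJSON is defined on the pointer receiver
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // e.g. {"t01000":42}

	var out api.NonceMapType
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out[a]) // 42
}
```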
@@ -5,9 +5,14 @@ import (

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
+	textselector "github.com/ipld/go-ipld-selector-text-lite"
+	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
+	datatransfer "github.com/filecoin-project/go-data-transfer/v2"
+	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
+	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/builtin/v8/paych"
	verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
@@ -19,7 +24,9 @@ import (
	apitypes "github.com/filecoin-project/lotus/api/types"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
+	marketevents "github.com/filecoin-project/lotus/markets/loggers"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
+	"github.com/filecoin-project/lotus/node/repo/imports"
)

//go:generate go run github.com/golang/mock/mockgen -destination=v0mocks/mock_full.go -package=v0mocks . FullNode
@@ -286,7 +293,7 @@ type FullNode interface {
	WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
	// WalletDefaultAddress returns the address marked as default in the wallet.
	WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
-	// WalletSetDefault marks the given address as the default one.
+	// WalletSetDefault marks the given address as as the default one.
	WalletSetDefault(context.Context, address.Address) error //perm:write
	// WalletExport returns the private key of an address in the wallet.
	WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
@@ -298,6 +305,74 @@ type FullNode interface {
	WalletValidateAddress(context.Context, string) (address.Address, error) //perm:read

	// Other

+	// MethodGroup: Client
+	// The Client methods all have to do with interacting with the storage and
+	// retrieval markets as a client
+
+	// ClientImport imports file under the specified path into filestore.
+	ClientImport(ctx context.Context, ref api.FileRef) (*api.ImportRes, error) //perm:admin
+	// ClientRemoveImport removes file import
+	ClientRemoveImport(ctx context.Context, importID imports.ID) error //perm:admin
+	// ClientStartDeal proposes a deal with a miner.
+	ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin
+	// ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
+	ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:write
+	// ClientGetDealInfo returns the latest information about a given deal.
+	ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read
+	// ClientListDeals returns information about the deals made by the local client.
+	ClientListDeals(ctx context.Context) ([]api.DealInfo, error) //perm:write
+	// ClientGetDealUpdates returns the status of updated deals
+	ClientGetDealUpdates(ctx context.Context) (<-chan api.DealInfo, error) //perm:write
+	// ClientGetDealStatus returns status given a code
+	ClientGetDealStatus(ctx context.Context, statusCode uint64) (string, error) //perm:read
+	// ClientHasLocal indicates whether a certain CID is locally stored.
+	ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) //perm:write
+	// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
+	ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]api.QueryOffer, error) //perm:read
+	// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
+	ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read
+	// ClientRetrieve initiates the retrieval of a file, as specified in the order.
+	ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error //perm:admin
+	// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
+	// of status updates.
+	ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
+	// ClientQueryAsk returns a signed StorageAsk from the specified miner.
+	// ClientListRetrievals returns information about retrievals made by the local client
+	ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write
+	// ClientGetRetrievalUpdates returns status of updated retrieval deals
+	ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) //perm:write
+	ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
+	// ClientCalcCommP calculates the CommP and data size of the specified CID
+	ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read
+	// ClientCalcCommP calculates the CommP for a specified file
+	ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet, error) //perm:write
+	// ClientGenCar generates a CAR file for the specified file.
+	ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error //perm:write
+	// ClientDealSize calculates real deal data size
+	ClientDealSize(ctx context.Context, root cid.Cid) (api.DataSize, error) //perm:read
+	// ClientListTransfers returns the status of all ongoing transfers of data
+	ClientListDataTransfers(ctx context.Context) ([]api.DataTransferChannel, error) //perm:write
+	ClientDataTransferUpdates(ctx context.Context) (<-chan api.DataTransferChannel, error) //perm:write
+	// ClientRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer
+	ClientRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
+	// ClientCancelDataTransfer cancels a data transfer with the given transfer ID and other peer
+	ClientCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write
+	// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
+	// which are stuck due to insufficient funds
+	ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error //perm:write
+
+	// ClientCancelRetrievalDeal cancels an ongoing retrieval deal based on DealID
+	ClientCancelRetrievalDeal(ctx context.Context, dealid retrievalmarket.DealID) error //perm:write
+
+	// ClientUnimport removes references to the specified file from filestore
+	// ClientUnimport(path string)
+
+	// ClientListImports lists imported files and their root CIDs
+	ClientListImports(ctx context.Context) ([]api.Import, error) //perm:write
+
+	// ClientListAsks() []Ask
+
	// MethodGroup: State
	// The State methods are used to query, inspect, and interact with chain state.
	// Most methods take a TipSetKey as a parameter. The state looked up is the parent state of the tipset.
@@ -668,3 +743,37 @@ type FullNode interface {
	// the path specified when calling CreateBackup is within the base path
	CreateBackup(ctx context.Context, fpath string) error //perm:admin
}
+
+func OfferOrder(o api.QueryOffer, client address.Address) RetrievalOrder {
+	return RetrievalOrder{
+		Root:                    o.Root,
+		Piece:                   o.Piece,
+		Size:                    o.Size,
+		Total:                   o.MinPrice,
+		UnsealPrice:             o.UnsealPrice,
+		PaymentInterval:         o.PaymentInterval,
+		PaymentIntervalIncrease: o.PaymentIntervalIncrease,
+		Client:                  client,
+
+		Miner:     o.Miner,
+		MinerPeer: &o.MinerPeer,
+	}
+}
+
+type RetrievalOrder struct {
+	// TODO: make this less unixfs specific
+	Root                  cid.Cid
+	Piece                 *cid.Cid
+	DatamodelPathSelector *textselector.Expression
+	Size                  uint64
+
+	FromLocalCAR string // if specified, get data from a local CARv2 file.
+	// TODO: support offset
+	Total                   types.BigInt
+	UnsealPrice             types.BigInt
+	PaymentInterval         uint64
+	PaymentIntervalIncrease uint64
+	Client                  address.Address
+	Miner                   address.Address
+	MinerPeer               *retrievalmarket.RetrievalPeer
+}
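OfferOrder above is the glue between ClientMinerQueryOffer and ClientRetrieve: it copies the signed offer's pricing and peer information into a RetrievalOrder. An illustrative sketch of that flow, assuming `node` implements the v0api.FullNode interface shown above; the output path and error handling are placeholders:

```go
package example

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v0api"
)

// retrieve queries a single miner for an offer and retrieves the data as a CAR file.
func retrieve(ctx context.Context, node v0api.FullNode, miner, wallet address.Address, root cid.Cid) error {
	offer, err := node.ClientMinerQueryOffer(ctx, miner, root, nil)
	if err != nil {
		return err
	}
	order := v0api.OfferOrder(offer, wallet) // fill the retrieval order from the offer
	return node.ClientRetrieve(ctx, order, &api.FileRef{Path: "/tmp/out.car", IsCAR: true})
}
```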
@@ -6,11 +6,14 @@ import (

type Common = api.Common
type Net = api.Net
+type CommonNet = api.CommonNet

type CommonStruct = api.CommonStruct
type CommonStub = api.CommonStub
type NetStruct = api.NetStruct
type NetStub = api.NetStub
+type CommonNetStruct = api.CommonNetStruct
+type CommonNetStub = api.CommonNetStub

type StorageMiner = api.StorageMiner
type StorageMinerStruct = api.StorageMinerStruct
@@ -7,10 +7,14 @@ import (

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
+	"github.com/libp2p/go-libp2p/core/peer"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-bitfield"
+	datatransfer "github.com/filecoin-project/go-data-transfer/v2"
+	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
+	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/builtin/v8/paych"
	verifregtypes "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
@@ -22,7 +26,9 @@ import (
	apitypes "github.com/filecoin-project/lotus/api/types"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
+	marketevents "github.com/filecoin-project/lotus/markets/loggers"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
+	"github.com/filecoin-project/lotus/node/repo/imports"
)

var ErrNotSupported = xerrors.New("method not supported")
@@ -84,6 +90,60 @@ type FullNodeMethods struct {

	ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"`

+	ClientCalcCommP func(p0 context.Context, p1 string) (*api.CommPRet, error) `perm:"write"`
+
+	ClientCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
+
+	ClientCancelRetrievalDeal func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"write"`
+
+	ClientDataTransferUpdates func(p0 context.Context) (<-chan api.DataTransferChannel, error) `perm:"write"`
+
+	ClientDealPieceCID func(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) `perm:"read"`
+
+	ClientDealSize func(p0 context.Context, p1 cid.Cid) (api.DataSize, error) `perm:"read"`
+
+	ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) `perm:"read"`
+
+	ClientGenCar func(p0 context.Context, p1 api.FileRef, p2 string) error `perm:"write"`
+
+	ClientGetDealInfo func(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) `perm:"read"`
+
+	ClientGetDealStatus func(p0 context.Context, p1 uint64) (string, error) `perm:"read"`
+
+	ClientGetDealUpdates func(p0 context.Context) (<-chan api.DealInfo, error) `perm:"write"`
+
+	ClientGetRetrievalUpdates func(p0 context.Context) (<-chan api.RetrievalInfo, error) `perm:"write"`
+
+	ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"`
+
+	ClientImport func(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) `perm:"admin"`
+
+	ClientListDataTransfers func(p0 context.Context) ([]api.DataTransferChannel, error) `perm:"write"`
+
+	ClientListDeals func(p0 context.Context) ([]api.DealInfo, error) `perm:"write"`
+
+	ClientListImports func(p0 context.Context) ([]api.Import, error) `perm:"write"`
+
+	ClientListRetrievals func(p0 context.Context) ([]api.RetrievalInfo, error) `perm:"write"`
+
+	ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) `perm:"read"`
+
+	ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
+
+	ClientRemoveImport func(p0 context.Context, p1 imports.ID) error `perm:"admin"`
+
+	ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"`
+
+	ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error `perm:"admin"`
+
+	ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"`
+
+	ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"`
+
+	ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
+
+	ClientStatelessDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"write"`
+
	CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`

	GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
@@ -736,6 +796,303 @@ func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey)
	return *new(types.BigInt), ErrNotSupported
}

+func (s *FullNodeStruct) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) {
+	if s.Internal.ClientCalcCommP == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.ClientCalcCommP(p0, p1)
+}
+
+func (s *FullNodeStub) ClientCalcCommP(p0 context.Context, p1 string) (*api.CommPRet, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+	if s.Internal.ClientCancelDataTransfer == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ClientCancelDataTransfer(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ClientCancelDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+	return ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {
+	if s.Internal.ClientCancelRetrievalDeal == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ClientCancelRetrievalDeal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientCancelRetrievalDeal(p0 context.Context, p1 retrievalmarket.DealID) error {
+	return ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) {
+	if s.Internal.ClientDataTransferUpdates == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.ClientDataTransferUpdates(p0)
+}
+
+func (s *FullNodeStub) ClientDataTransferUpdates(p0 context.Context) (<-chan api.DataTransferChannel, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) {
+	if s.Internal.ClientDealPieceCID == nil {
+		return *new(api.DataCIDSize), ErrNotSupported
+	}
+	return s.Internal.ClientDealPieceCID(p0, p1)
+}
+
+func (s *FullNodeStub) ClientDealPieceCID(p0 context.Context, p1 cid.Cid) (api.DataCIDSize, error) {
+	return *new(api.DataCIDSize), ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) {
+	if s.Internal.ClientDealSize == nil {
+		return *new(api.DataSize), ErrNotSupported
+	}
+	return s.Internal.ClientDealSize(p0, p1)
+}
+
+func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (api.DataSize, error) {
+	return *new(api.DataSize), ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) {
+	if s.Internal.ClientFindData == nil {
+		return *new([]api.QueryOffer), ErrNotSupported
+	}
+	return s.Internal.ClientFindData(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]api.QueryOffer, error) {
+	return *new([]api.QueryOffer), ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error {
+	if s.Internal.ClientGenCar == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ClientGenCar(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientGenCar(p0 context.Context, p1 api.FileRef, p2 string) error {
+	return ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) {
+	if s.Internal.ClientGetDealInfo == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.ClientGetDealInfo(p0, p1)
+}
+
+func (s *FullNodeStub) ClientGetDealInfo(p0 context.Context, p1 cid.Cid) (*api.DealInfo, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {
+	if s.Internal.ClientGetDealStatus == nil {
+		return "", ErrNotSupported
+	}
+	return s.Internal.ClientGetDealStatus(p0, p1)
+}
+
+func (s *FullNodeStub) ClientGetDealStatus(p0 context.Context, p1 uint64) (string, error) {
+	return "", ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) {
+	if s.Internal.ClientGetDealUpdates == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.ClientGetDealUpdates(p0)
+}
+
+func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan api.DealInfo, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) {
+	if s.Internal.ClientGetRetrievalUpdates == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.ClientGetRetrievalUpdates(p0)
+}
+
+func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
+	if s.Internal.ClientHasLocal == nil {
+		return false, ErrNotSupported
+	}
+	return s.Internal.ClientHasLocal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
+	return false, ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) {
+	if s.Internal.ClientImport == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.ClientImport(p0, p1)
+}
+
+func (s *FullNodeStub) ClientImport(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) {
+	if s.Internal.ClientListDataTransfers == nil {
+		return *new([]api.DataTransferChannel), ErrNotSupported
+	}
+	return s.Internal.ClientListDataTransfers(p0)
+}
+
+func (s *FullNodeStub) ClientListDataTransfers(p0 context.Context) ([]api.DataTransferChannel, error) {
+	return *new([]api.DataTransferChannel), ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) {
+	if s.Internal.ClientListDeals == nil {
+		return *new([]api.DealInfo), ErrNotSupported
+	}
+	return s.Internal.ClientListDeals(p0)
+}
+
+func (s *FullNodeStub) ClientListDeals(p0 context.Context) ([]api.DealInfo, error) {
+	return *new([]api.DealInfo), ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientListImports(p0 context.Context) ([]api.Import, error) {
+	if s.Internal.ClientListImports == nil {
+		return *new([]api.Import), ErrNotSupported
+	}
+	return s.Internal.ClientListImports(p0)
+}
+
+func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]api.Import, error) {
+	return *new([]api.Import), ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) {
+	if s.Internal.ClientListRetrievals == nil {
+		return *new([]api.RetrievalInfo), ErrNotSupported
+	}
+	return s.Internal.ClientListRetrievals(p0)
+}
+
+func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) {
+	return *new([]api.RetrievalInfo), ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) {
+	if s.Internal.ClientMinerQueryOffer == nil {
+		return *new(api.QueryOffer), ErrNotSupported
+	}
+	return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) {
+	return *new(api.QueryOffer), ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) {
+	if s.Internal.ClientQueryAsk == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.ClientQueryAsk(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientQueryAsk(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {
+	if s.Internal.ClientRemoveImport == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ClientRemoveImport(p0, p1)
+}
+
+func (s *FullNodeStub) ClientRemoveImport(p0 context.Context, p1 imports.ID) error {
+	return ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+	if s.Internal.ClientRestartDataTransfer == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ClientRestartDataTransfer(p0, p1, p2, p3)
+}
+
+func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error {
+	return ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error {
+	if s.Internal.ClientRetrieve == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ClientRetrieve(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error {
+	return ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
+	if s.Internal.ClientRetrieveTryRestartInsufficientFunds == nil {
+		return ErrNotSupported
+	}
+	return s.Internal.ClientRetrieveTryRestartInsufficientFunds(p0, p1)
+}
+
+func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error {
+	return ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+	if s.Internal.ClientRetrieveWithEvents == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.ClientRetrieveWithEvents(p0, p1, p2)
+}
+
+func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
+	if s.Internal.ClientStartDeal == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.ClientStartDeal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
+	return nil, ErrNotSupported
+}
+
+func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
+	if s.Internal.ClientStatelessDeal == nil {
+		return nil, ErrNotSupported
+	}
+	return s.Internal.ClientStatelessDeal(p0, p1)
+}
+
+func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
+	return nil, ErrNotSupported
+}
+
func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error {
	if s.Internal.CreateBackup == nil {
		return ErrNotSupported
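Every generated FullNodeStruct method above follows the same shape: proxy the call to the matching function pointer in Internal, and return ErrNotSupported when that pointer was never populated (for example, when the remote node does not expose the method). A sketch of handling that sentinel at a call site; the wrapper function itself is illustrative:

```go
package example

import (
	"context"
	"errors"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api/v0api"
)

// hasLocalOrFalse treats "method not supported" as simply "not stored locally".
func hasLocalOrFalse(ctx context.Context, node *v0api.FullNodeStruct, c cid.Cid) (bool, error) {
	ok, err := node.ClientHasLocal(ctx, c)
	if errors.Is(err, v0api.ErrNotSupported) {
		return false, nil
	}
	return ok, err
}
```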
@@ -20,12 +20,14 @@ import (

	address "github.com/filecoin-project/go-address"
	bitfield "github.com/filecoin-project/go-bitfield"
+	datatransfer "github.com/filecoin-project/go-data-transfer/v2"
+	retrievalmarket "github.com/filecoin-project/go-fil-markets/retrievalmarket"
+	storagemarket "github.com/filecoin-project/go-fil-markets/storagemarket"
	auth "github.com/filecoin-project/go-jsonrpc/auth"
	abi "github.com/filecoin-project/go-state-types/abi"
	big "github.com/filecoin-project/go-state-types/big"
-	miner "github.com/filecoin-project/go-state-types/builtin/v13/miner"
	paych "github.com/filecoin-project/go-state-types/builtin/v8/paych"
-	miner0 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
+	miner "github.com/filecoin-project/go-state-types/builtin/v9/miner"
	verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
	crypto "github.com/filecoin-project/go-state-types/crypto"
	dline "github.com/filecoin-project/go-state-types/dline"
@@ -33,10 +35,13 @@ import (

	api "github.com/filecoin-project/lotus/api"
	apitypes "github.com/filecoin-project/lotus/api/types"
-	miner1 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
+	v0api "github.com/filecoin-project/lotus/api/v0api"
+	miner0 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	types "github.com/filecoin-project/lotus/chain/types"
	alerting "github.com/filecoin-project/lotus/journal/alerting"
+	marketevents "github.com/filecoin-project/lotus/markets/loggers"
	dtypes "github.com/filecoin-project/lotus/node/modules/dtypes"
+	imports "github.com/filecoin-project/lotus/node/repo/imports"
)

// MockFullNode is a mock of FullNode interface.
@@ -449,6 +454,404 @@ func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *g
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1)
}

+// ClientCalcCommP mocks base method.
+func (m *MockFullNode) ClientCalcCommP(arg0 context.Context, arg1 string) (*api.CommPRet, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientCalcCommP", arg0, arg1)
+	ret0, _ := ret[0].(*api.CommPRet)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ClientCalcCommP indicates an expected call of ClientCalcCommP.
+func (mr *MockFullNodeMockRecorder) ClientCalcCommP(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCalcCommP", reflect.TypeOf((*MockFullNode)(nil).ClientCalcCommP), arg0, arg1)
+}
+
+// ClientCancelDataTransfer mocks base method.
+func (m *MockFullNode) ClientCancelDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientCancelDataTransfer", arg0, arg1, arg2, arg3)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// ClientCancelDataTransfer indicates an expected call of ClientCancelDataTransfer.
+func (mr *MockFullNodeMockRecorder) ClientCancelDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientCancelDataTransfer), arg0, arg1, arg2, arg3)
+}
+
+// ClientCancelRetrievalDeal mocks base method.
+func (m *MockFullNode) ClientCancelRetrievalDeal(arg0 context.Context, arg1 retrievalmarket.DealID) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientCancelRetrievalDeal", arg0, arg1)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// ClientCancelRetrievalDeal indicates an expected call of ClientCancelRetrievalDeal.
+func (mr *MockFullNodeMockRecorder) ClientCancelRetrievalDeal(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientCancelRetrievalDeal", reflect.TypeOf((*MockFullNode)(nil).ClientCancelRetrievalDeal), arg0, arg1)
+}
+
+// ClientDataTransferUpdates mocks base method.
+func (m *MockFullNode) ClientDataTransferUpdates(arg0 context.Context) (<-chan api.DataTransferChannel, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientDataTransferUpdates", arg0)
+	ret0, _ := ret[0].(<-chan api.DataTransferChannel)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ClientDataTransferUpdates indicates an expected call of ClientDataTransferUpdates.
+func (mr *MockFullNodeMockRecorder) ClientDataTransferUpdates(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDataTransferUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientDataTransferUpdates), arg0)
+}
+
+// ClientDealPieceCID mocks base method.
+func (m *MockFullNode) ClientDealPieceCID(arg0 context.Context, arg1 cid.Cid) (api.DataCIDSize, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientDealPieceCID", arg0, arg1)
+	ret0, _ := ret[0].(api.DataCIDSize)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ClientDealPieceCID indicates an expected call of ClientDealPieceCID.
+func (mr *MockFullNodeMockRecorder) ClientDealPieceCID(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealPieceCID", reflect.TypeOf((*MockFullNode)(nil).ClientDealPieceCID), arg0, arg1)
+}
+
+// ClientDealSize mocks base method.
+func (m *MockFullNode) ClientDealSize(arg0 context.Context, arg1 cid.Cid) (api.DataSize, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientDealSize", arg0, arg1)
+	ret0, _ := ret[0].(api.DataSize)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ClientDealSize indicates an expected call of ClientDealSize.
+func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1)
+}
+
+// ClientFindData mocks base method.
+func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientFindData", arg0, arg1, arg2)
+	ret0, _ := ret[0].([]api.QueryOffer)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ClientFindData indicates an expected call of ClientFindData.
+func (mr *MockFullNodeMockRecorder) ClientFindData(arg0, arg1, arg2 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientFindData", reflect.TypeOf((*MockFullNode)(nil).ClientFindData), arg0, arg1, arg2)
+}
+
+// ClientGenCar mocks base method.
+func (m *MockFullNode) ClientGenCar(arg0 context.Context, arg1 api.FileRef, arg2 string) error {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientGenCar", arg0, arg1, arg2)
+	ret0, _ := ret[0].(error)
+	return ret0
+}
+
+// ClientGenCar indicates an expected call of ClientGenCar.
+func (mr *MockFullNodeMockRecorder) ClientGenCar(arg0, arg1, arg2 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGenCar", reflect.TypeOf((*MockFullNode)(nil).ClientGenCar), arg0, arg1, arg2)
+}
+
+// ClientGetDealInfo mocks base method.
+func (m *MockFullNode) ClientGetDealInfo(arg0 context.Context, arg1 cid.Cid) (*api.DealInfo, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientGetDealInfo", arg0, arg1)
+	ret0, _ := ret[0].(*api.DealInfo)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ClientGetDealInfo indicates an expected call of ClientGetDealInfo.
+func (mr *MockFullNodeMockRecorder) ClientGetDealInfo(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealInfo", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealInfo), arg0, arg1)
+}
+
+// ClientGetDealStatus mocks base method.
+func (m *MockFullNode) ClientGetDealStatus(arg0 context.Context, arg1 uint64) (string, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientGetDealStatus", arg0, arg1)
+	ret0, _ := ret[0].(string)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ClientGetDealStatus indicates an expected call of ClientGetDealStatus.
+func (mr *MockFullNodeMockRecorder) ClientGetDealStatus(arg0, arg1 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealStatus", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealStatus), arg0, arg1)
+}
+
+// ClientGetDealUpdates mocks base method.
+func (m *MockFullNode) ClientGetDealUpdates(arg0 context.Context) (<-chan api.DealInfo, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientGetDealUpdates", arg0)
+	ret0, _ := ret[0].(<-chan api.DealInfo)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ClientGetDealUpdates indicates an expected call of ClientGetDealUpdates.
+func (mr *MockFullNodeMockRecorder) ClientGetDealUpdates(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetDealUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetDealUpdates), arg0)
+}
+
+// ClientGetRetrievalUpdates mocks base method.
+func (m *MockFullNode) ClientGetRetrievalUpdates(arg0 context.Context) (<-chan api.RetrievalInfo, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ClientGetRetrievalUpdates", arg0)
+	ret0, _ := ret[0].(<-chan api.RetrievalInfo)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ClientGetRetrievalUpdates indicates an expected call of ClientGetRetrievalUpdates.
+func (mr *MockFullNodeMockRecorder) ClientGetRetrievalUpdates(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientGetRetrievalUpdates", reflect.TypeOf((*MockFullNode)(nil).ClientGetRetrievalUpdates), arg0)
+}
+
+// ClientHasLocal mocks base method.
+func (m *MockFullNode) ClientHasLocal(arg0 context.Context, arg1 cid.Cid) (bool, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientHasLocal", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(bool)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientHasLocal indicates an expected call of ClientHasLocal.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientHasLocal(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientHasLocal", reflect.TypeOf((*MockFullNode)(nil).ClientHasLocal), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientImport mocks base method.
|
||||||
|
func (m *MockFullNode) ClientImport(arg0 context.Context, arg1 api.FileRef) (*api.ImportRes, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientImport", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(*api.ImportRes)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientImport indicates an expected call of ClientImport.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientImport(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientImport", reflect.TypeOf((*MockFullNode)(nil).ClientImport), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListDataTransfers mocks base method.
|
||||||
|
func (m *MockFullNode) ClientListDataTransfers(arg0 context.Context) ([]api.DataTransferChannel, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientListDataTransfers", arg0)
|
||||||
|
ret0, _ := ret[0].([]api.DataTransferChannel)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListDataTransfers indicates an expected call of ClientListDataTransfers.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientListDataTransfers(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDataTransfers", reflect.TypeOf((*MockFullNode)(nil).ClientListDataTransfers), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListDeals mocks base method.
|
||||||
|
func (m *MockFullNode) ClientListDeals(arg0 context.Context) ([]api.DealInfo, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientListDeals", arg0)
|
||||||
|
ret0, _ := ret[0].([]api.DealInfo)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListDeals indicates an expected call of ClientListDeals.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientListDeals(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListDeals", reflect.TypeOf((*MockFullNode)(nil).ClientListDeals), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListImports mocks base method.
|
||||||
|
func (m *MockFullNode) ClientListImports(arg0 context.Context) ([]api.Import, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientListImports", arg0)
|
||||||
|
ret0, _ := ret[0].([]api.Import)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListImports indicates an expected call of ClientListImports.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientListImports(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListImports", reflect.TypeOf((*MockFullNode)(nil).ClientListImports), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListRetrievals mocks base method.
|
||||||
|
func (m *MockFullNode) ClientListRetrievals(arg0 context.Context) ([]api.RetrievalInfo, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientListRetrievals", arg0)
|
||||||
|
ret0, _ := ret[0].([]api.RetrievalInfo)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientListRetrievals indicates an expected call of ClientListRetrievals.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientListRetrievals(arg0 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientListRetrievals", reflect.TypeOf((*MockFullNode)(nil).ClientListRetrievals), arg0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientMinerQueryOffer mocks base method.
|
||||||
|
func (m *MockFullNode) ClientMinerQueryOffer(arg0 context.Context, arg1 address.Address, arg2 cid.Cid, arg3 *cid.Cid) (api.QueryOffer, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientMinerQueryOffer", arg0, arg1, arg2, arg3)
|
||||||
|
ret0, _ := ret[0].(api.QueryOffer)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientMinerQueryOffer indicates an expected call of ClientMinerQueryOffer.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientMinerQueryOffer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientMinerQueryOffer", reflect.TypeOf((*MockFullNode)(nil).ClientMinerQueryOffer), arg0, arg1, arg2, arg3)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientQueryAsk mocks base method.
|
||||||
|
func (m *MockFullNode) ClientQueryAsk(arg0 context.Context, arg1 peer.ID, arg2 address.Address) (*storagemarket.StorageAsk, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientQueryAsk", arg0, arg1, arg2)
|
||||||
|
ret0, _ := ret[0].(*storagemarket.StorageAsk)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientQueryAsk indicates an expected call of ClientQueryAsk.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientQueryAsk(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientQueryAsk", reflect.TypeOf((*MockFullNode)(nil).ClientQueryAsk), arg0, arg1, arg2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRemoveImport mocks base method.
|
||||||
|
func (m *MockFullNode) ClientRemoveImport(arg0 context.Context, arg1 imports.ID) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientRemoveImport", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRemoveImport indicates an expected call of ClientRemoveImport.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientRemoveImport(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRemoveImport", reflect.TypeOf((*MockFullNode)(nil).ClientRemoveImport), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRestartDataTransfer mocks base method.
|
||||||
|
func (m *MockFullNode) ClientRestartDataTransfer(arg0 context.Context, arg1 datatransfer.TransferID, arg2 peer.ID, arg3 bool) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientRestartDataTransfer", arg0, arg1, arg2, arg3)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRestartDataTransfer indicates an expected call of ClientRestartDataTransfer.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRestartDataTransfer", reflect.TypeOf((*MockFullNode)(nil).ClientRestartDataTransfer), arg0, arg1, arg2, arg3)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieve mocks base method.
|
||||||
|
func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieve indicates an expected call of ClientRetrieve.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieveTryRestartInsufficientFunds mocks base method.
|
||||||
|
func (m *MockFullNode) ClientRetrieveTryRestartInsufficientFunds(arg0 context.Context, arg1 address.Address) error {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientRetrieveTryRestartInsufficientFunds", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(error)
|
||||||
|
return ret0
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieveTryRestartInsufficientFunds indicates an expected call of ClientRetrieveTryRestartInsufficientFunds.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieveWithEvents mocks base method.
|
||||||
|
func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2)
|
||||||
|
ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientStartDeal mocks base method.
|
||||||
|
func (m *MockFullNode) ClientStartDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientStartDeal", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(*cid.Cid)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientStartDeal indicates an expected call of ClientStartDeal.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientStartDeal(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStartDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStartDeal), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientStatelessDeal mocks base method.
|
||||||
|
func (m *MockFullNode) ClientStatelessDeal(arg0 context.Context, arg1 *api.StartDealParams) (*cid.Cid, error) {
|
||||||
|
m.ctrl.T.Helper()
|
||||||
|
ret := m.ctrl.Call(m, "ClientStatelessDeal", arg0, arg1)
|
||||||
|
ret0, _ := ret[0].(*cid.Cid)
|
||||||
|
ret1, _ := ret[1].(error)
|
||||||
|
return ret0, ret1
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClientStatelessDeal indicates an expected call of ClientStatelessDeal.
|
||||||
|
func (mr *MockFullNodeMockRecorder) ClientStatelessDeal(arg0, arg1 interface{}) *gomock.Call {
|
||||||
|
mr.mock.ctrl.T.Helper()
|
||||||
|
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientStatelessDeal", reflect.TypeOf((*MockFullNode)(nil).ClientStatelessDeal), arg0, arg1)
|
||||||
|
}
|
||||||
|
|
// Closing mocks base method.
func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) {
	m.ctrl.T.Helper()
@@ -2296,7 +2699,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{})
}

// StateMinerInitialPledgeCollateral mocks base method.
-func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
+func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(big.Int)
@@ -2341,7 +2744,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}
}

// StateMinerPreCommitDepositForPower mocks base method.
-func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
+func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3)
	ret0, _ := ret[0].(big.Int)
@@ -2521,10 +2924,10 @@ func (mr *MockFullNodeMockRecorder) StateSearchMsgLimited(arg0, arg1, arg2 inter
}

// StateSectorExpiration mocks base method.
-func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorExpiration, error) {
+func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorExpiration, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(*miner1.SectorExpiration)
+	ret0, _ := ret[0].(*miner0.SectorExpiration)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
@@ -2551,10 +2954,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 in
}

// StateSectorPartition mocks base method.
-func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorLocation, error) {
+func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorLocation, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(*miner1.SectorLocation)
+	ret0, _ := ret[0].(*miner0.SectorLocation)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
@@ -2566,10 +2969,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3
}

// StateSectorPreCommitInfo mocks base method.
-func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner0.SectorPreCommitOnChainInfo, error) {
+func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3)
-	ret0, _ := ret[0].(miner0.SectorPreCommitOnChainInfo)
+	ret0, _ := ret[0].(miner.SectorPreCommitOnChainInfo)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
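Not part of the diff: a minimal sketch of how the generated MockFullNode above is typically driven from a test, assuming the usual mockgen/gomock workflow, that the test lives in the same package as the mock, and that this output imports github.com/golang/mock/gomock (newer generators use go.uber.org/mock/gomock instead).

package mocks // assumption: same package as the generated MockFullNode

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock" // assumption: gomock flavour matching this mockgen output
	"github.com/ipfs/go-cid"
)

func TestClientHasLocalStub(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// NewMockFullNode is the constructor mockgen emits alongside the methods above.
	node := NewMockFullNode(ctrl)
	node.EXPECT().ClientHasLocal(gomock.Any(), gomock.Any()).Return(true, nil)

	ok, err := node.ClientHasLocal(context.Background(), cid.Undef)
	if err != nil || !ok {
		t.Fatalf("unexpected result: %v %v", ok, err)
	}
}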
@@ -4,16 +4,21 @@ import (
	"context"

	"github.com/ipfs/go-cid"
+	"github.com/libp2p/go-libp2p/core/peer"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-fil-markets/retrievalmarket"
+	"github.com/filecoin-project/go-fil-markets/storagemarket"
	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/big"
	"github.com/filecoin-project/go-state-types/crypto"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/v1api"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
	"github.com/filecoin-project/lotus/chain/types"
+	marketevents "github.com/filecoin-project/lotus/markets/loggers"
)

type WrapperV1Full struct {
@@ -205,10 +210,158 @@ func (w *WrapperV1Full) ChainGetRandomnessFromBeacon(ctx context.Context, tsk ty
	return w.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk)
}

+func (w *WrapperV1Full) ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error {
+	events := make(chan marketevents.RetrievalEvent)
+	go w.clientRetrieve(ctx, order, ref, events)
+
+	for {
+		select {
+		case evt, ok := <-events:
+			if !ok { // done successfully
+				return nil
+			}
+
+			if evt.Err != "" {
+				return xerrors.Errorf("retrieval failed: %s", evt.Err)
+			}
+		case <-ctx.Done():
+			return xerrors.Errorf("retrieval timed out")
+		}
+	}
+}
+
+func (w *WrapperV1Full) ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) {
+	events := make(chan marketevents.RetrievalEvent)
+	go w.clientRetrieve(ctx, order, ref, events)
+	return events, nil
+}
+
+func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents <-chan api.RetrievalInfo, events chan marketevents.RetrievalEvent) error {
+	for {
+		var subscribeEvent api.RetrievalInfo
+		var evt retrievalmarket.ClientEvent
+		select {
+		case <-ctx.Done():
+			return xerrors.New("Retrieval Timed Out")
+		case subscribeEvent = <-subscribeEvents:
+			if subscribeEvent.ID != dealID {
+				// we can't check the deal ID ahead of time because:
+				// 1. We need to subscribe before retrieving.
+				// 2. We won't know the deal ID until after retrieving.
+				continue
+			}
+			if subscribeEvent.Event != nil {
+				evt = *subscribeEvent.Event
+			}
+		}
+
+		select {
+		case <-ctx.Done():
+			return xerrors.New("Retrieval Timed Out")
+		case events <- marketevents.RetrievalEvent{
+			Event:         evt,
+			Status:        subscribeEvent.Status,
+			BytesReceived: subscribeEvent.BytesReceived,
+			FundsSpent:    subscribeEvent.TotalPaid,
+		}:
+		}
+
+		switch subscribeEvent.Status {
+		case retrievalmarket.DealStatusCompleted:
+			return nil
+		case retrievalmarket.DealStatusRejected:
+			return xerrors.Errorf("Retrieval Proposal Rejected: %s", subscribeEvent.Message)
+		case
+			retrievalmarket.DealStatusDealNotFound,
+			retrievalmarket.DealStatusErrored:
+			return xerrors.Errorf("Retrieval Error: %s", subscribeEvent.Message)
+		}
+	}
+}
+
+func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) {
+	defer close(events)
+
+	finish := func(e error) {
+		if e != nil {
+			events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()}
+		}
+	}
+
+	var dealID retrievalmarket.DealID
+	if order.FromLocalCAR == "" {
+		// Subscribe to events before retrieving to avoid losing events.
+		subscribeCtx, cancel := context.WithCancel(ctx)
+		defer cancel()
+		retrievalEvents, err := w.ClientGetRetrievalUpdates(subscribeCtx)
+
+		if err != nil {
+			finish(xerrors.Errorf("GetRetrievalUpdates failed: %w", err))
+			return
+		}
+
+		retrievalRes, err := w.FullNode.ClientRetrieve(ctx, api.RetrievalOrder{
+			Root:                    order.Root,
+			Piece:                   order.Piece,
+			Size:                    order.Size,
+			Total:                   order.Total,
+			UnsealPrice:             order.UnsealPrice,
+			PaymentInterval:         order.PaymentInterval,
+			PaymentIntervalIncrease: order.PaymentIntervalIncrease,
+			Client:                  order.Client,
+			Miner:                   order.Miner,
+			MinerPeer:               order.MinerPeer,
+		})
+
+		if err != nil {
+			finish(xerrors.Errorf("Retrieve failed: %w", err))
+			return
+		}
+
+		dealID = retrievalRes.DealID
+
+		err = readSubscribeEvents(ctx, retrievalRes.DealID, retrievalEvents, events)
+		if err != nil {
+			finish(xerrors.Errorf("Retrieve: %w", err))
+			return
+		}
+	}
+
+	// If ref is nil, it only fetches the data into the configured blockstore.
+	if ref == nil {
+		finish(nil)
+		return
+	}
+
+	eref := api.ExportRef{
+		Root:         order.Root,
+		FromLocalCAR: order.FromLocalCAR,
+		DealID:       dealID,
+	}
+
+	if order.DatamodelPathSelector != nil {
+		s := api.Selector(*order.DatamodelPathSelector)
+		eref.DAGs = append(eref.DAGs, api.DagSpec{
+			DataSelector:      &s,
+			ExportMerkleProof: true,
+		})
+	}
+
+	finish(w.ClientExport(ctx, eref, *ref))
+}
+
func (w *WrapperV1Full) PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*api.ChannelInfo, error) {
	return w.FullNode.PaychFund(ctx, from, to, amt)
}

+func (w *WrapperV1Full) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) {
+	a, err := w.FullNode.ClientQueryAsk(ctx, p, miner)
+	if err != nil {
+		return nil, err
+	}
+	return a.Response, nil
+}
+
func (w *WrapperV1Full) BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error) {
	return w.StateGetBeaconEntry(ctx, epoch)
}
@@ -12,3 +12,5 @@ type RawFullNodeAPI FullNode
func PermissionedFullAPI(a FullNode) FullNode {
	return api.PermissionedFullAPI(a)
}
+
+type LotusProviderStruct = api.LotusProviderStruct
@@ -59,6 +59,8 @@ var (

	MinerAPIVersion0  = newVer(1, 5, 0)
	WorkerAPIVersion0 = newVer(1, 7, 0)
+
+	ProviderAPIVersion0 = newVer(1, 0, 0)
)

//nolint:varcheck,deadcode
@@ -109,9 +109,11 @@ func (bs *BufferedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) er

func (bs *BufferedBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error {
	// both stores are viewable.
-	if err := bs.write.View(ctx, c, callback); !ipld.IsNotFound(err) {
+	if err := bs.write.View(ctx, c, callback); ipld.IsNotFound(err) {
+		// not found in write blockstore; fall through.
+	} else {
		return err // propagate errors, or nil, i.e. found.
-	} // else not found in write blockstore; fall through.
+	}
	return bs.read.View(ctx, c, callback)
}

@@ -1,113 +0,0 @@
-package blockstore
-
-import (
-	"context"
-
-	blocks "github.com/ipfs/go-block-format"
-	"github.com/ipfs/go-cid"
-)
-
-// BlockstoreCache is a cache for blocks, compatible with lru.Cache; Must be safe for concurrent access
-type BlockstoreCache interface {
-	Remove(mhString MhString) bool
-	Contains(mhString MhString) bool
-	Get(mhString MhString) (blocks.Block, bool)
-	Add(mhString MhString, block blocks.Block) (evicted bool)
-}
-
-type ReadCachedBlockstore struct {
-	top   Blockstore
-	cache BlockstoreCache
-}
-
-type MhString string
-
-func NewReadCachedBlockstore(top Blockstore, cache BlockstoreCache) *ReadCachedBlockstore {
-	return &ReadCachedBlockstore{
-		top:   top,
-		cache: cache,
-	}
-}
-
-func (c *ReadCachedBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
-	c.cache.Remove(MhString(cid.Hash()))
-	return c.top.DeleteBlock(ctx, cid)
-}
-
-func (c *ReadCachedBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
-	if c.cache.Contains(MhString(cid.Hash())) {
-		return true, nil
-	}
-
-	return c.top.Has(ctx, cid)
-}
-
-func (c *ReadCachedBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
-	if out, ok := c.cache.Get(MhString(cid.Hash())); ok {
-		return out, nil
-	}
-
-	out, err := c.top.Get(ctx, cid)
-	if err != nil {
-		return nil, err
-	}
-
-	c.cache.Add(MhString(cid.Hash()), out)
-	return out, nil
-}
-
-func (c *ReadCachedBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
-	if b, ok := c.cache.Get(MhString(cid.Hash())); ok {
-		return len(b.RawData()), nil
-	}
-
-	return c.top.GetSize(ctx, cid)
-}
-
-func (c *ReadCachedBlockstore) Put(ctx context.Context, block blocks.Block) error {
-	c.cache.Add(MhString(block.Cid().Hash()), block)
-	return c.top.Put(ctx, block)
-}
-
-func (c *ReadCachedBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
-	for _, b := range blocks {
-		c.cache.Add(MhString(b.Cid().Hash()), b)
-	}
-
-	return c.top.PutMany(ctx, blocks)
-}
-
-func (c *ReadCachedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
-	return c.top.AllKeysChan(ctx)
-}
-
-func (c *ReadCachedBlockstore) HashOnRead(enabled bool) {
-	c.top.HashOnRead(enabled)
-}
-
-func (c *ReadCachedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
-	return c.top.View(ctx, cid, func(bb []byte) error {
-		blk, err := blocks.NewBlockWithCid(bb, cid)
-		if err != nil {
-			return err
-		}
-
-		c.cache.Add(MhString(cid.Hash()), blk)
-
-		return callback(bb)
-	})
-}
-
-func (c *ReadCachedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
-	for _, ci := range cids {
-		c.cache.Remove(MhString(ci.Hash()))
-	}
-
-	return c.top.DeleteMany(ctx, cids)
-}
-
-func (c *ReadCachedBlockstore) Flush(ctx context.Context) error {
-	return c.top.Flush(ctx)
-}
-
-var _ Blockstore = (*ReadCachedBlockstore)(nil)
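Not part of the diff: a hedged sketch of how the removed ReadCachedBlockstore was meant to be wired up. Its BlockstoreCache interface comment points at lru.Cache; the generic cache from hashicorp/golang-lru v2 happens to satisfy it structurally (Add, Get, Contains and Remove have matching signatures), so a bounded read cache could have been composed like this. The helper name and the lru dependency are assumptions, not code from the repository.

package blockstore

import (
	lru "github.com/hashicorp/golang-lru/v2"
	blocks "github.com/ipfs/go-block-format"
)

// newReadCached wraps top with a bounded, concurrency-safe LRU read cache.
func newReadCached(top Blockstore, size int) (*ReadCachedBlockstore, error) {
	cache, err := lru.New[MhString, blocks.Block](size)
	if err != nil {
		return nil, err
	}
	return NewReadCachedBlockstore(top, cache), nil
}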
154
blockstore/ipfs.go
Normal file
@@ -0,0 +1,154 @@
package blockstore

import (
	"bytes"
	"context"
	"io"

	"github.com/ipfs/boxo/path"
	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multiaddr"
	"github.com/multiformats/go-multihash"
	"golang.org/x/xerrors"

	rpc "github.com/filecoin-project/kubo-api-client"
	iface "github.com/filecoin-project/kubo-api-client/coreiface"
	"github.com/filecoin-project/kubo-api-client/coreiface/options"
)

type IPFSBlockstore struct {
	ctx             context.Context
	api, offlineAPI iface.CoreAPI
}

var _ BasicBlockstore = (*IPFSBlockstore)(nil)

func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) {
	localApi, err := rpc.NewLocalApi()
	if err != nil {
		return nil, xerrors.Errorf("getting local ipfs api: %w", err)
	}
	api, err := localApi.WithOptions(options.Api.Offline(!onlineMode))
	if err != nil {
		return nil, xerrors.Errorf("setting offline mode: %s", err)
	}

	offlineAPI := api
	if onlineMode {
		offlineAPI, err = localApi.WithOptions(options.Api.Offline(true))
		if err != nil {
			return nil, xerrors.Errorf("applying offline mode: %s", err)
		}
	}

	bs := &IPFSBlockstore{
		ctx:        ctx,
		api:        api,
		offlineAPI: offlineAPI,
	}

	return Adapt(bs), nil
}

func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) {
	httpApi, err := rpc.NewApi(maddr)
	if err != nil {
		return nil, xerrors.Errorf("setting remote ipfs api: %w", err)
	}
	api, err := httpApi.WithOptions(options.Api.Offline(!onlineMode))
	if err != nil {
		return nil, xerrors.Errorf("applying offline mode: %s", err)
	}

	offlineAPI := api
	if onlineMode {
		offlineAPI, err = httpApi.WithOptions(options.Api.Offline(true))
		if err != nil {
			return nil, xerrors.Errorf("applying offline mode: %s", err)
		}
	}

	bs := &IPFSBlockstore{
		ctx:        ctx,
		api:        api,
		offlineAPI: offlineAPI,
	}

	return Adapt(bs), nil
}

func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
	return xerrors.Errorf("not supported")
}

func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
	_, err := i.offlineAPI.Block().Stat(ctx, path.FromCid(cid))
	if err != nil {
		// The underlying client is running in Offline mode.
		// Stat() will fail with an err if the block isn't in the
		// blockstore. If that's the case, return false without
		// an error since that's the original intention of this method.
		if err.Error() == "blockservice: key not found" {
			return false, nil
		}
		return false, xerrors.Errorf("getting ipfs block: %w", err)
	}

	return true, nil
}

func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
	rd, err := i.api.Block().Get(ctx, path.FromCid(cid))
	if err != nil {
		return nil, xerrors.Errorf("getting ipfs block: %w", err)
	}

	data, err := io.ReadAll(rd)
	if err != nil {
		return nil, err
	}

	return blocks.NewBlockWithCid(data, cid)
}

func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
	st, err := i.api.Block().Stat(ctx, path.FromCid(cid))
	if err != nil {
		return 0, xerrors.Errorf("getting ipfs block: %w", err)
	}

	return st.Size(), nil
}

func (i *IPFSBlockstore) Put(ctx context.Context, block blocks.Block) error {
	mhd, err := multihash.Decode(block.Cid().Hash())
	if err != nil {
		return err
	}

	_, err = i.api.Block().Put(ctx, bytes.NewReader(block.RawData()),
		options.Block.Hash(mhd.Code, mhd.Length),
		options.Block.Format(multihash.Codes[block.Cid().Type()]))
	return err
}

func (i *IPFSBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
	// TODO: could be done in parallel

	for _, block := range blocks {
		if err := i.Put(ctx, block); err != nil {
			return err
		}
	}

	return nil
}

func (i *IPFSBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
	return nil, xerrors.Errorf("not supported")
}

func (i *IPFSBlockstore) HashOnRead(enabled bool) {
	return // TODO: We could technically support this, but..
}
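Not part of the diff: a hedged usage sketch for the IPFS-backed blockstore added above, assuming the standard lotus import path for the blockstore package; the multiaddr and CID values are illustrative placeholders, not anything referenced in the change set.

package main

import (
	"context"
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/multiformats/go-multiaddr"

	"github.com/filecoin-project/lotus/blockstore"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint for a kubo (go-ipfs) HTTP API.
	maddr, err := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/5001")
	if err != nil {
		panic(err)
	}

	// onlineMode=true lets the daemon fetch missing blocks over the network.
	bs, err := blockstore.NewRemoteIPFSBlockstore(ctx, maddr, true)
	if err != nil {
		panic(err)
	}

	c, _ := cid.Decode("bafkqaaa") // placeholder identity CID
	blk, err := bs.Get(ctx, c)
	fmt.Println(blk, err)
}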
@@ -282,14 +282,14 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
	if ss.checkpointExists() {
		log.Info("found compaction checkpoint; resuming compaction")
		if err := ss.completeCompaction(); err != nil {
-			_ = markSetEnv.Close()
+			markSetEnv.Close() //nolint:errcheck
			return nil, xerrors.Errorf("error resuming compaction: %w", err)
		}
	}
	if ss.pruneCheckpointExists() {
		log.Info("found prune checkpoint; resuming prune")
		if err := ss.completePrune(); err != nil {
-			_ = markSetEnv.Close()
+			markSetEnv.Close() //nolint:errcheck
			return nil, xerrors.Errorf("error resuming prune: %w", err)
		}
	}
@@ -109,13 +109,16 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
	// TODO: ok to use hysteresis with no transitions between 30s and 1m?
	if time.Since(timestamp) < SyncWaitTime {
		/* Chain in sync */
-		if !atomic.CompareAndSwapInt32(&s.outOfSync, 0, 0) {
+		if atomic.CompareAndSwapInt32(&s.outOfSync, 0, 0) {
+			// already in sync, no signaling necessary
+		} else {
			// transition from out of sync to in sync
			s.chainSyncMx.Lock()
			s.chainSyncFinished = true
			s.chainSyncCond.Broadcast()
			s.chainSyncMx.Unlock()
-		} // else already in sync, no signaling necessary
+		}
+
	}
	// 2. protect the new tipset(s)
	s.protectTipSets(apply)
@@ -47,7 +47,7 @@ var (
	PruneThreshold = 7 * build.Finality
)

-// GCHotStore runs online GC on the chain state in the hotstore according the to options specified
+// GCHotstore runs online GC on the chain state in the hotstore according the to options specified
func (s *SplitStore) GCHotStore(opts api.HotGCOpts) error {
	if opts.Moving {
		gcOpts := []bstore.BlockstoreGCOption{bstore.WithFullGC(true)}
@@ -32,7 +32,7 @@ func init() {
	CompactionBoundary = 2
	WarmupBoundary = 0
	SyncWaitTime = time.Millisecond
-	_ = logging.SetLogLevel("splitstore", "DEBUG")
+	logging.SetLogLevel("splitstore", "DEBUG")
}

func testSplitStore(t *testing.T, cfg *Config) {
Binary file not shown.
@@ -1,3 +1,6 @@
+/dns4/lotus-bootstrap.ipfsforce.com/tcp/41778/p2p/12D3KooWGhufNmZHF3sv48aQeS13ng5XVJZ9E6qy2Ms4VzqeUsHk
+/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz
+/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u
/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt
/dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt
/dns4/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST
@@ -12,10 +12,10 @@ import (
	"strconv"
	"strings"

+	"github.com/DataDog/zstd"
	"github.com/ipfs/go-cid"
	cbor "github.com/ipfs/go-ipld-cbor"
	"github.com/ipld/go-car"
-	"github.com/klauspost/compress/zstd"
	"golang.org/x/xerrors"

	actorstypes "github.com/filecoin-project/go-state-types/actors"
@@ -145,10 +145,10 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata,
	)

	if !strings.HasPrefix(bundle, "v") {
-		return nil, xerrors.Errorf("bundle '%q' doesn't start with a 'v'", bundle)
+		return nil, xerrors.Errorf("bundle bundle '%q' doesn't start with a 'v'", bundle)
	}
	if !strings.HasSuffix(bundle, archiveExt) {
-		return nil, xerrors.Errorf("bundle '%q' doesn't end with '%s'", bundle, archiveExt)
+		return nil, xerrors.Errorf("bundle bundle '%q' doesn't end with '%s'", bundle, archiveExt)
	}
	version, err := strconv.ParseInt(bundle[1:len(bundle)-len(archiveExt)], 10, 0)
	if err != nil {
@@ -160,10 +160,7 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata,
	}
	defer fi.Close() //nolint

-	uncompressed, err := zstd.NewReader(fi)
-	if err != nil {
-		return nil, err
-	}
+	uncompressed := zstd.NewReader(fi)
	defer uncompressed.Close() //nolint

	var bundles []*BuiltinActorsMetadata
@@ -258,10 +255,7 @@ func GetEmbeddedBuiltinActorsBundle(version actorstypes.Version, networkBundleNa
	}
	defer fi.Close() //nolint

-	uncompressed, err := zstd.NewReader(fi)
-	if err != nil {
-		return nil, false
-	}
+	uncompressed := zstd.NewReader(fi)
	defer uncompressed.Close() //nolint

	tarReader := tar.NewReader(uncompressed)
@ -6,7 +6,7 @@ import (
|
|||||||
"github.com/ipfs/go-cid"
|
"github.com/ipfs/go-cid"
|
||||||
)
|
)
|
||||||
|
|
||||||
var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
||||||
Network: "butterflynet",
|
Network: "butterflynet",
|
||||||
Version: 8,
|
Version: 8,
|
||||||
|
|
||||||
@ -117,32 +117,9 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
|||||||
"system": MustParseCid("bafk2bzacec3vwj2chzaram3iqupkbfiein5h2l5qiltlrngbju2vg5umelclm"),
|
"system": MustParseCid("bafk2bzacec3vwj2chzaram3iqupkbfiein5h2l5qiltlrngbju2vg5umelclm"),
|
||||||
"verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"),
|
"verifiedregistry": MustParseCid("bafk2bzacedv2irkql7nil3w5v3ohqq3e54w62pxeoppjmaktzokolaaoh5ksu"),
|
||||||
},
|
},
|
||||||
}, {
|
|
||||||
Network: "butterflynet",
|
|
||||||
Version: 13,
|
|
||||||
|
|
||||||
ManifestCid: MustParseCid("bafy2bzacec75zk7ufzwx6tg5avls5fxdjx5asaqmd2bfqdvkqrkzoxgyflosu"),
|
|
||||||
Actors: map[string]cid.Cid{
|
|
||||||
"account": MustParseCid("bafk2bzacedl533kwbzouqxibejpwp6syfdekvmzy4vmmno6j4iaydbdmv4xek"),
|
|
||||||
"cron": MustParseCid("bafk2bzacecimv5xnuwyoqgxk26qt4xqpgntleret475pnh35s3vvhqtdct4ow"),
|
|
||||||
"datacap": MustParseCid("bafk2bzacebpdd4ctavhs7wkcykfahpifct3p4hbptgtf4jfrqcp2trtlygvow"),
|
|
||||||
"eam": MustParseCid("bafk2bzaceahw5rrgj7prgbnmn237di7ymjz2ssea32wr525jydpfrwpuhs67m"),
|
|
||||||
"ethaccount": MustParseCid("bafk2bzacebrslcbew5mq3le2zsn36xqxd4gt5hryeoslxnuqwgw3rhuwh6ygu"),
|
|
||||||
"evm": MustParseCid("bafk2bzaced5smz4lhpem4mbr7igcskv3e5qopbdp7dqshww2qs4ahacgzjzo4"),
|
|
||||||
"init": MustParseCid("bafk2bzacedgj6hawhdw2ot2ufisci374o2bq6bfkvlvdt6q7s3uoe5ffyv43k"),
|
|
||||||
"multisig": MustParseCid("bafk2bzacectnnnpwyqiccaymy3h6ghu74ghjrqyhtqv5odfd4opivzebjj6to"),
|
|
||||||
"paymentchannel": MustParseCid("bafk2bzaceckhx44jawhzhkz6k23gfnv2gcutgb4j4ekhonj2plwaent4b2tpk"),
|
|
||||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
|
||||||
"reward": MustParseCid("bafk2bzacebbs3rlg7y3wbvxrj4wgbsqmasw4ksbbr3lyqbkaxj2t25qz6zzuy"),
|
|
||||||
"storagemarket": MustParseCid("bafk2bzaced3zmxsmlhp2nsiwkxcp2ugonbsebcd53t7htzo2jcoidvu464xmm"),
|
|
||||||
"storageminer": MustParseCid("bafk2bzacebedx7iaa2ruspxvghkg46ez7un5b7oiijjtnvddq2aot5wk7p7ry"),
|
|
||||||
"storagepower": MustParseCid("bafk2bzacebvne7m2l3hxxw4xa6oujol75x35yqpnlqiwx74jilyrop4cs7cse"),
|
|
||||||
"system": MustParseCid("bafk2bzaceacjmlxrvydlud77ilpzbscez46yedx6zjsj6olxsdeuv6d4x4cwe"),
|
|
||||||
"verifiedregistry": MustParseCid("bafk2bzacebs5muoq7ft2wgqojhjio7a4vltbyprqkmlr43ojlzbil4nwvj3jg"),
|
|
||||||
},
|
|
||||||
}, {
|
}, {
|
||||||
Network: "butterflynet",
|
Network: "butterflynet",
|
||||||
Version: 14,
|
Version: 13,
|
||||||
BundleGitTag: "v13.0.0",
|
BundleGitTag: "v13.0.0",
|
||||||
ManifestCid: MustParseCid("bafy2bzacec75zk7ufzwx6tg5avls5fxdjx5asaqmd2bfqdvkqrkzoxgyflosu"),
|
ManifestCid: MustParseCid("bafy2bzacec75zk7ufzwx6tg5avls5fxdjx5asaqmd2bfqdvkqrkzoxgyflosu"),
|
||||||
Actors: map[string]cid.Cid{
|
Actors: map[string]cid.Cid{
|
||||||
@ -269,32 +246,9 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
|||||||
"system": MustParseCid("bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376"),
|
"system": MustParseCid("bafk2bzacecioupndtcnyw6iq2hbrxag3aufvczlv5nobnfbkbywqzcyfaa376"),
|
||||||
"verifiedregistry": MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"),
|
"verifiedregistry": MustParseCid("bafk2bzaceavldupmf7bimeeacs67z5xdfdlfca6p7sn6bev3mt5ggepfqvhqo"),
|
||||||
},
|
},
|
||||||
}, {
|
|
||||||
Network: "calibrationnet",
|
|
||||||
Version: 13,
|
|
||||||
|
|
||||||
ManifestCid: MustParseCid("bafy2bzacect4ktyujrwp6mjlsitnpvuw2pbuppz6w52sfljyo4agjevzm75qs"),
|
|
||||||
Actors: map[string]cid.Cid{
|
|
||||||
"account": MustParseCid("bafk2bzaceb3j36ri5y5mfklgp5emlvrms6g4733ss2j3l7jismrxq6ng3tcc6"),
|
|
||||||
"cron": MustParseCid("bafk2bzaceaz6rocamdxehgpwcbku6wlapwpgzyyvkrploj66mlqptsulf52bs"),
|
|
||||||
"datacap": MustParseCid("bafk2bzacea22nv5g3yngpxvonqfj4r2nkfk64y6yw2malicm7odk77x7zuads"),
|
|
||||||
"eam": MustParseCid("bafk2bzaceatqtjzj7623i426noaslouvluhz6e3md3vvquqzku5qj3532uaxg"),
|
|
||||||
"ethaccount": MustParseCid("bafk2bzacean3hs7ga5csw6g3uu7watxfnqv5uvxviebn3ba6vg4sagwdur5pu"),
|
|
||||||
"evm": MustParseCid("bafk2bzacec5ibmbtzuzjgwjmksm2n6zfq3gkicxqywwu7tsscqgdzajpfctxk"),
|
|
||||||
"init": MustParseCid("bafk2bzaced5sq72oemz6qwi6yssxwlos2g54zfprslrx5qfhhx2vlgsbvdpcs"),
|
|
||||||
"multisig": MustParseCid("bafk2bzacedbgei6jkx36fwdgvoohce4aghvpohqdhoco7p4thszgssms7olv2"),
|
|
||||||
"paymentchannel": MustParseCid("bafk2bzaceasmgmfsi4mjanxlowsub65fmevhzky4toeqbtw4kp6tmu4kxjpgq"),
|
|
||||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
|
||||||
"reward": MustParseCid("bafk2bzacedjyp6ll5ez27dfgldjj4tntxfvyp4pa5zkk7s5uhipzqjyx2gmuc"),
|
|
||||||
"storagemarket": MustParseCid("bafk2bzaceabolct6qdnefwcrtati2us3sxtxfghyqk6aamfhl6byyefmtssqi"),
|
|
||||||
"storageminer": MustParseCid("bafk2bzaceckzw3v7wqliyggvjvihz4wywchnnsie4frfvkm3fm5znb64mofri"),
|
|
||||||
"storagepower": MustParseCid("bafk2bzacea7t4wynzjajl442mpdqbnh3wusjusqtnzgpvefvweh4n2tgzgqhu"),
|
|
||||||
"system": MustParseCid("bafk2bzacedjnrb5glewazsxpcx6rwiuhl4kwrfcqolyprn6rrjtlzmthlhdq6"),
|
|
||||||
"verifiedregistry": MustParseCid("bafk2bzacebj2zdquagzy2xxn7up574oemg3w7ed3fe4aujkyhgdwj57voesn2"),
|
|
||||||
},
|
|
||||||
}, {
|
}, {
|
||||||
Network: "calibrationnet",
|
Network: "calibrationnet",
|
||||||
Version: 14,
|
Version: 13,
|
||||||
BundleGitTag: "v13.0.0",
|
BundleGitTag: "v13.0.0",
|
||||||
ManifestCid: MustParseCid("bafy2bzacect4ktyujrwp6mjlsitnpvuw2pbuppz6w52sfljyo4agjevzm75qs"),
|
ManifestCid: MustParseCid("bafy2bzacect4ktyujrwp6mjlsitnpvuw2pbuppz6w52sfljyo4agjevzm75qs"),
|
||||||
Actors: map[string]cid.Cid{
|
Actors: map[string]cid.Cid{
|
||||||
@ -430,32 +384,9 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
|
|||||||
"system": MustParseCid("bafk2bzacedye5j5uxox7knb6zlnhseaadztyav76mjbyk5qslhhbpiy5cdtt2"),
|
"system": MustParseCid("bafk2bzacedye5j5uxox7knb6zlnhseaadztyav76mjbyk5qslhhbpiy5cdtt2"),
|
||||||
"verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"),
|
"verifiedregistry": MustParseCid("bafk2bzacecduww5pirr7dvaijjijw4gf6ygf7vipgxh4scvv6vseo46gueb46"),
|
||||||
},
|
},
|
||||||
}, {
|
|
||||||
Network: "caterpillarnet",
|
|
||||||
Version: 13,
|
|
||||||
|
|
||||||
ManifestCid: MustParseCid("bafy2bzacedu7kk2zngxp7y3lynhtaht6vgadgn5jzkxe5nuowtwzasnogx63w"),
|
|
||||||
Actors: map[string]cid.Cid{
|
|
||||||
"account": MustParseCid("bafk2bzacecro3uo6ypqhfzwdhnamzcole5qmhrbkx7qny6t2qsrcpqxelt6s2"),
|
|
||||||
"cron": MustParseCid("bafk2bzaceam3kci46y4siltbw7f4itoap34kp7b7pvn2fco5s2bvnotomwdbe"),
|
|
||||||
"datacap": MustParseCid("bafk2bzacecmtdspcbqmmjtsaz4vucuqoqjqfsgxjonns7tom7eblkngbcm7bw"),
|
|
||||||
"eam": MustParseCid("bafk2bzaceaudqhrt7djewopqdnryvwxagfufyt7ja4gdvovrxbh6edh6evgrw"),
|
|
||||||
"ethaccount": MustParseCid("bafk2bzaced676ds3z6xe333wr7frwq3f2iq5kjwp4okl3te6rne3xf7kuqrwm"),
|
|
||||||
"evm": MustParseCid("bafk2bzacebeih4jt2s6mel6x4hje7xmnugh6twul2a5axx4iczu7fu4wcdi6k"),
|
|
||||||
"init": MustParseCid("bafk2bzaceba7vvuzzwj5wqnq2bvpbgtxup53mhr3qybezbllftnxvpqbfymxo"),
|
|
||||||
"multisig": MustParseCid("bafk2bzaceapkajhnqoczrgry5javqbl7uebgmsbpqqfemzc4yb5q2dqia2qog"),
|
|
||||||
"paymentchannel": MustParseCid("bafk2bzacebg7xq4ca22gafmdbkcq357x7v6slflib4h3fnj4amsovg6ulqg3o"),
|
|
||||||
"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
|
|
||||||
"reward": MustParseCid("bafk2bzaceajt4idf26ffnyipybcib55fykjxnek7oszkqzi7lu7mbgijmkgos"),
|
|
||||||
"storagemarket": MustParseCid("bafk2bzaceadfmay7pyl7osjsdmrireafasnjnoziacljy5ewrcsxpp56kzqbw"),
|
|
||||||
"storageminer": MustParseCid("bafk2bzaceardbn5a7aq5jxl7efr4btmsbl7txnxm4hrrd3llyhujuc2cr5vcs"),
|
|
||||||
"storagepower": MustParseCid("bafk2bzacear4563jznjqyseoy42xl6kenyqk6umv6xl3bp5bsjb3hbs6sp6bm"),
|
|
||||||
"system": MustParseCid("bafk2bzacecc5oavxivfnvirx2g7megpdf6lugooyoc2wijloju247xzjcdezy"),
|
|
||||||
"verifiedregistry": MustParseCid("bafk2bzacebnkdt42mpf5emypo6iroux3hszfh5yt54v2mmnnura3ketholly4"),
|
|
||||||
},
|
|
||||||
}, {
|
}, {
|
||||||
Network: "caterpillarnet",
|
Network: "caterpillarnet",
|
||||||
Version: 14,
|
Version: 13,
|
||||||
BundleGitTag: "v13.0.0",
|
BundleGitTag: "v13.0.0",
|
||||||
ManifestCid: MustParseCid("bafy2bzacedu7kk2zngxp7y3lynhtaht6vgadgn5jzkxe5nuowtwzasnogx63w"),
|
ManifestCid: MustParseCid("bafy2bzacedu7kk2zngxp7y3lynhtaht6vgadgn5jzkxe5nuowtwzasnogx63w"),
|
||||||
Actors: map[string]cid.Cid{
|
Actors: map[string]cid.Cid{
|
||||||
@@ -582,32 +513,9 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
"system": MustParseCid("bafk2bzacecnau5wddulbsvwn75tc3w75jrlvkybgrlxs4ngonqab6xq3eowvg"),
"verifiedregistry": MustParseCid("bafk2bzacec37mddea65nvh4htsagtryfa3sq6i67utcupslyhzbhjhoy6hopa"),
},
-}, {
-Network: "devnet",
-Version: 13,
-
-ManifestCid: MustParseCid("bafy2bzacecn7uxgehrqbcs462ktl2h23u23cmduy2etqj6xrd6tkkja56fna4"),
-Actors: map[string]cid.Cid{
-"account": MustParseCid("bafk2bzacebev3fu5geeehpx577b3kvza4xsmmggmepjj7rlsnr27hpoq27q2i"),
-"cron": MustParseCid("bafk2bzacedalzqahtuz2bmnf7uawbcujfhhe5xzv5ys5ufadu6ggs3tcu6lsy"),
-"datacap": MustParseCid("bafk2bzaceb7ou2vn7ac4xidespoowq2q5w7ognr7s4ujy3xzzgiishajpe7le"),
-"eam": MustParseCid("bafk2bzacedqic2qskattorj4svf6mbto2k76ej3ll3ugsyorqramrg7rpq3by"),
-"ethaccount": MustParseCid("bafk2bzaceaoad7iknpywijigv2h3jyvkijff2oxvohzue533v5hby3iix5vdu"),
-"evm": MustParseCid("bafk2bzacecjgiw26gagsn6a7tffkrgoor4zfgzfokp76u6cwervtmvjbopmwg"),
-"init": MustParseCid("bafk2bzaced2obubqojxggeddr246cpwtyzi6knnq52jsvsc2fs3tuk2kh6dtg"),
-"multisig": MustParseCid("bafk2bzacebquruzb6zho45orbdkku624t6w6jt4tudaqzraz4yh3li3jfstpg"),
-"paymentchannel": MustParseCid("bafk2bzaceaydrilyxvflsuzr24hmw32qwz6sy4hgls73bhpveydcsqskdgpca"),
-"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
-"reward": MustParseCid("bafk2bzaceb74owpuzdddqoj2tson6ymbyuguqrnqefyiaxqvwm4ygitpabjrq"),
-"storagemarket": MustParseCid("bafk2bzaceaw6dslv6pfqha4ynghq2imij5khnnjrie22kmfgtpie3bvxho6jq"),
-"storageminer": MustParseCid("bafk2bzacecsputz6xygjfyrvx2d7bxkpp7b5v4icrmpckec7gnbabx2w377qs"),
-"storagepower": MustParseCid("bafk2bzaceceyaa5yjwhxvvcqouob4l746zp5nesivr6enhtpimakdtby6kafi"),
-"system": MustParseCid("bafk2bzaceaxg6k5vuozxlemfi5hv663m6jcawzu5puboo4znj73i36e3tsovs"),
-"verifiedregistry": MustParseCid("bafk2bzacea2czkb4vt2iiiwdb6e57qfwqse4mk2pcyvwjmdl5ojbnla57oh2u"),
-},
}, {
Network: "devnet",
-Version: 14,
+Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzacecn7uxgehrqbcs462ktl2h23u23cmduy2etqj6xrd6tkkja56fna4"),
Actors: map[string]cid.Cid{
@@ -757,32 +665,9 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
"system": MustParseCid("bafk2bzacebfqrja2hip7esf4eafxjmu6xcogoqu5xxtgdg7xa5szgvvdguchu"),
"verifiedregistry": MustParseCid("bafk2bzacedudgflxc75c77c6zkmfyq4u2xuk7k6xw6dfdccarjrvxx453b77q"),
},
-}, {
-Network: "mainnet",
-Version: 13,
-
-ManifestCid: MustParseCid("bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e"),
-Actors: map[string]cid.Cid{
-"account": MustParseCid("bafk2bzacedxnbtlsqdk76fsfmnhyvsblwyfducerwwtp3mqtx2wbrvs5idl52"),
-"cron": MustParseCid("bafk2bzacebbopddyn5csb3fsuhh2an4ttd23x6qnwixgohlirj5ahtcudphyc"),
-"datacap": MustParseCid("bafk2bzaceah42tfnhd7xnztawgf46gbvc3m2gudoxshlba2ucmmo2vy67t7ci"),
-"eam": MustParseCid("bafk2bzaceb23bhvvcjsth7cn7vp3gbaphrutsaz7v6hkls3ogotzs4bnhm4mk"),
-"ethaccount": MustParseCid("bafk2bzaceautge6zhuy6jbj3uldwoxwhpywuon6z3xfvmdbzpbdribc6zzmei"),
-"evm": MustParseCid("bafk2bzacedq6v2lyuhgywhlllwmudfj2zufzcauxcsvvd34m2ek5xr55mvh2q"),
-"init": MustParseCid("bafk2bzacedr4xacm3fts4vilyeiacjr2hpmwzclyzulbdo24lrfxbtau2wbai"),
-"multisig": MustParseCid("bafk2bzacecr5zqarfqak42xqcfeulsxlavcltawsx2fvc7zsjtby6ti4b3wqc"),
-"paymentchannel": MustParseCid("bafk2bzacebntdhfmyc24e7tm52ggx5tnw4i3hrr3jmllsepv3mibez4hywsa2"),
-"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
-"reward": MustParseCid("bafk2bzacedq4q2kwkruu4xm7rkyygumlbw2yt4nimna2ivea4qarvtkohnuwu"),
-"storagemarket": MustParseCid("bafk2bzacebjtoltdviyznpj34hh5qp6u257jnnbjole5rhqfixm7ug3epvrfu"),
-"storageminer": MustParseCid("bafk2bzacebf4rrqyk7gcfggggul6nfpzay7f2ordnkwm7z2wcf4mq6r7i77t2"),
-"storagepower": MustParseCid("bafk2bzacecjy4dkulvxppg3ocbmeixe2wgg6yxoyjxrm4ko2fm3uhpvfvam6e"),
-"system": MustParseCid("bafk2bzacecyf523quuq2kdjfdvyty446z2ounmamtgtgeqnr3ynlu5cqrlt6e"),
-"verifiedregistry": MustParseCid("bafk2bzacedkxehp7y7iyukbcje3wbpqcvufisos6exatkanyrbotoecdkrbta"),
-},
}, {
Network: "mainnet",
-Version: 14,
+Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e"),
Actors: map[string]cid.Cid{
@@ -909,32 +794,9 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
"system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"),
"verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"),
},
-}, {
-Network: "testing",
-Version: 13,
-
-ManifestCid: MustParseCid("bafy2bzacedg47dqxmtgzjch6i42kth72esd7w23gujyd6c6oppg3n6auag5ou"),
-Actors: map[string]cid.Cid{
-"account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"),
-"cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"),
-"datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"),
-"eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"),
-"ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"),
-"evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"),
-"init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"),
-"multisig": MustParseCid("bafk2bzacebmftoql6dcyqf54xznwjg2bfgdsi67spqquwslpvvtvcx6qenhz2"),
-"paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"),
-"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
-"reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"),
-"storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"),
-"storageminer": MustParseCid("bafk2bzaceailclue4dba2edjethfjw6ycufcwsx4qjjmgsh77xcyprmogdjvu"),
-"storagepower": MustParseCid("bafk2bzaceaqw6dhdjlqovhk3p4lb4sb25i5d6mhln2ir5m7tj6m4fegkgkinw"),
-"system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"),
-"verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"),
-},
}, {
Network: "testing",
-Version: 14,
+Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzacedg47dqxmtgzjch6i42kth72esd7w23gujyd6c6oppg3n6auag5ou"),
Actors: map[string]cid.Cid{
@@ -1061,32 +923,9 @@ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
"system": MustParseCid("bafk2bzacecp4roanbxq3bflftlkipsoqqxio5etjjnzxus5pcu7lq43fnxb34"),
"verifiedregistry": MustParseCid("bafk2bzaceandytrgcnuvizfi47sijbqh6c243vjtzlzumexm6kjv7s7hye45g"),
},
-}, {
-Network: "testing-fake-proofs",
-Version: 13,
-
-ManifestCid: MustParseCid("bafy2bzaceaf7fz33sp2i5ag5xg5ompn3dwppqlbwfacrwuvzaqdbqrtni7m5q"),
-Actors: map[string]cid.Cid{
-"account": MustParseCid("bafk2bzaceb3tncntgeqvzzr5fzhvpsc5ntv3tpqrsh4jst4irfyzpkdyigibc"),
-"cron": MustParseCid("bafk2bzacecwwasmhixpgtshczm5cfspwciyawc25mrefknqhlxfrd6m57tqmc"),
-"datacap": MustParseCid("bafk2bzaceckj66by6eohjrybazh5cymmovgl5bmikpvzki2q7huwk2fweoef2"),
-"eam": MustParseCid("bafk2bzaceafzm65wvnaam3775homn4vzsv7odftn5tkifmn44wd2t6gupy63y"),
-"ethaccount": MustParseCid("bafk2bzaced4q7m4mha2dsezhwub3ru64rgimkg52t25ul4gnekax6uq7hbkqu"),
-"evm": MustParseCid("bafk2bzaceakpknw5cuizil3552jr5z35rs6ijaignjigciswtok67drhzdss6"),
-"init": MustParseCid("bafk2bzacec7mbkemwugyg2p4oy2xgnovykk4dnsu5ym4wkreooujvxfsxbo3i"),
-"multisig": MustParseCid("bafk2bzacedy4vldq4viv6bzzh4fueip3by3axsbgbh655lashddgumknc6pvs"),
-"paymentchannel": MustParseCid("bafk2bzaceau57wpiiikea6pu5om4ryyqjrxjzfksfl4reqosnohydzv3pf4qq"),
-"placeholder": MustParseCid("bafk2bzacedfvut2myeleyq67fljcrw4kkmn5pb5dpyozovj7jpoez5irnc3ro"),
-"reward": MustParseCid("bafk2bzacecvlcdgbqlk3dyfzkcjrywg2th5bmn7ilijifikulpxr4ffcrw23o"),
-"storagemarket": MustParseCid("bafk2bzacecgj53dwqla7eiubs2uiza7cgxkxtefxkfpjontj5jxefl3a4i2nq"),
-"storageminer": MustParseCid("bafk2bzaceb6atn3k6yhmskgmc3lgfiwpzpfmaxzacohtnb2hivme2oroycqr6"),
-"storagepower": MustParseCid("bafk2bzacedameh56mp2g4y7nprhax5sddbzcmpk5p7l523l45rtn2wjc6ah4e"),
-"system": MustParseCid("bafk2bzaceby6aiiosnrtb5kzlmrvd4k3o27oo3idmbd6llydz2uqibbp23pzq"),
-"verifiedregistry": MustParseCid("bafk2bzaceadw6mxuyb6ipaq3fhekk7u5osyyiis3c4wbkvysy2ut6qfsua5zs"),
-},
}, {
Network: "testing-fake-proofs",
-Version: 14,
+Version: 13,
BundleGitTag: "v13.0.0",
ManifestCid: MustParseCid("bafy2bzaceaf7fz33sp2i5ag5xg5ompn3dwppqlbwfacrwuvzaqdbqrtni7m5q"),
Actors: map[string]cid.Cid{
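The table above maps each network and actors version to a manifest CID and a per-actor code CID. As a rough illustration of how such a table is typically consumed, here is a minimal sketch of a lookup over records shaped like these entries; the type and function names below are illustrative stand-ins, not the repository's own API.

package main

import (
	"fmt"

	"github.com/ipfs/go-cid"
)

// actorsMetadata mirrors the fields visible in the entries above.
type actorsMetadata struct {
	Network     string
	Version     uint
	ManifestCid cid.Cid
	Actors      map[string]cid.Cid
}

// findActorCode scans the metadata slice for the code CID of a named actor
// on a given network/version pair.
func findActorCode(entries []actorsMetadata, network string, version uint, name string) (cid.Cid, error) {
	for _, e := range entries {
		if e.Network == network && e.Version == version {
			if c, ok := e.Actors[name]; ok {
				return c, nil
			}
			return cid.Undef, fmt.Errorf("actor %q not in bundle %s/v%d", name, network, version)
		}
	}
	return cid.Undef, fmt.Errorf("no bundle metadata for %s/v%d", network, version)
}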
@@ -1,109 +0,0 @@ (entire file removed)
//go:build release
// +build release

package build_test

import (
"archive/tar"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
"strings"
"testing"

"github.com/ipfs/go-cid"
"github.com/ipld/go-car/v2"
"github.com/klauspost/compress/zstd"
"github.com/stretchr/testify/require"

actorstypes "github.com/filecoin-project/go-state-types/actors"

"github.com/filecoin-project/lotus/build"
)

func TestEmbeddedBuiltinActorsMetadata(t *testing.T) {
subjectsByVersionByNetworks := make(map[actorstypes.Version]map[string]*build.BuiltinActorsMetadata)
for _, subject := range build.EmbeddedBuiltinActorsMetadata {
if subject.BundleGitTag == "" {
// BundleGitTag is required to verify the SHA-256 checksum.
// The pack script only includes this for the latest network version, and it is good enough to only
// check the latest network version metadata. Hence the skip.
continue
}
v, ok := subjectsByVersionByNetworks[subject.Version]
if !ok {
v = make(map[string]*build.BuiltinActorsMetadata)
}
v[subject.Network] = subject
subjectsByVersionByNetworks[subject.Version] = v
}

for version, networks := range subjectsByVersionByNetworks {
cachedCar, err := os.Open(fmt.Sprintf("./actors/v%v.tar.zst", version))
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, cachedCar.Close()) })
zstReader, err := zstd.NewReader(cachedCar)
require.NoError(t, err)
tarReader := tar.NewReader(zstReader)
for {
header, err := tarReader.Next()
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err)

network := strings.TrimSuffix(strings.TrimPrefix(header.Name, "builtin-actors-"), ".car")
subject, found := networks[network]
if !found {
continue
}

shaURL := fmt.Sprintf("https://github.com/filecoin-project/builtin-actors/releases/download/%s/builtin-actors-%s.sha256", subject.BundleGitTag, subject.Network)
resp, err := http.Get(shaURL)
require.NoError(t, err, "failed to retrieve CAR SHA")
require.Equal(t, http.StatusOK, resp.StatusCode, "unexpected response status code while retrieving CAR SHA")

respBody, err := io.ReadAll(resp.Body)
require.NoError(t, resp.Body.Close())
require.NoError(t, err)
fields := strings.Fields(string(respBody))
require.Len(t, fields, 2)
wantShaHex := fields[0]

hasher := sha256.New()
reader, err := car.NewBlockReader(io.TeeReader(tarReader, hasher))
require.NoError(t, err)

require.EqualValues(t, 1, reader.Version)
require.Len(t, reader.Roots, 1, "expected exactly one root CID for builtin actors bundle network %s, version %v", subject.Network, subject.Version)
require.True(t, reader.Roots[0].Equals(subject.ManifestCid), "manifest CID does not match")

subjectActorsByCid := make(map[cid.Cid]string)
for name, c := range subject.Actors {
subjectActorsByCid[c] = name
}
for {
next, err := reader.Next()
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err)
name, found := subjectActorsByCid[next.Cid()]
if found {
t.Logf("OK: %sv%v/%s -> %s", subject.Network, subject.Version, name, next.Cid())
delete(subjectActorsByCid, next.Cid())
}
}
require.Empty(t, subjectActorsByCid, "ZST CAR bundle did not contain CIDs for all actors; missing: %v", subjectActorsByCid)

gotShaHex := hex.EncodeToString(hasher.Sum(nil))
require.Equal(t, wantShaHex, gotShaHex, "SHA-256 digest of ZST CAR bundle does not match builtin-actors release")
delete(networks, network)
}
require.Empty(t, networks, "CAR bundle did not contain CIDs for network; missing: %v", networks)
}
}
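The removed test above verifies a bundle by streaming it through an io.TeeReader so that every byte consumed by the CAR reader is also fed into a SHA-256 hasher, then comparing the digest against the published checksum. A minimal sketch of that hashing pattern, with a placeholder path and digest rather than the test's real inputs:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// verifySHA256 reads a file while hashing it via io.TeeReader and compares
// the result against an expected hex digest.
func verifySHA256(path, wantHex string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	// Every byte read from tee is also written into the hasher.
	tee := io.TeeReader(f, h)
	if _, err := io.Copy(io.Discard, tee); err != nil { // stand-in for the real CAR decoding
		return err
	}
	got := hex.EncodeToString(h.Sum(nil))
	if got != wantHex {
		return fmt.Errorf("digest mismatch: got %s, want %s", got, wantHex)
	}
	return nil
}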
@@ -67,10 +67,12 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
Servers: []string{
"https://pl-eu.testnet.drand.sh",
"https://pl-us.testnet.drand.sh",
+"https://pl-sin.testnet.drand.sh",
},
Relays: []string{
"/dnsaddr/pl-eu.testnet.drand.sh/",
"/dnsaddr/pl-us.testnet.drand.sh/",
+"/dnsaddr/pl-sin.testnet.drand.sh/",
},
IsChained: true,
ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`,
@@ -2,6 +2,7 @@ package build

import (
"bytes"
+"compress/gzip"
"embed"
"encoding/json"

@@ -11,9 +12,17 @@ import (
//go:embed openrpc
var openrpcfs embed.FS

-func mustReadOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
+func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
+zr, err := gzip.NewReader(bytes.NewBuffer(data))
+if err != nil {
+log.Fatal(err)
+}
m := apitypes.OpenRPCDocument{}
-err := json.NewDecoder(bytes.NewBuffer(data)).Decode(&m)
+err = json.NewDecoder(zr).Decode(&m)
+if err != nil {
+log.Fatal(err)
+}
+err = zr.Close()
if err != nil {
log.Fatal(err)
}
@@ -21,33 +30,33 @@ func mustReadOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
}

func OpenRPCDiscoverJSON_Full() apitypes.OpenRPCDocument {
-data, err := openrpcfs.ReadFile("openrpc/full.json")
+data, err := openrpcfs.ReadFile("openrpc/full.json.gz")
if err != nil {
panic(err)
}
-return mustReadOpenRPCDocument(data)
+return mustReadGzippedOpenRPCDocument(data)
}

func OpenRPCDiscoverJSON_Miner() apitypes.OpenRPCDocument {
-data, err := openrpcfs.ReadFile("openrpc/miner.json")
+data, err := openrpcfs.ReadFile("openrpc/miner.json.gz")
if err != nil {
panic(err)
}
-return mustReadOpenRPCDocument(data)
+return mustReadGzippedOpenRPCDocument(data)
}

func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument {
-data, err := openrpcfs.ReadFile("openrpc/worker.json")
+data, err := openrpcfs.ReadFile("openrpc/worker.json.gz")
if err != nil {
panic(err)
}
-return mustReadOpenRPCDocument(data)
+return mustReadGzippedOpenRPCDocument(data)
}

func OpenRPCDiscoverJSON_Gateway() apitypes.OpenRPCDocument {
-data, err := openrpcfs.ReadFile("openrpc/gateway.json")
+data, err := openrpcfs.ReadFile("openrpc/gateway.json.gz")
if err != nil {
panic(err)
}
-return mustReadOpenRPCDocument(data)
+return mustReadGzippedOpenRPCDocument(data)
}
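The hunk above switches the embedded OpenRPC documents to gzip-compressed files that are decompressed and JSON-decoded at load time. A minimal sketch of that embed-plus-gzip pattern using a generic document type; the embedded path is hypothetical and must exist at build time for go:embed to compile.

package main

import (
	"bytes"
	"compress/gzip"
	"embed"
	"encoding/json"
)

//go:embed openrpc/spec.json.gz
var specFS embed.FS

// readGzippedJSON decompresses an embedded gzip blob and decodes the JSON
// inside it into a generic document.
func readGzippedJSON(path string) (map[string]interface{}, error) {
	data, err := specFS.ReadFile(path)
	if err != nil {
		return nil, err
	}
	zr, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	defer zr.Close()

	doc := map[string]interface{}{}
	if err := json.NewDecoder(zr).Decode(&doc); err != nil {
		return nil, err
	}
	return doc, nil
}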
24283 build/openrpc/full.json
File diff suppressed because it is too large.
BIN build/openrpc/full.json.gz (Normal file)
Binary file not shown.
File diff suppressed because it is too large.
BIN build/openrpc/gateway.json.gz (Normal file)
Binary file not shown.
File diff suppressed because it is too large.
BIN build/openrpc/miner.json.gz (Normal file)
Binary file not shown.
File diff suppressed because it is too large.
BIN build/openrpc/worker.json.gz (Normal file)
Binary file not shown.
@@ -28,31 +28,13 @@ var PanicReportingPath = "panic-reports"
// the lotus journal to be included in the panic report.
var PanicReportJournalTail = defaultJournalTail

-// GenerateNodePanicReport produces a timestamped dump of the application state
+// GeneratePanicReport produces a timestamped dump of the application state
// for inspection and debugging purposes. Call this function from any place
// where a panic or severe error needs to be examined. `persistPath` is the
// path where the reports should be saved. `repoPath` is the path where the
// journal should be read from. `label` is an optional string to include
// next to the report timestamp.
-//
-// This function should be called for panics originating from the Lotus daemon.
-func GenerateNodePanicReport(persistPath, repoPath, label string) {
-generatePanicReport(NodeUserVersion(), persistPath, repoPath, label)
-}
-
-// GenerateMinerPanicReport produces a timestamped dump of the application state
-// for inspection and debugging purposes. Call this function from any place
-// where a panic or severe error needs to be examined. `persistPath` is the
-// path where the reports should be saved. `repoPath` is the path where the
-// journal should be read from. `label` is an optional string to include
-// next to the report timestamp.
-//
-// This function should be called for panics originating from the Lotus miner.
-func GenerateMinerPanicReport(persistPath, repoPath, label string) {
-generatePanicReport(MinerUserVersion(), persistPath, repoPath, label)
-}
-
-func generatePanicReport(buildVersion BuildVersion, persistPath, repoPath, label string) {
+func GeneratePanicReport(persistPath, repoPath, label string) {
// make sure we always dump the latest logs on the way out
// especially since we're probably panicking
defer panicLog.Sync() //nolint:errcheck
@@ -82,21 +64,21 @@ func generatePanicReport(buildVersion BuildVersion, persistPath, repoPath, label
return
}

-writeAppVersion(buildVersion, filepath.Join(reportPath, "version"))
+writeAppVersion(filepath.Join(reportPath, "version"))
writeStackTrace(filepath.Join(reportPath, "stacktrace.dump"))
writeProfile("goroutines", filepath.Join(reportPath, "goroutines.pprof.gz"))
writeProfile("heap", filepath.Join(reportPath, "heap.pprof.gz"))
writeJournalTail(PanicReportJournalTail, repoPath, filepath.Join(reportPath, "journal.ndjson"))
}

-func writeAppVersion(buildVersion BuildVersion, file string) {
+func writeAppVersion(file string) {
f, err := os.Create(file)
if err != nil {
panicLog.Error(err.Error())
}
defer f.Close() //nolint:errcheck

-versionString := []byte(string(buildVersion) + BuildTypeString() + CurrentCommit + "\n")
+versionString := []byte(BuildVersion + BuildTypeString() + CurrentCommit + "\n")
if _, err := f.Write(versionString); err != nil {
panicLog.Error(err.Error())
}
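The report writer above dumps goroutine and heap profiles as gzip-compressed pprof files. A minimal sketch of that pattern using the standard library's runtime/pprof named profiles; this is an illustration, not the repository's writeProfile implementation.

package main

import (
	"compress/gzip"
	"fmt"
	"os"
	"runtime/pprof"
)

// writeGzippedProfile dumps a named runtime profile (for example "goroutine"
// or "heap") into a gzip-compressed .pprof.gz file.
func writeGzippedProfile(name, path string) error {
	p := pprof.Lookup(name)
	if p == nil {
		return fmt.Errorf("unknown profile %q", name)
	}
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	zw := gzip.NewWriter(f)
	defer zw.Close()
	return p.WriteTo(zw, 0)
}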
@@ -23,7 +23,7 @@ var NetworkBundle = "devnet"
var BundleOverrides map[actorstypes.Version]string
var ActorDebugging = true

-var GenesisNetworkVersion = network.Version22
+var GenesisNetworkVersion = network.Version21

var UpgradeBreezeHeight = abi.ChainEpoch(-1)

@@ -67,11 +67,9 @@ var UpgradeThunderHeight = abi.ChainEpoch(-23)

var UpgradeWatermelonHeight = abi.ChainEpoch(-24)

-var UpgradeDragonHeight = abi.ChainEpoch(-24)
+var UpgradeDragonHeight = abi.ChainEpoch(20)

-var UpgradePhoenixHeight = abi.ChainEpoch(-25)
+var UpgradePhoenixHeight = UpgradeDragonHeight + 120

-var UpgradeAussieHeight = abi.ChainEpoch(200)
-
// This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -100
@@ -156,7 +154,6 @@ func init() {
UpgradeThunderHeight = getUpgradeHeight("LOTUS_THUNDER_HEIGHT", UpgradeThunderHeight)
UpgradeWatermelonHeight = getUpgradeHeight("LOTUS_WATERMELON_HEIGHT", UpgradeWatermelonHeight)
UpgradeDragonHeight = getUpgradeHeight("LOTUS_DRAGON_HEIGHT", UpgradeDragonHeight)
-UpgradeAussieHeight = getUpgradeHeight("LOTUS_AUSSIE_HEIGHT", UpgradeAussieHeight)

UpgradePhoenixHeight = getUpgradeHeight("LOTUS_PHOENIX_HEIGHT", UpgradePhoenixHeight)
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
@@ -56,10 +56,10 @@ const UpgradeHyggeHeight = -21
const UpgradeLightningHeight = -22
const UpgradeThunderHeight = -23
const UpgradeWatermelonHeight = -24
-const UpgradeDragonHeight = -25
-const UpgradePhoenixHeight = -26
-
-const UpgradeAussieHeight = 400
+const UpgradeDragonHeight = 5760
+
+const UpgradePhoenixHeight = UpgradeDragonHeight + 120

// This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -100
@@ -98,9 +98,6 @@ const UpgradePhoenixHeight = UpgradeDragonHeight + 120
// 2024-04-03T11:00:00Z
const UpgradeCalibrationDragonFixHeight = 1493854

-// ?????
-const UpgradeAussieHeight = 999999999999999
-
var SupportedProofTypes = []abi.RegisteredSealProof{
abi.RegisteredSealProof_StackedDrg32GiBV1,
abi.RegisteredSealProof_StackedDrg64GiBV1,
@@ -54,9 +54,8 @@ var UpgradeHyggeHeight = abi.ChainEpoch(-21)
var UpgradeLightningHeight = abi.ChainEpoch(-22)
var UpgradeThunderHeight = abi.ChainEpoch(-23)
var UpgradeWatermelonHeight = abi.ChainEpoch(-24)
-var UpgradeDragonHeight = abi.ChainEpoch(-25)
-
-const UpgradeAussieHeight = 50
+const UpgradeDragonHeight = 50

const UpgradePhoenixHeight = UpgradeDragonHeight + 100

@@ -100,14 +100,11 @@ const UpgradeThunderHeight = UpgradeLightningHeight + 2880*21
const UpgradeWatermelonHeight = 3469380

// 2024-04-24T14:00:00Z
-const UpgradeDragonHeight = 3855360
+var UpgradeDragonHeight = abi.ChainEpoch(3855360)

// This epoch, 120 epochs after the "rest" of the nv22 upgrade, is when we switch to Drand quicknet
// 2024-04-11T15:00:00Z
-const UpgradePhoenixHeight = UpgradeDragonHeight + 120
+var UpgradePhoenixHeight = UpgradeDragonHeight + 120

-// ??????
-var UpgradeAussieHeight = abi.ChainEpoch(9999999999)
-
// This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight = -1
@@ -133,8 +130,10 @@ func init() {
SetAddressNetwork(address.Mainnet)
}

-if os.Getenv("LOTUS_DISABLE_AUSSIE") == "1" {
-UpgradeAussieHeight = math.MaxInt64 - 1
+if os.Getenv("LOTUS_DISABLE_DRAGON") == "1" {
+UpgradeDragonHeight = math.MaxInt64 - 1
+delete(DrandSchedule, UpgradePhoenixHeight)
+UpgradePhoenixHeight = math.MaxInt64
}

// NOTE: DO NOT change this unless you REALLY know what you're doing. This is not consensus critical, however,
@@ -166,5 +165,5 @@ const BootstrapPeerThreshold = 4
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 314

-// WhitelistedBlock skips checks on message validity in this block to sidestep the zero-bls signature
+// we skip checks on message validity in this block to sidestep the zero-bls signature
var WhitelistedBlock = MustParseCid("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi")
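The params hunks above rely on environment variables to override or disable upgrade epochs at start-up (getUpgradeHeight and the LOTUS_DISABLE_DRAGON check). A minimal sketch of such an override helper, under the assumption that epochs are plain int64 values; this is an illustration of the pattern, not the repository's getUpgradeHeight.

package main

import (
	"os"
	"strconv"
)

// epochFromEnv returns the epoch parsed from an environment variable when it
// is set and well formed, otherwise the supplied default.
func epochFromEnv(key string, def int64) int64 {
	v, ok := os.LookupEnv(key)
	if !ok || v == "" {
		return def
	}
	n, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return def // fall back silently on malformed input
	}
	return n
}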
@@ -30,7 +30,7 @@ const AllowableClockDriftSecs = uint64(1)
/* inline-gen template
const TestNetworkVersion = network.Version{{.latestNetworkVersion}}
/* inline-gen start */
-const TestNetworkVersion = network.Version23
+const TestNetworkVersion = network.Version22

/* inline-gen end */

@@ -124,7 +124,6 @@ const MinimumBaseFee = 100
const PackingEfficiencyNum = 4
const PackingEfficiencyDenom = 5

-// revive:disable-next-line:exported
// Actor consts
// TODO: pieceSize unused from actors
var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0)
@@ -115,7 +115,6 @@ var (
UpgradeDragonHeight abi.ChainEpoch = -26
UpgradePhoenixHeight abi.ChainEpoch = -27
UpgradeCalibrationDragonFixHeight abi.ChainEpoch = -28
-UpgradeAussieHeight abi.ChainEpoch = -29

DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
@@ -2,8 +2,6 @@ package build

import "os"

-type BuildVersion string
-
var CurrentCommit string
var BuildType int

@@ -38,24 +36,13 @@ func BuildTypeString() string {
}
}

-// NodeBuildVersion is the local build version of the Lotus daemon
-const NodeBuildVersion string = "1.27.1-rc2"
+// BuildVersion is the local build version
+const BuildVersion = "1.26.3"

-func NodeUserVersion() BuildVersion {
+func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
-return BuildVersion(NodeBuildVersion)
+return BuildVersion
}

-return BuildVersion(NodeBuildVersion + BuildTypeString() + CurrentCommit)
+return BuildVersion + BuildTypeString() + CurrentCommit
-}
-
-// MinerBuildVersion is the local build version of the Lotus miner
-const MinerBuildVersion = "1.27.1-rc2"
-
-func MinerUserVersion() BuildVersion {
-if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
-return BuildVersion(MinerBuildVersion)
-}
-
-return BuildVersion(MinerBuildVersion + BuildTypeString() + CurrentCommit)
}
@@ -6,7 +6,7 @@ import (

"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
-builtin14 "github.com/filecoin-project/go-state-types/builtin"
+builtin13 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@@ -22,7 +22,7 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)

-var Methods = builtin14.MethodsAccount
+var Methods = builtin13.MethodsAccount

func Load(store adt.Store, act *types.Actor) (State, error) {
if name, av, ok := actors.GetActorMetaByCode(act.Code); ok {
@@ -50,9 +50,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version13:
return load13(store, act.Head)

-case actorstypes.Version14:
-return load14(store, act.Head)
-
}
}

@@ -126,9 +123,6 @@ func MakeState(store adt.Store, av actorstypes.Version, addr address.Address) (S
case actorstypes.Version13:
return make13(store, addr)

-case actorstypes.Version14:
-return make14(store, addr)
-
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -159,6 +153,5 @@ func AllCodes() []cid.Cid {
(&state11{}).Code(),
(&state12{}).Code(),
(&state13{}).Code(),
-(&state14{}).Code(),
}
}
62 chain/actors/builtin/account/v14.go (generated, file removed)
@@ -1,62 +0,0 @@
package account

import (
"fmt"

"github.com/ipfs/go-cid"

"github.com/filecoin-project/go-address"
actorstypes "github.com/filecoin-project/go-state-types/actors"
account14 "github.com/filecoin-project/go-state-types/builtin/v14/account"
"github.com/filecoin-project/go-state-types/manifest"

"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
)

var _ State = (*state14)(nil)

func load14(store adt.Store, root cid.Cid) (State, error) {
out := state14{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}

func make14(store adt.Store, addr address.Address) (State, error) {
out := state14{store: store}
out.State = account14.State{Address: addr}
return &out, nil
}

type state14 struct {
account14.State
store adt.Store
}

func (s *state14) PubkeyAddress() (address.Address, error) {
return s.Address, nil
}

func (s *state14) GetState() interface{} {
return &s.State
}

func (s *state14) ActorKey() string {
return manifest.AccountKey
}

func (s *state14) ActorVersion() actorstypes.Version {
return actorstypes.Version14
}

func (s *state14) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}

return code
}
@@ -5,7 +5,7 @@ import (
"golang.org/x/xerrors"

actorstypes "github.com/filecoin-project/go-state-types/actors"
-builtin14 "github.com/filecoin-project/go-state-types/builtin"
+builtin13 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
@@ -46,9 +46,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version13:
return load13(store, act.Head)

-case actorstypes.Version14:
-return load14(store, act.Head)
-
}
}

@@ -122,16 +119,13 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
case actorstypes.Version13:
return make13(store)

-case actorstypes.Version14:
-return make14(store)
-
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}

var (
-Address = builtin14.CronActorAddr
-Methods = builtin14.MethodsCron
+Address = builtin13.CronActorAddr
+Methods = builtin13.MethodsCron
)

type State interface {
@@ -157,6 +151,5 @@ func AllCodes() []cid.Cid {
(&state11{}).Code(),
(&state12{}).Code(),
(&state13{}).Code(),
-(&state14{}).Code(),
}
}
57 chain/actors/builtin/cron/v14.go (generated, file removed)
@@ -1,57 +0,0 @@
package cron

import (
"fmt"

"github.com/ipfs/go-cid"

actorstypes "github.com/filecoin-project/go-state-types/actors"
cron14 "github.com/filecoin-project/go-state-types/builtin/v14/cron"
"github.com/filecoin-project/go-state-types/manifest"

"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
)

var _ State = (*state14)(nil)

func load14(store adt.Store, root cid.Cid) (State, error) {
out := state14{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}

func make14(store adt.Store) (State, error) {
out := state14{store: store}
out.State = *cron14.ConstructState(cron14.BuiltInEntries())
return &out, nil
}

type state14 struct {
cron14.State
store adt.Store
}

func (s *state14) GetState() interface{} {
return &s.State
}

func (s *state14) ActorKey() string {
return manifest.CronKey
}

func (s *state14) ActorVersion() actorstypes.Version {
return actorstypes.Version14
}

func (s *state14) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}

return code
}
@@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
-builtin14 "github.com/filecoin-project/go-state-types/builtin"
+builtin13 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"

@@ -17,8 +17,8 @@ import (
)

var (
-Address = builtin14.DatacapActorAddr
-Methods = builtin14.MethodsDatacap
+Address = builtin13.DatacapActorAddr
+Methods = builtin13.MethodsDatacap
)

func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -44,9 +44,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version13:
return load13(store, act.Head)

-case actorstypes.Version14:
-return load14(store, act.Head)
-
}
}

@@ -71,9 +68,6 @@ func MakeState(store adt.Store, av actorstypes.Version, governor address.Address
case actorstypes.Version13:
return make13(store, governor, bitwidth)

-case actorstypes.Version14:
-return make14(store, governor, bitwidth)
-
default:
return nil, xerrors.Errorf("datacap actor only valid for actors v9 and above, got %d", av)
}
@@ -99,6 +93,5 @@ func AllCodes() []cid.Cid {
(&state11{}).Code(),
(&state12{}).Code(),
(&state13{}).Code(),
-(&state14{}).Code(),
}
}
82 chain/actors/builtin/datacap/v14.go (generated, file removed)
@@ -1,82 +0,0 @@
package datacap

import (
"fmt"

"github.com/ipfs/go-cid"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
datacap14 "github.com/filecoin-project/go-state-types/builtin/v14/datacap"
adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt"
"github.com/filecoin-project/go-state-types/manifest"

"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
)

var _ State = (*state14)(nil)

func load14(store adt.Store, root cid.Cid) (State, error) {
out := state14{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}

func make14(store adt.Store, governor address.Address, bitwidth uint64) (State, error) {
out := state14{store: store}
s, err := datacap14.ConstructState(store, governor, bitwidth)
if err != nil {
return nil, err
}

out.State = *s

return &out, nil
}

type state14 struct {
datacap14.State
store adt.Store
}

func (s *state14) Governor() (address.Address, error) {
return s.State.Governor, nil
}

func (s *state14) GetState() interface{} {
return &s.State
}

func (s *state14) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
return forEachClient(s.store, actors.Version14, s.verifiedClients, cb)
}

func (s *state14) verifiedClients() (adt.Map, error) {
return adt14.AsMap(s.store, s.Token.Balances, int(s.Token.HamtBitWidth))
}

func (s *state14) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
return getDataCap(s.store, actors.Version14, s.verifiedClients, addr)
}

func (s *state14) ActorKey() string {
return manifest.DatacapKey
}

func (s *state14) ActorVersion() actorstypes.Version {
return actorstypes.Version14
}

func (s *state14) Code() cid.Cid {
code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
if !ok {
panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
}

return code
}
|
|||||||
"golang.org/x/xerrors"
|
"golang.org/x/xerrors"
|
||||||
|
|
||||||
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
actorstypes "github.com/filecoin-project/go-state-types/actors"
|
||||||
builtin14 "github.com/filecoin-project/go-state-types/builtin"
|
builtin13 "github.com/filecoin-project/go-state-types/builtin"
|
||||||
"github.com/filecoin-project/go-state-types/cbor"
|
"github.com/filecoin-project/go-state-types/cbor"
|
||||||
"github.com/filecoin-project/go-state-types/exitcode"
|
"github.com/filecoin-project/go-state-types/exitcode"
|
||||||
"github.com/filecoin-project/go-state-types/manifest"
|
"github.com/filecoin-project/go-state-types/manifest"
|
||||||
@ -15,7 +15,7 @@ import (
|
|||||||
"github.com/filecoin-project/lotus/chain/types"
|
"github.com/filecoin-project/lotus/chain/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
var Methods = builtin14.MethodsEVM
|
var Methods = builtin13.MethodsEVM
|
||||||
|
|
||||||
// See https://github.com/filecoin-project/builtin-actors/blob/6e781444cee5965278c46ef4ffe1fb1970f18d7d/actors/evm/src/lib.rs#L35-L42
|
// See https://github.com/filecoin-project/builtin-actors/blob/6e781444cee5965278c46ef4ffe1fb1970f18d7d/actors/evm/src/lib.rs#L35-L42
|
||||||
const (
|
const (
|
||||||
@ -49,9 +49,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
|
|||||||
case actorstypes.Version13:
|
case actorstypes.Version13:
|
||||||
return load13(store, act.Head)
|
return load13(store, act.Head)
|
||||||
|
|
||||||
case actorstypes.Version14:
|
|
||||||
return load14(store, act.Head)
|
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -73,9 +70,6 @@ func MakeState(store adt.Store, av actorstypes.Version, bytecode cid.Cid) (State
|
|||||||
case actorstypes.Version13:
|
case actorstypes.Version13:
|
||||||
return make13(store, bytecode)
|
return make13(store, bytecode)
|
||||||
|
|
||||||
case actorstypes.Version14:
|
|
||||||
return make14(store, bytecode)
|
|
||||||
|
|
||||||
default:
|
default:
|
||||||
return nil, xerrors.Errorf("evm actor only valid for actors v10 and above, got %d", av)
|
return nil, xerrors.Errorf("evm actor only valid for actors v10 and above, got %d", av)
|
||||||
}
|
}
|
||||||
|
72 chain/actors/builtin/evm/v14.go (generated, file removed)
@@ -1,72 +0,0 @@
package evm

import (
"github.com/ipfs/go-cid"

"github.com/filecoin-project/go-state-types/abi"
evm14 "github.com/filecoin-project/go-state-types/builtin/v14/evm"

"github.com/filecoin-project/lotus/chain/actors/adt"
)

var _ State = (*state14)(nil)

func load14(store adt.Store, root cid.Cid) (State, error) {
out := state14{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}

func make14(store adt.Store, bytecode cid.Cid) (State, error) {
out := state14{store: store}
s, err := evm14.ConstructState(store, bytecode)
if err != nil {
return nil, err
}

out.State = *s

return &out, nil
}

type state14 struct {
evm14.State
store adt.Store
}

func (s *state14) Nonce() (uint64, error) {
return s.State.Nonce, nil
}

func (s *state14) IsAlive() (bool, error) {
return s.State.Tombstone == nil, nil
}

func (s *state14) GetState() interface{} {
return &s.State
}

func (s *state14) GetBytecodeCID() (cid.Cid, error) {
return s.State.Bytecode, nil
}

func (s *state14) GetBytecodeHash() ([32]byte, error) {
return s.State.BytecodeHash, nil
}

func (s *state14) GetBytecode() ([]byte, error) {
bc, err := s.GetBytecodeCID()
if err != nil {
return nil, err
}

var byteCode abi.CborBytesTransparent
if err := s.store.Get(s.store.Context(), bc, &byteCode); err != nil {
return nil, err
}

return byteCode, nil
}
@@ -7,7 +7,7 @@ import (
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
-builtin14 "github.com/filecoin-project/go-state-types/builtin"
+builtin13 "github.com/filecoin-project/go-state-types/builtin"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@@ -25,8 +25,8 @@ import (
)

var (
-Address = builtin14.InitActorAddr
-Methods = builtin14.MethodsInit
+Address = builtin13.InitActorAddr
+Methods = builtin13.MethodsInit
)

func Load(store adt.Store, act *types.Actor) (State, error) {
@@ -55,9 +55,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
case actorstypes.Version13:
return load13(store, act.Head)

-case actorstypes.Version14:
-return load14(store, act.Head)
-
}
}

@@ -131,9 +128,6 @@ func MakeState(store adt.Store, av actorstypes.Version, networkName string) (Sta
case actorstypes.Version13:
return make13(store, networkName)

-case actorstypes.Version14:
-return make14(store, networkName)
-
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
@@ -187,6 +181,5 @@ func AllCodes() []cid.Cid {
(&state11{}).Code(),
(&state12{}).Code(),
(&state13{}).Code(),
-(&state14{}).Code(),
}
}
147 chain/actors/builtin/init/v14.go generated
@ -1,147 +0,0 @@
package init

import (
    "crypto/sha256"
    "fmt"

    "github.com/ipfs/go-cid"
    cbg "github.com/whyrusleeping/cbor-gen"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    actorstypes "github.com/filecoin-project/go-state-types/actors"
    builtin14 "github.com/filecoin-project/go-state-types/builtin"
    init14 "github.com/filecoin-project/go-state-types/builtin/v14/init"
    adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt"
    "github.com/filecoin-project/go-state-types/manifest"

    "github.com/filecoin-project/lotus/chain/actors"
    "github.com/filecoin-project/lotus/chain/actors/adt"
    "github.com/filecoin-project/lotus/node/modules/dtypes"
)

var _ State = (*state14)(nil)

func load14(store adt.Store, root cid.Cid) (State, error) {
    out := state14{store: store}
    err := store.Get(store.Context(), root, &out)
    if err != nil {
        return nil, err
    }
    return &out, nil
}

func make14(store adt.Store, networkName string) (State, error) {
    out := state14{store: store}

    s, err := init14.ConstructState(store, networkName)
    if err != nil {
        return nil, err
    }

    out.State = *s

    return &out, nil
}

type state14 struct {
    init14.State
    store adt.Store
}

func (s *state14) ResolveAddress(address address.Address) (address.Address, bool, error) {
    return s.State.ResolveAddress(s.store, address)
}

func (s *state14) MapAddressToNewID(address address.Address) (address.Address, error) {
    return s.State.MapAddressToNewID(s.store, address)
}

func (s *state14) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
    addrs, err := adt14.AsMap(s.store, s.State.AddressMap, builtin14.DefaultHamtBitwidth)
    if err != nil {
        return err
    }
    var actorID cbg.CborInt
    return addrs.ForEach(&actorID, func(key string) error {
        addr, err := address.NewFromBytes([]byte(key))
        if err != nil {
            return err
        }
        return cb(abi.ActorID(actorID), addr)
    })
}

func (s *state14) NetworkName() (dtypes.NetworkName, error) {
    return dtypes.NetworkName(s.State.NetworkName), nil
}

func (s *state14) SetNetworkName(name string) error {
    s.State.NetworkName = name
    return nil
}

func (s *state14) SetNextID(id abi.ActorID) error {
    s.State.NextID = id
    return nil
}

func (s *state14) Remove(addrs ...address.Address) (err error) {
    m, err := adt14.AsMap(s.store, s.State.AddressMap, builtin14.DefaultHamtBitwidth)
    if err != nil {
        return err
    }
    for _, addr := range addrs {
        if err = m.Delete(abi.AddrKey(addr)); err != nil {
            return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
        }
    }
    amr, err := m.Root()
    if err != nil {
        return xerrors.Errorf("failed to get address map root: %w", err)
    }
    s.State.AddressMap = amr
    return nil
}

func (s *state14) SetAddressMap(mcid cid.Cid) error {
    s.State.AddressMap = mcid
    return nil
}

func (s *state14) GetState() interface{} {
    return &s.State
}

func (s *state14) AddressMap() (adt.Map, error) {
    return adt14.AsMap(s.store, s.State.AddressMap, builtin14.DefaultHamtBitwidth)
}

func (s *state14) AddressMapBitWidth() int {
    return builtin14.DefaultHamtBitwidth
}

func (s *state14) AddressMapHashFunction() func(input []byte) []byte {
    return func(input []byte) []byte {
        res := sha256.Sum256(input)
        return res[:]
    }
}

func (s *state14) ActorKey() string {
    return manifest.InitKey
}

func (s *state14) ActorVersion() actorstypes.Version {
    return actorstypes.Version14
}

func (s *state14) Code() cid.Cid {
    code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
    if !ok {
        panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
    }

    return code
}
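The deleted file above is one of the generated per-version adapters: it embeds the versioned state type from go-state-types next to a store handle, and its methods translate that raw state into the version-neutral State interface the rest of the code consumes. A rough, self-contained sketch of that embedding-and-delegation shape, with stand-in types rather than the real init14/adt APIs:

package main

import "fmt"

// Stand-ins for adt.Store and init14.State; not the real Lotus types.
type store struct{}

type rawInitState struct {
    NetworkName string
    NextID      uint64
}

// state14Sketch mirrors the deleted state14 struct: the versioned state is
// embedded alongside the store it was loaded from.
type state14Sketch struct {
    rawInitState
    store store
}

// Version-neutral accessors simply delegate to the embedded versioned state.
func (s *state14Sketch) GetNetworkName() string { return s.rawInitState.NetworkName }

func (s *state14Sketch) SetNextID(id uint64) error {
    s.rawInitState.NextID = id
    return nil
}

func main() {
    s := &state14Sketch{rawInitState: rawInitState{NetworkName: "examplenet", NextID: 100}}
    _ = s.SetNextID(101)
    fmt.Println(s.GetNetworkName(), s.rawInitState.NextID)
}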
@ -143,7 +143,6 @@ type DealProposal = markettypes.DealProposal
 type DealLabel = markettypes.DealLabel

 type DealState interface {
-	SectorNumber() abi.SectorNumber   // 0 if not yet included in proven sector (0 is also a valid sector number)
 	SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector
 	LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated
 	SlashEpoch() abi.ChainEpoch       // -1 if deal never slashed
@ -186,10 +185,6 @@ type ProposalIDState struct {

 type emptyDealState struct{}

-func (e *emptyDealState) SectorNumber() abi.SectorNumber {
-	return 0
-}
-
 func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch {
 	return -1
 }
@ -61,9 +61,6 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
 	case actorstypes.Version13:
 		return load13(store, act.Head)

-	case actorstypes.Version14:
-		return load14(store, act.Head)
-
 	}
 }

@ -137,9 +134,6 @@ func MakeState(store adt.Store, av actorstypes.Version) (State, error) {
 	case actorstypes.Version13:
 		return make13(store)

-	case actorstypes.Version14:
-		return make14(store)
-
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@ -241,9 +235,6 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora
 	case actorstypes.Version13:
 		return decodePublishStorageDealsReturn13(b)

-	case actorstypes.Version14:
-		return decodePublishStorageDealsReturn14(b)
-
 	}
 	return nil, xerrors.Errorf("unknown actor version %d", av)
 }
@ -252,7 +243,6 @@ type DealProposal = markettypes.DealProposal
 type DealLabel = markettypes.DealLabel

 type DealState interface {
-	SectorNumber() abi.SectorNumber   // 0 if not yet included in proven sector (0 is also a valid sector number)
 	SectorStartEpoch() abi.ChainEpoch // -1 if not yet included in proven sector
 	LastUpdatedEpoch() abi.ChainEpoch // -1 if deal state never updated
 	SlashEpoch() abi.ChainEpoch       // -1 if deal never slashed
@ -294,10 +284,6 @@ type ProposalIDState struct {

 type emptyDealState struct{}

-func (e *emptyDealState) SectorNumber() abi.SectorNumber {
-	return 0
-}
-
 func (e *emptyDealState) SectorStartEpoch() abi.ChainEpoch {
 	return -1
 }
@ -370,6 +356,5 @@ func AllCodes() []cid.Cid {
 		(&state11{}).Code(),
 		(&state12{}).Code(),
 		(&state13{}).Code(),
-		(&state14{}).Code(),
 	}
 }
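The emptyDealState removed from above is a sentinel implementation of the (now smaller) DealState interface, used before a deal has any on-chain state: every accessor returns the documented "nothing yet" value (-1 epochs). These adapter packages also rely on compile-time assertions such as var _ State = (*state14)(nil), so dropping SectorNumber from the interface and from every per-version type has to happen together or the build breaks. A short, self-contained sketch of both patterns with stand-in types:

package main

import "fmt"

// DealState is a trimmed stand-in for the interface after this change.
type DealState interface {
    SectorStartEpoch() int64 // -1 if not yet included in a proven sector
    LastUpdatedEpoch() int64 // -1 if deal state never updated
    SlashEpoch() int64       // -1 if deal never slashed
}

// emptyDealState plays the same role as the one in the diff: a placeholder
// whose accessors all report "nothing has happened yet".
type emptyDealState struct{}

func (emptyDealState) SectorStartEpoch() int64 { return -1 }
func (emptyDealState) LastUpdatedEpoch() int64 { return -1 }
func (emptyDealState) SlashEpoch() int64       { return -1 }

// Compile-time check: if emptyDealState stops satisfying DealState, the build
// fails here rather than at a distant call site.
var _ DealState = emptyDealState{}

func main() {
    var ds DealState = emptyDealState{}
    // Callers treat any negative epoch as "deal not active / never slashed".
    fmt.Println(ds.SectorStartEpoch() < 0, ds.SlashEpoch() < 0)
}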
@ -212,14 +212,6 @@ type dealStateV{{.v}} struct {
 	ds{{.v}} market{{.v}}.DealState
 }

-func (d dealStateV{{.v}}) SectorNumber() abi.SectorNumber {
-	{{if (le .v 12)}}
-	return 0
-	{{else}}
-	return d.ds{{.v}}.SectorNumber
-	{{end}}
-}
-
 func (d dealStateV{{.v}}) SectorStartEpoch() abi.ChainEpoch {
 	return d.ds{{.v}}.SectorStartEpoch
 }
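The template hunk above is the source of the per-version files that follow: Go's text/template renders the {{if (le .v 12)}} branch as "return 0" for v0 through v12 and as a delegation to the underlying DealState field for v13 and later, which is also why the removed methods in the generated files carry stray blank lines where the template directives used to sit. A small, self-contained demonstration of that conditional (illustrative data only):

package main

import (
    "os"
    "text/template"
)

// The removed SectorNumber stanza from the template above, reproduced verbatim.
const stanza = `func (d dealStateV{{.v}}) SectorNumber() abi.SectorNumber {
	{{if (le .v 12)}}
	return 0
	{{else}}
	return d.ds{{.v}}.SectorNumber
	{{end}}
}
`

func main() {
    tmpl := template.Must(template.New("sector-number").Parse(stanza))
    // Rendering for v12 keeps the "return 0" branch; v13 delegates to the field.
    for _, v := range []int{12, 13} {
        if err := tmpl.Execute(os.Stdout, map[string]int{"v": v}); err != nil {
            panic(err)
        }
        os.Stdout.WriteString("\n")
    }
}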
6 chain/actors/builtin/market/v0.go generated
@ -191,12 +191,6 @@ type dealStateV0 struct {
 	ds0 market0.DealState
 }

-func (d dealStateV0) SectorNumber() abi.SectorNumber {
-
-	return 0
-
-}
-
 func (d dealStateV0) SectorStartEpoch() abi.ChainEpoch {
 	return d.ds0.SectorStartEpoch
 }
6 chain/actors/builtin/market/v10.go generated
@ -190,12 +190,6 @@ type dealStateV10 struct {
 	ds10 market10.DealState
 }

-func (d dealStateV10) SectorNumber() abi.SectorNumber {
-
-	return 0
-
-}
-
 func (d dealStateV10) SectorStartEpoch() abi.ChainEpoch {
 	return d.ds10.SectorStartEpoch
 }
6 chain/actors/builtin/market/v11.go generated
@ -190,12 +190,6 @@ type dealStateV11 struct {
 	ds11 market11.DealState
 }

-func (d dealStateV11) SectorNumber() abi.SectorNumber {
-
-	return 0
-
-}
-
 func (d dealStateV11) SectorStartEpoch() abi.ChainEpoch {
 	return d.ds11.SectorStartEpoch
 }
6 chain/actors/builtin/market/v12.go generated
@ -190,12 +190,6 @@ type dealStateV12 struct {
 	ds12 market12.DealState
 }

-func (d dealStateV12) SectorNumber() abi.SectorNumber {
-
-	return 0
-
-}
-
 func (d dealStateV12) SectorStartEpoch() abi.ChainEpoch {
 	return d.ds12.SectorStartEpoch
 }
6 chain/actors/builtin/market/v13.go generated
@ -190,12 +190,6 @@ type dealStateV13 struct {
 	ds13 market13.DealState
 }

-func (d dealStateV13) SectorNumber() abi.SectorNumber {
-
-	return d.ds13.SectorNumber
-
-}
-
 func (d dealStateV13) SectorStartEpoch() abi.ChainEpoch {
 	return d.ds13.SectorStartEpoch
 }
410 chain/actors/builtin/market/v14.go generated
@ -1,410 +0,0 @@
package market

import (
    "bytes"
    "fmt"

    "github.com/ipfs/go-cid"
    cbg "github.com/whyrusleeping/cbor-gen"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-bitfield"
    rlepluslazy "github.com/filecoin-project/go-bitfield/rle"
    "github.com/filecoin-project/go-state-types/abi"
    actorstypes "github.com/filecoin-project/go-state-types/actors"
    "github.com/filecoin-project/go-state-types/builtin"
    market14 "github.com/filecoin-project/go-state-types/builtin/v14/market"
    adt14 "github.com/filecoin-project/go-state-types/builtin/v14/util/adt"
    markettypes "github.com/filecoin-project/go-state-types/builtin/v9/market"
    "github.com/filecoin-project/go-state-types/manifest"

    "github.com/filecoin-project/lotus/chain/actors"
    "github.com/filecoin-project/lotus/chain/actors/adt"
    verifregtypes "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg"
    "github.com/filecoin-project/lotus/chain/types"
)

var _ State = (*state14)(nil)

func load14(store adt.Store, root cid.Cid) (State, error) {
    out := state14{store: store}
    err := store.Get(store.Context(), root, &out)
    if err != nil {
        return nil, err
    }
    return &out, nil
}

func make14(store adt.Store) (State, error) {
    out := state14{store: store}

    s, err := market14.ConstructState(store)
    if err != nil {
        return nil, err
    }

    out.State = *s

    return &out, nil
}

type state14 struct {
    market14.State
    store adt.Store
}

func (s *state14) TotalLocked() (abi.TokenAmount, error) {
    fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
    fml = types.BigAdd(fml, s.TotalClientStorageFee)
    return fml, nil
}

func (s *state14) BalancesChanged(otherState State) (bool, error) {
    otherState14, ok := otherState.(*state14)
    if !ok {
        // there's no way to compare different versions of the state, so let's
        // just say that means the state of balances has changed
        return true, nil
    }
    return !s.State.EscrowTable.Equals(otherState14.State.EscrowTable) || !s.State.LockedTable.Equals(otherState14.State.LockedTable), nil
}

func (s *state14) StatesChanged(otherState State) (bool, error) {
    otherState14, ok := otherState.(*state14)
    if !ok {
        // there's no way to compare different versions of the state, so let's
        // just say that means the state of balances has changed
        return true, nil
    }
    return !s.State.States.Equals(otherState14.State.States), nil
}

func (s *state14) States() (DealStates, error) {
    stateArray, err := adt14.AsArray(s.store, s.State.States, market14.StatesAmtBitwidth)
    if err != nil {
        return nil, err
    }
    return &dealStates14{stateArray}, nil
}

func (s *state14) ProposalsChanged(otherState State) (bool, error) {
    otherState14, ok := otherState.(*state14)
    if !ok {
        // there's no way to compare different versions of the state, so let's
        // just say that means the state of balances has changed
        return true, nil
    }
    return !s.State.Proposals.Equals(otherState14.State.Proposals), nil
}

func (s *state14) Proposals() (DealProposals, error) {
    proposalArray, err := adt14.AsArray(s.store, s.State.Proposals, market14.ProposalsAmtBitwidth)
    if err != nil {
        return nil, err
    }
    return &dealProposals14{proposalArray}, nil
}

func (s *state14) EscrowTable() (BalanceTable, error) {
    bt, err := adt14.AsBalanceTable(s.store, s.State.EscrowTable)
    if err != nil {
        return nil, err
    }
    return &balanceTable14{bt}, nil
}

func (s *state14) LockedTable() (BalanceTable, error) {
    bt, err := adt14.AsBalanceTable(s.store, s.State.LockedTable)
    if err != nil {
        return nil, err
    }
    return &balanceTable14{bt}, nil
}

func (s *state14) VerifyDealsForActivation(
    minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error) {
    w, vw, _, err := market14.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
    return w, vw, err
}

func (s *state14) NextID() (abi.DealID, error) {
    return s.State.NextID, nil
}

type balanceTable14 struct {
    *adt14.BalanceTable
}

func (bt *balanceTable14) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
    asMap := (*adt14.Map)(bt.BalanceTable)
    var ta abi.TokenAmount
    return asMap.ForEach(&ta, func(key string) error {
        a, err := address.NewFromBytes([]byte(key))
        if err != nil {
            return err
        }
        return cb(a, ta)
    })
}

type dealStates14 struct {
    adt.Array
}

func (s *dealStates14) Get(dealID abi.DealID) (DealState, bool, error) {
    var deal14 market14.DealState
    found, err := s.Array.Get(uint64(dealID), &deal14)
    if err != nil {
        return nil, false, err
    }
    if !found {
        return nil, false, nil
    }
    deal := fromV14DealState(deal14)
    return deal, true, nil
}

func (s *dealStates14) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
    var ds14 market14.DealState
    return s.Array.ForEach(&ds14, func(idx int64) error {
        return cb(abi.DealID(idx), fromV14DealState(ds14))
    })
}

func (s *dealStates14) decode(val *cbg.Deferred) (DealState, error) {
    var ds14 market14.DealState
    if err := ds14.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
        return nil, err
    }
    ds := fromV14DealState(ds14)
    return ds, nil
}

func (s *dealStates14) array() adt.Array {
    return s.Array
}

type dealStateV14 struct {
    ds14 market14.DealState
}

func (d dealStateV14) SectorNumber() abi.SectorNumber {

    return d.ds14.SectorNumber

}

func (d dealStateV14) SectorStartEpoch() abi.ChainEpoch {
    return d.ds14.SectorStartEpoch
}

func (d dealStateV14) LastUpdatedEpoch() abi.ChainEpoch {
    return d.ds14.LastUpdatedEpoch
}

func (d dealStateV14) SlashEpoch() abi.ChainEpoch {
    return d.ds14.SlashEpoch
}

func (d dealStateV14) Equals(other DealState) bool {
    if ov14, ok := other.(dealStateV14); ok {
        return d.ds14 == ov14.ds14
    }

    if d.SectorStartEpoch() != other.SectorStartEpoch() {
        return false
    }
    if d.LastUpdatedEpoch() != other.LastUpdatedEpoch() {
        return false
    }
    if d.SlashEpoch() != other.SlashEpoch() {
        return false
    }

    return true
}

var _ DealState = (*dealStateV14)(nil)

func fromV14DealState(v14 market14.DealState) DealState {
    return dealStateV14{v14}
}

type dealProposals14 struct {
    adt.Array
}

func (s *dealProposals14) Get(dealID abi.DealID) (*DealProposal, bool, error) {
    var proposal14 market14.DealProposal
    found, err := s.Array.Get(uint64(dealID), &proposal14)
    if err != nil {
        return nil, false, err
    }
    if !found {
        return nil, false, nil
    }

    proposal, err := fromV14DealProposal(proposal14)
    if err != nil {
        return nil, true, xerrors.Errorf("decoding proposal: %w", err)
    }

    return &proposal, true, nil
}

func (s *dealProposals14) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
    var dp14 market14.DealProposal
    return s.Array.ForEach(&dp14, func(idx int64) error {
        dp, err := fromV14DealProposal(dp14)
        if err != nil {
            return xerrors.Errorf("decoding proposal: %w", err)
        }

        return cb(abi.DealID(idx), dp)
    })
}

func (s *dealProposals14) decode(val *cbg.Deferred) (*DealProposal, error) {
    var dp14 market14.DealProposal
    if err := dp14.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
        return nil, err
    }

    dp, err := fromV14DealProposal(dp14)
    if err != nil {
        return nil, err
    }

    return &dp, nil
}

func (s *dealProposals14) array() adt.Array {
    return s.Array
}

func fromV14DealProposal(v14 market14.DealProposal) (DealProposal, error) {

    label, err := fromV14Label(v14.Label)

    if err != nil {
        return DealProposal{}, xerrors.Errorf("error setting deal label: %w", err)
    }

    return DealProposal{
        PieceCID:     v14.PieceCID,
        PieceSize:    v14.PieceSize,
        VerifiedDeal: v14.VerifiedDeal,
        Client:       v14.Client,
        Provider:     v14.Provider,

        Label: label,

        StartEpoch:           v14.StartEpoch,
        EndEpoch:             v14.EndEpoch,
        StoragePricePerEpoch: v14.StoragePricePerEpoch,

        ProviderCollateral: v14.ProviderCollateral,
        ClientCollateral:   v14.ClientCollateral,
    }, nil
}

func fromV14Label(v14 market14.DealLabel) (DealLabel, error) {
    if v14.IsString() {
        str, err := v14.ToString()
        if err != nil {
            return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert string label to string: %w", err)
        }
        return markettypes.NewLabelFromString(str)
    }

    bs, err := v14.ToBytes()
    if err != nil {
        return markettypes.EmptyDealLabel, xerrors.Errorf("failed to convert bytes label to bytes: %w", err)
    }
    return markettypes.NewLabelFromBytes(bs)
}

func (s *state14) GetState() interface{} {
    return &s.State
}

var _ PublishStorageDealsReturn = (*publishStorageDealsReturn14)(nil)

func decodePublishStorageDealsReturn14(b []byte) (PublishStorageDealsReturn, error) {
    var retval market14.PublishStorageDealsReturn
    if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
        return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err)
    }

    return &publishStorageDealsReturn14{retval}, nil
}

type publishStorageDealsReturn14 struct {
    market14.PublishStorageDealsReturn
}

func (r *publishStorageDealsReturn14) IsDealValid(index uint64) (bool, int, error) {

    set, err := r.ValidDeals.IsSet(index)
    if err != nil || !set {
        return false, -1, err
    }
    maskBf, err := bitfield.NewFromIter(&rlepluslazy.RunSliceIterator{
        Runs: []rlepluslazy.Run{rlepluslazy.Run{Val: true, Len: index}}})
    if err != nil {
        return false, -1, err
    }
    before, err := bitfield.IntersectBitField(maskBf, r.ValidDeals)
    if err != nil {
        return false, -1, err
    }
    outIdx, err := before.Count()
    if err != nil {
        return false, -1, err
    }
    return set, int(outIdx), nil

}

func (r *publishStorageDealsReturn14) DealIDs() ([]abi.DealID, error) {
    return r.IDs, nil
}

func (s *state14) GetAllocationIdForPendingDeal(dealId abi.DealID) (verifregtypes.AllocationId, error) {

    allocations, err := adt14.AsMap(s.store, s.PendingDealAllocationIds, builtin.DefaultHamtBitwidth)
    if err != nil {
        return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err)
    }

    var allocationId cbg.CborInt
    found, err := allocations.Get(abi.UIntKey(uint64(dealId)), &allocationId)
    if err != nil {
        return verifregtypes.NoAllocationID, xerrors.Errorf("failed to load allocation id for %d: %w", dealId, err)
    }
    if !found {
        return verifregtypes.NoAllocationID, nil
    }

    return verifregtypes.AllocationId(allocationId), nil

}

func (s *state14) ActorKey() string {
    return manifest.MarketKey
}

func (s *state14) ActorVersion() actorstypes.Version {
    return actorstypes.Version14
}

func (s *state14) Code() cid.Cid {
    code, ok := actors.GetActorCodeID(s.ActorVersion(), s.ActorKey())
    if !ok {
        panic(fmt.Errorf("didn't find actor %v code id for actor version %d", s.ActorKey(), s.ActorVersion()))
    }

    return code
}
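The most involved logic in the deleted file is IsDealValid, which answers two questions for a deal at position index in a PublishStorageDeals batch: was it accepted, and if so at which slot of the returned IDs its deal ID sits. It does this by intersecting the ValidDeals bitfield with a mask of all positions below index and counting the survivors. A self-contained sketch of the same idea using a plain bool slice instead of the RLE+ bitfield (stand-in types, not the Lotus API):

package main

import "fmt"

// isDealValid mirrors publishStorageDealsReturn14.IsDealValid with a bool
// slice in place of the bitfield: accepted deals keep their relative order,
// so the slot in IDs equals the count of accepted deals before index.
func isDealValid(validDeals []bool, index int) (ok bool, idsIndex int) {
    if index >= len(validDeals) || !validDeals[index] {
        return false, -1
    }
    before := 0
    for _, accepted := range validDeals[:index] {
        if accepted {
            before++
        }
    }
    return true, before
}

func main() {
    // Proposals 0 and 2 were accepted; proposal 1 was dropped by the actor.
    valid := []bool{true, false, true}

    fmt.Println(isDealValid(valid, 2)) // true 1  -> second entry of IDs
    fmt.Println(isDealValid(valid, 1)) // false -1 -> never published
}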
6 chain/actors/builtin/market/v2.go generated
@ -191,12 +191,6 @@ type dealStateV2 struct {
 	ds2 market2.DealState
 }

-func (d dealStateV2) SectorNumber() abi.SectorNumber {
-
-	return 0
-
-}
-
 func (d dealStateV2) SectorStartEpoch() abi.ChainEpoch {
 	return d.ds2.SectorStartEpoch
 }
6 chain/actors/builtin/market/v3.go generated
@ -186,12 +186,6 @@ type dealStateV3 struct {
 	ds3 market3.DealState
 }

-func (d dealStateV3) SectorNumber() abi.SectorNumber {
-
-	return 0
-
-}
-
 func (d dealStateV3) SectorStartEpoch() abi.ChainEpoch {
 	return d.ds3.SectorStartEpoch
 }
Some files were not shown because too many files have changed in this diff.