v1.27.0-a #10

Closed
jonathanface wants to merge 473 commits from v1.27.0-a into master
452 changed files with 88833 additions and 12012 deletions

File diff suppressed because it is too large


@ -1,156 +0,0 @@
package main
import (
"embed"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"text/template"
)
var GoVersion = "" // from init below. Ex: 1.19.7
//go:generate go run ./gen.go ..
//go:embed template.yml
var templateFile embed.FS
func init() {
b, err := os.ReadFile("../go.mod")
if err != nil {
panic("cannot find go.mod in parent folder")
}
for _, line := range strings.Split(string(b), "\n") {
if strings.HasPrefix(line, "go ") {
GoVersion = line[3:]
}
}
}
type (
dirs = []string
suite = string
)
// groupedUnitTests maps suite names to the top-level directories that should be
// included in that suite. The program adds an implicit "unit-rest" suite that
// includes all remaining top-level directories containing tests.
var groupedUnitTests = map[suite]dirs{
"unit-node": {"node"},
"unit-storage": {"storage", "extern"},
"unit-cli": {"cli", "cmd", "api"},
}
func main() {
if len(os.Args) != 2 {
panic("expected path to repo as argument")
}
repo := os.Args[1]
tmpl := template.New("template.yml")
tmpl.Delims("[[", "]]")
tmpl.Funcs(template.FuncMap{
"stripSuffix": func(in string) string {
return strings.TrimSuffix(in, "_test.go")
},
})
tmpl = template.Must(tmpl.ParseFS(templateFile, "*"))
// list all itests.
itests, err := filepath.Glob(filepath.Join(repo, "./itests/*_test.go"))
if err != nil {
panic(err)
}
// strip the dir from all entries.
for i, f := range itests {
itests[i] = filepath.Base(f)
}
// calculate the set of unit test directories to exclude because they are
// already included in a grouped suite.
var excluded = map[string]struct{}{}
for _, ss := range groupedUnitTests {
for _, s := range ss {
e, err := filepath.Abs(filepath.Join(repo, s))
if err != nil {
panic(err)
}
// Redundantly flag both absolute and relative paths as excluded
excluded[filepath.Join(repo, s)] = struct{}{}
excluded[e] = struct{}{}
}
}
// all top-level unit test dirs that are neither itests nor included in other suites.
var rest = map[string]struct{}{}
err = filepath.Walk(repo, func(path string, f os.FileInfo, err error) error {
// include all tests that aren't in the itests directory.
if strings.Contains(path, "itests") {
return filepath.SkipDir
}
// exclude all tests included in other suites
if f.IsDir() {
if _, ok := excluded[path]; ok {
return filepath.SkipDir
}
}
if strings.HasSuffix(path, "_test.go") {
rel, err := filepath.Rel(repo, path)
if err != nil {
panic(err)
}
// take the first directory
rest[strings.Split(rel, string(os.PathSeparator))[0]] = struct{}{}
}
return err
})
if err != nil {
panic(err)
}
// add other directories to a 'rest' suite.
for k := range rest {
groupedUnitTests["unit-rest"] = append(groupedUnitTests["unit-rest"], k)
}
// map iteration order is not deterministic, so sort the slice in place.
sort.Strings(groupedUnitTests["unit-rest"])
// form the input data.
type data struct {
Networks []string
ItestFiles []string
UnitSuites map[string]string
GoVersion string
}
in := data{
Networks: []string{"mainnet", "butterflynet", "calibnet", "debug"},
ItestFiles: itests,
UnitSuites: func() map[string]string {
ret := make(map[string]string)
for name, dirs := range groupedUnitTests {
for i, d := range dirs {
dirs[i] = fmt.Sprintf("./%s/...", d) // turn into package
}
ret[name] = strings.Join(dirs, " ")
}
return ret
}(),
GoVersion: GoVersion,
}
out, err := os.Create("./config.yml")
if err != nil {
panic(err)
}
defer out.Close()
// execute the template.
if err := tmpl.Execute(out, in); err != nil {
panic(err)
}
}
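For readers skimming the generator above: the UnitSuites values handed to template.yml are simply each suite's directories rewritten as Go package patterns and joined with spaces, exactly as done in the anonymous function inside main. A minimal standalone sketch of that transformation (the suite name and directories are copied from groupedUnitTests; the program itself is illustrative only):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// One entry copied from groupedUnitTests above.
	groups := map[string][]string{
		"unit-cli": {"cli", "cmd", "api"},
	}
	for name, dirs := range groups {
		patterns := make([]string, len(dirs))
		for i, d := range dirs {
			patterns[i] = fmt.Sprintf("./%s/...", d) // turn a top-level dir into a package pattern
		}
		// Prints: unit-cli => "./cli/... ./cmd/... ./api/..."
		fmt.Printf("%s => %q\n", name, strings.Join(patterns, " "))
	}
}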


@ -1,759 +0,0 @@
version: 2.1
orbs:
aws-cli: circleci/aws-cli@4.1.1
docker: circleci/docker@2.3.0
executors:
golang:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.21.7
resource_class: medium+
golang-2xl:
docker:
# Must match GO_VERSION_MIN in project root
- image: cimg/go:1.21.7
resource_class: 2xlarge
ubuntu:
docker:
- image: ubuntu:20.04
commands:
build-platform-specific:
parameters:
linux:
default: true
description: is a linux build environment?
type: boolean
darwin:
default: false
description: is a darwin build environment?
type: boolean
darwin-architecture:
default: "amd64"
description: which darwin architecture is being used?
type: string
steps:
- checkout
- git_fetch_all_tags
- run: git submodule sync
- run: git submodule update --init
- when:
condition: <<parameters.linux>>
steps:
- install-ubuntu-deps
- check-go-version
- when:
condition: <<parameters.darwin>>
steps:
- run:
name: Install Go
command: |
curl https://dl.google.com/go/go`cat GO_VERSION_MIN`.darwin-<<parameters.darwin-architecture>>.pkg -o /tmp/go.pkg && \
sudo installer -pkg /tmp/go.pkg -target /
- run:
name: Export Go
command: |
echo 'export GOPATH="${HOME}/go"' >> $BASH_ENV
- run: go version
- run:
name: Install dependencies with Homebrew
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config coreutils jq hwloc
- run:
name: Install Rust
command: |
curl https://sh.rustup.rs -sSf | sh -s -- -y
- run: make deps
download-params:
steps:
- restore_cache:
name: Restore parameters cache
keys:
- 'v26-2k-lotus-params'
- run: ./lotus fetch-params 2048
- save_cache:
name: Save parameters cache
key: 'v26-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
install_ipfs:
steps:
- run: |
curl -O https://dist.ipfs.tech/kubo/v0.16.0/kubo_v0.16.0_linux-amd64.tar.gz
tar -xvzf kubo_v0.16.0_linux-amd64.tar.gz
pushd kubo
sudo bash install.sh
popd
rm -rf kubo
rm kubo_v0.16.0_linux-amd64.tar.gz
git_fetch_all_tags:
steps:
- run:
name: fetch all tags
command: |
git fetch --all
install-ubuntu-deps:
steps:
- run: sudo apt install curl ca-certificates gnupg
- run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
check-go-version:
steps:
- run: |
v=`go version | { read _ _ v _; echo ${v#go}; }`
if [["[[ $v != `cat GO_VERSION_MIN` ]]"]]; then
echo "GO_VERSION_MIN file does not match the go version being used."
echo "Please update image to cimg/go:`cat GO_VERSION_MIN` or update GO_VERSION_MIN to $v."
exit 1
fi
jobs:
build:
executor: golang
working_directory: ~/lotus
steps:
- checkout
- git_fetch_all_tags
- run: git submodule sync
- run: git submodule update --init
- install-ubuntu-deps
- check-go-version
- run: make deps lotus
- persist_to_workspace:
root: ~/
paths:
- "lotus"
mod-tidy-check:
executor: golang
working_directory: ~/lotus
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go mod tidy -v
- run:
name: Check git diff
command: |
git --no-pager diff go.mod go.sum
git --no-pager diff --quiet go.mod go.sum
test:
description: |
Run tests with gotestsum.
working_directory: ~/lotus
parameters: &test-params
resource_class:
type: string
default: medium+
go-test-flags:
type: string
default: "-timeout 20m"
description: Flags passed to go test.
target:
type: string
default: "./..."
description: Import paths of packages to be tested.
proofs-log-test:
type: string
default: "0"
get-params:
type: boolean
default: false
suite:
type: string
default: unit
description: Test suite name to report to CircleCI.
docker:
- image: cimg/go:[[ .GoVersion]]
environment:
LOTUS_HARMONYDB_HOSTS: yugabyte
- image: yugabytedb/yugabyte:2.18.0.0-b65
command: bin/yugabyted start --daemon=false
name: yugabyte
resource_class: << parameters.resource_class >>
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- when:
condition: << parameters.get-params >>
steps:
- download-params
- run:
name: go test
environment:
TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >>
SKIP_CONFORMANCE: "1"
LOTUS_SRC_DIR: /home/circleci/project
command: |
mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts
dockerize -wait tcp://yugabyte:5433 -timeout 3m
env
gotestsum \
--format standard-verbose \
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
--jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
--packages="<< parameters.target >>" \
-- << parameters.go-test-flags >>
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/<< parameters.suite >>.json
test-conformance:
working_directory: ~/lotus
description: |
Run tests using a corpus of interoperable test vectors for Filecoin
implementations to test their correctness and compliance with the Filecoin
specifications.
parameters:
<<: *test-params
vectors-branch:
type: string
default: ""
description: |
Branch on github.com/filecoin-project/test-vectors to checkout and
test with. If empty (the default) the commit defined by the git
submodule is used.
docker:
- image: cimg/go:[[ .GoVersion]]
resource_class: << parameters.resource_class >>
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- download-params
- when:
condition:
not:
equal: [ "", << parameters.vectors-branch >> ]
steps:
- run:
name: checkout vectors branch
command: |
cd extern/test-vectors
git fetch
git checkout origin/<< parameters.vectors-branch >>
- run:
name: install statediff globally
command: |
## statediff is optional; we succeed even if compilation fails.
mkdir -p /tmp/statediff
git clone https://github.com/filecoin-project/statediff.git /tmp/statediff
cd /tmp/statediff
go install ./cmd/statediff || exit 0
- run:
name: go test
environment:
SKIP_CONFORMANCE: "0"
command: |
mkdir -p /tmp/test-reports
mkdir -p /tmp/test-artifacts
gotestsum \
--format pkgname-and-test-fails \
--junitfile /tmp/test-reports/junit.xml \
-- \
-v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/
go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/conformance-coverage.html
build-linux-amd64:
executor: golang
steps:
- build-platform-specific
- run: make lotus lotus-miner lotus-worker
- run:
name: check tag and version output match
command: ./scripts/version-check.sh ./lotus
- run: |
mkdir -p /tmp/workspace/linux_amd64_v1 && \
mv lotus lotus-miner lotus-worker /tmp/workspace/linux_amd64_v1/
- persist_to_workspace:
root: /tmp/workspace
paths:
- linux_amd64_v1
build-darwin-amd64:
description: build darwin lotus binary
working_directory: ~/go/src/github.com/filecoin-project/lotus
macos:
xcode: "13.4.1"
steps:
- build-platform-specific:
linux: false
darwin: true
darwin-architecture: amd64
- run: make lotus lotus-miner lotus-worker
- run: otool -hv lotus
- run:
name: check tag and version output match
command: ./scripts/version-check.sh ./lotus
- run: |
mkdir -p /tmp/workspace/darwin_amd64_v1 && \
mv lotus lotus-miner lotus-worker /tmp/workspace/darwin_amd64_v1/
- persist_to_workspace:
root: /tmp/workspace
paths:
- darwin_amd64_v1
build-darwin-arm64:
description: self-hosted m1 runner
working_directory: ~/go/src/github.com/filecoin-project/lotus
machine: true
resource_class: filecoin-project/self-hosted-m1
steps:
- run: echo 'export PATH=/opt/homebrew/bin:"$PATH"' >> "$BASH_ENV"
- build-platform-specific:
linux: false
darwin: true
darwin-architecture: arm64
- run: |
export CPATH=$(brew --prefix)/include && export LIBRARY_PATH=$(brew --prefix)/lib && make lotus lotus-miner lotus-worker
- run: otool -hv lotus
- run:
name: check tag and version output match
command: ./scripts/version-check.sh ./lotus
- run: |
mkdir -p /tmp/workspace/darwin_arm64 && \
mv lotus lotus-miner lotus-worker /tmp/workspace/darwin_arm64/
- persist_to_workspace:
root: /tmp/workspace
paths:
- darwin_arm64
- run:
command: make clean
when: always
- run:
name: cleanup homebrew
command: HOMEBREW_NO_AUTO_UPDATE=1 brew uninstall pkg-config coreutils jq hwloc
when: always
release:
executor: golang
parameters:
dry-run:
default: false
description: should this release actually publish its artifacts?
type: boolean
steps:
- checkout
- run: |
echo 'deb [trusted=yes] https://repo.goreleaser.com/apt/ /' | sudo tee /etc/apt/sources.list.d/goreleaser.list
sudo apt update
sudo apt install goreleaser-pro
- install_ipfs
- attach_workspace:
at: /tmp/workspace
- when:
condition: << parameters.dry-run >>
steps:
- run: goreleaser release --rm-dist --snapshot --debug
- run: ./scripts/generate-checksums.sh
- when:
condition:
not: << parameters.dry-run >>
steps:
- run: goreleaser release --rm-dist --debug
- run: ./scripts/generate-checksums.sh
- run: ./scripts/publish-checksums.sh
gofmt:
executor: golang
working_directory: ~/lotus
steps:
- run:
command: "! go fmt ./... 2>&1 | read"
gen-check:
executor: golang
working_directory: ~/lotus
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
- run: make gen
- run: git --no-pager diff && git --no-pager diff --quiet
- run: make docsgen-cli
- run: git --no-pager diff && git --no-pager diff --quiet
docs-check:
executor: golang
working_directory: ~/lotus
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run: go install golang.org/x/tools/cmd/goimports
- run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
- run: make docsgen
- run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
- run: diff ../pre-openrpc-full ../post-openrpc-full && diff ../pre-openrpc-miner ../post-openrpc-miner && diff ../pre-openrpc-worker ../post-openrpc-worker && git --no-pager diff && git --no-pager diff --quiet
lint-all:
description: |
Run golangci-lint.
working_directory: ~/lotus
parameters:
args:
type: string
default: ''
description: |
Arguments to pass to golangci-lint
docker:
- image: cimg/go:[[ .GoVersion]]
resource_class: medium+
steps:
- install-ubuntu-deps
- attach_workspace:
at: ~/
- run:
name: Lint
command: |
golangci-lint run -v --timeout 10m \
--concurrency 4 << parameters.args >>
build-docker:
description: >
Publish to Dockerhub
executor: docker/docker
parameters:
image:
type: string
default: lotus
description: >
Passed to the docker build process to determine which image in the
Dockerfile should be built. Expected values are `lotus`,
`lotus-all-in-one`
network:
type: string
default: "mainnet"
description: >
Passed to the docker build process using GOFLAGS+=-tags=<<network>>.
Expected values are `debug`, `2k`, `calibnet`, `butterflynet`,
`interopnet`.
channel:
type: string
default: ""
description: >
The release channel to use for this image.
push:
type: boolean
default: false
description: >
When true, pushes the image to Dockerhub
steps:
- setup_remote_docker
- checkout
- git_fetch_all_tags
- run: git submodule sync
- run: git submodule update --init
- docker/check:
docker-username: DOCKERHUB_USERNAME
docker-password: DOCKERHUB_PASSWORD
- when:
condition:
equal: [ mainnet, <<parameters.network>> ]
steps:
- when:
condition: <<parameters.push>>
steps:
- docker/build:
image: filecoin/<<parameters.image>>
extra_build_args: --target <<parameters.image>>
tag: <<parameters.channel>>
- run:
name: Docker push
command: |
docker push filecoin/<<parameters.image>>:<<parameters.channel>>
if [["[[ ! -z $CIRCLE_SHA ]]"]]; then
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>> filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"
docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"
fi
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>> filecoin/<<parameters.image>>:"${CIRCLE_TAG}"
docker push filecoin/<<parameters.image>>:"${CIRCLE_TAG}"
fi
- unless:
condition: <<parameters.push>>
steps:
- docker/build:
image: filecoin/<<parameters.image>>
extra_build_args: --target <<parameters.image>>
- when:
condition:
not:
equal: [ mainnet, <<parameters.network>> ]
steps:
- when:
condition: <<parameters.push>>
steps:
- docker/build:
image: filecoin/<<parameters.image>>
extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>
tag: <<parameters.channel>>-<<parameters.network>>
- run:
name: Docker push
command: |
docker push filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>>
if [["[[ ! -z $CIRCLE_SHA ]]"]]; then
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"-<<parameters.network>>
docker push filecoin/<<parameters.image>>:"${CIRCLE_SHA:0:7}"-<<parameters.network>>
fi
if [["[[ ! -z $CIRCLE_TAG ]]"]]; then
docker image tag filecoin/<<parameters.image>>:<<parameters.channel>>-<<parameters.network>> filecoin/<<parameters.image>>:"${CIRCLE_TAG}"-<<parameters.network>>
docker push filecoin/<<parameters.image>>:"${CIRCLE_TAG}"-<<parameters.network>>
fi
- unless:
condition: <<parameters.push>>
steps:
- docker/build:
image: filecoin/<<parameters.image>>
extra_build_args: --target <<parameters.image>> --build-arg GOFLAGS=-tags=<<parameters.network>>
workflows:
ci:
jobs:
- build
- lint-all:
requires:
- build
- mod-tidy-check:
requires:
- build
- gofmt:
requires:
- build
- gen-check:
requires:
- build
- docs-check:
requires:
- build
[[- range $file := .ItestFiles -]]
[[ with $name := $file | stripSuffix ]]
- test:
name: test-itest-[[ $name ]]
requires:
- build
suite: itest-[[ $name ]]
target: "./itests/[[ $file ]]"
[[- if or (eq $name "worker") (eq $name "deals_concurrent") (eq $name "wdpost_worker_config") (eq $name "sector_pledge")]]
resource_class: 2xlarge
[[- end]]
[[- if or (eq $name "wdpost") (eq $name "sector_pledge")]]
get-params: true
[[end]]
[[- end ]][[- end]]
[[- range $suite, $pkgs := .UnitSuites]]
- test:
name: test-[[ $suite ]]
requires:
- build
suite: utest-[[ $suite ]]
target: "[[ $pkgs ]]"
[[- if eq $suite "unit-storage"]]
get-params: true
[[- end -]]
[[- if eq $suite "unit-cli"]]
resource_class: 2xlarge
get-params: true
[[- end -]]
[[- if eq $suite "unit-rest"]]
resource_class: 2xlarge
[[- end -]]
[[- end]]
- test:
go-test-flags: "-run=TestMulticoreSDR"
requires:
- build
suite: multicore-sdr-check
target: "./storage/sealer/ffiwrapper"
proofs-log-test: "1"
- test-conformance:
requires:
- build
suite: conformance
target: "./conformance"
release:
jobs:
- build-linux-amd64:
name: "Build ( linux / amd64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-darwin-amd64:
name: "Build ( darwin / amd64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-darwin-arm64:
name: "Build ( darwin / arm64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- release:
name: "Release"
requires:
- "Build ( darwin / amd64 )"
- "Build ( linux / amd64 )"
- "Build ( darwin / arm64 )"
filters:
branches:
ignore:
- /^.*$/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- release:
name: "Release (dry-run)"
dry-run: true
requires:
- "Build ( darwin / amd64 )"
- "Build ( linux / amd64 )"
- "Build ( darwin / arm64 )"
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
- /^ci\/.*$/
[[- range .Networks]]
- build-docker:
name: "Docker push (lotus-all-in-one / stable / [[.]])"
image: lotus-all-in-one
channel: stable
network: [[.]]
push: true
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+$/
- build-docker:
name: "Docker push (lotus-all-in-one / candidate / [[.]])"
image: lotus-all-in-one
channel: candidate
network: [[.]]
push: true
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+-rc\d+$/
- build-docker:
name: "Docker push (lotus-all-in-one / edge / [[.]])"
image: lotus-all-in-one
channel: master
network: [[.]]
push: true
filters:
branches:
only:
- master
- build-docker:
name: "Docker build (lotus-all-in-one / [[.]])"
image: lotus-all-in-one
network: [[.]]
push: false
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
[[- end]]
- build-docker:
name: "Docker push (lotus / stable / mainnet)"
image: lotus
channel: stable
network: mainnet
push: true
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+$/
- build-docker:
name: "Docker push (lotus / candidate / mainnet)"
image: lotus
channel: candidate
network: mainnet
push: true
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+-rc\d+$/
- build-docker:
name: "Docker push (lotus / master / mainnet)"
image: lotus
channel: master
network: mainnet
push: true
filters:
branches:
only:
- master
- build-docker:
name: "Docker build (lotus / mainnet)"
image: lotus
network: mainnet
push: false
filters:
branches:
only:
- /^release\/v\d+\.\d+\.\d+(-rc\d+)?$/
nightly:
triggers:
- schedule:
cron: "0 0 * * *"
filters:
branches:
only:
- master
jobs:
[[- range .Networks]]
- build-docker:
name: "Docker (lotus-all-in-one / nightly / [[.]])"
image: lotus-all-in-one
channel: nightly
network: [[.]]
push: true
[[- end]]
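A note on the [["[[ ... ]]"]] constructs that appear in the bash snippets above (check-go-version and the Docker push steps): gen.go parses this file with "[[" / "]]" as its template delimiters (tmpl.Delims("[[", "]]")), so an action containing a quoted string is replaced by that string's literal value, which is how a bash double-bracket test survives into the generated config.yml. A minimal sketch of that behaviour, with the template line copied from the file above and everything else illustrative:

package main

import (
	"os"
	"text/template"
)

func main() {
	t := template.New("demo")
	t.Delims("[[", "]]")
	// The quoted "[[ ... ]]" is a plain string constant inside the action,
	// so executing the template emits: if [[ ! -z $CIRCLE_SHA ]]; then
	template.Must(t.Parse(`if [["[[ ! -z $CIRCLE_SHA ]]"]]; then`))
	_ = t.Execute(os.Stdout, nil)
}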

.github/CODEOWNERS

@ -1,6 +1,3 @@
# Reference
# https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-on-github/about-code-owners
-# Global owners
-# Ensure maintainers team is a requested reviewer for non-draft PRs
-* @filecoin-project/lotus-maintainers

.github/actions/install-go/action.yml

@ -0,0 +1,16 @@
name: Install Go
description: Install Go for Filecoin Lotus
runs:
using: composite
steps:
- uses: actions/setup-go@v5
with:
go-version: stable
cache: false
- id: go-mod
uses: ipdxco/unified-github-workflows/.github/actions/read-go-mod@main
- uses: actions/setup-go@v5
with:
go-version: ${{ fromJSON(steps.go-mod.outputs.json).Go }}.x
cache: false


@ -0,0 +1,21 @@
name: Install System Dependencies
description: Install System dependencies for Filecoin Lotus
runs:
using: composite
steps:
- if: runner.os == 'Linux'
run: |
# List processes to enable debugging in case /var/lib/apt/lists/ is locked
ps aux
sudo apt-get update -y
sudo apt-get install -y ocl-icd-opencl-dev libhwloc-dev pkg-config
shell: bash
- if: runner.os == 'macOS'
env:
HOMEBREW_NO_AUTO_UPDATE: '1'
run: |
brew install hwloc pkg-config
echo "CPATH=$(brew --prefix)/include" | tee -a $GITHUB_ENV
echo "LIBRARY_PATH=$(brew --prefix)/lib" | tee -a $GITHUB_ENV
shell: bash


@ -0,0 +1,16 @@
name: Start YugabyteDB
description: Install Yugabyte Database for Filecoin Lotus
runs:
using: composite
steps:
- run: docker run --rm --name yugabyte -d -p 5433:5433 yugabytedb/yugabyte:2.18.0.0-b65 bin/yugabyted start --daemon=false
shell: bash
- run: |
while true; do
status=$(docker exec yugabyte bin/yugabyted status);
echo $status;
echo $status | grep Running && break;
sleep 1;
done
shell: bash

.github/labels.yml

@ -1,248 +0,0 @@
###
### Special magic GitHub labels
### https://help.github.com/en/github/building-a-strong-community/encouraging-helpful-contributions-to-your-project-with-labels
#
- name: "good first issue"
color: 7057ff
description: "Good for newcomers"
- name: "help wanted"
color: 008672
description: "Extra attention is needed"
###
### Goals
#
- name: goal/incentives
color: ff004d
description: "Incentinet"
###
### Areas
#
- name: area/ux
color: 00A4E0
description: "Area: UX"
- name: area/chain/vm
color: 00A4E2
description: "Area: Chain/VM"
- name: area/chain/sync
color: 00A4E4
description: "Area: Chain/Sync"
- name: area/chain/misc
color: 00A4E6
description: "Area: Chain/Misc"
- name: area/markets
color: 00A4E8
description: "Area: Markets"
- name: area/sealing/fsm
color: 0bb1ed
description: "Area: Sealing/FSM"
- name: area/sealing/storage
color: 0EB4F0
description: "Area: Sealing/Storage"
- name: area/proving
color: 0EB4F0
description: "Area: Proving"
- name: area/mining
color: 10B6F2
description: "Area: Mining"
- name: area/client/storage
color: 13B9F5
description: "Area: Client/Storage"
- name: area/client/retrieval
color: 15BBF7
description: "Area: Client/Retrieval"
- name: area/wallet
color: 15BBF7
description: "Area: Wallet"
- name: area/payment-channel
color: ff6767
description: "Area: Payment Channel"
- name: area/multisig
color: fff0ff
description: "Area: Multisig"
- name: area/networking
color: 273f8a
description: "Area: Networking"
###
### Kinds
#
- name: kind/bug
color: c92712
description: "Kind: Bug"
- name: kind/chore
color: fcf0b5
description: "Kind: Chore"
- name: kind/feature
color: FFF3B8
description: "Kind: Feature"
- name: kind/improvement
color: FFF5BA
description: "Kind: Improvement"
- name: kind/test
color: FFF8BD
description: "Kind: Test"
- name: kind/question
color: FFFDC2
description: "Kind: Question"
- name: kind/enhancement
color: FFFFC5
description: "Kind: Enhancement"
- name: kind/discussion
color: FFFFC7
description: "Kind: Discussion"
###
### Difficulties
#
- name: dif/trivial
color: b2b7ff
description: "Can be confidently tackled by newcomers, who are widely unfamiliar with lotus"
- name: dif/easy
color: 7886d7
description: "An existing lotus user should be able to pick this up"
- name: dif/medium
color: 6574cd
description: "Prior development experience with lotus is likely helpful"
- name: dif/hard
color: 5661b3
description: "Suggests that having worked on the specific component affected by this issue is important"
- name: dif/expert
color: 2f365f
description: "Requires extensive knowledge of the history, implications, ramifications of the issue"
###
### Efforts
#
- name: effort/minutes
color: e8fffe
description: "Effort: Minutes"
- name: effort/hours
color: a0f0ed
description: "Effort: Hours"
- name: effort/day
color: 64d5ca
description: "Effort: One Day"
- name: effort/days
color: 4dc0b5
description: "Effort: Multiple Days"
- name: effort/week
color: 38a89d
description: "Effort: One Week"
- name: effort/weeks
color: 20504f
description: "Effort: Multiple Weeks"
###
### Impacts
#
- name: impact/regression
color: f1f5f8
description: "Impact: Regression"
- name: impact/api-breakage
color: ECF0F3
description: "Impact: API Breakage"
- name: impact/quality
color: E7EBEE
description: "Impact: Quality"
- name: impact/dx
color: E2E6E9
description: "Impact: Developer Experience"
- name: impact/test-flakiness
color: DDE1E4
description: "Impact: Test Flakiness"
- name: impact/consensus
color: b20014
description: "Impact: Consensus"
###
### Topics
#
- name: topic/interoperability
color: bf0f73
description: "Topic: Interoperability"
- name: topic/specs
color: CC1C80
description: "Topic: Specs"
- name: topic/docs
color: D9298D
description: "Topic: Documentation"
- name: topic/architecture
color: E53599
description: "Topic: Architecture"
###
### Priorities
###
- name: P0
color: dd362a
description: "P0: Critical Blocker"
- name: P1
color: ce8048
description: "P1: Must be resolved"
- name: P2
color: dbd81a
description: "P2: Should be resolved"
- name: P3
color: 9fea8f
description: "P3: Might get resolved"
###
### Hints
#
#- name: hint/good-first-issue
# color: 7057ff
# description: "Hint: Good First Issue"
#- name: hint/help-wanted
# color: 008672
# description: "Hint: Help Wanted"
- name: hint/needs-decision
color: 33B9A5
description: "Hint: Needs Decision"
- name: hint/needs-triage
color: 1AA08C
description: "Hint: Needs Triage"
- name: hint/needs-analysis
color: 26AC98
description: "Hint: Needs Analysis"
- name: hint/needs-author-input
color: 33B9A5
description: "Hint: Needs Author Input"
- name: hint/needs-team-input
color: 40C6B2
description: "Hint: Needs Team Input"
- name: hint/needs-community-input
color: 4DD3BF
description: "Hint: Needs Community Input"
- name: hint/needs-review
color: 5AE0CC
description: "Hint: Needs Review"
###
### Statuses
#
- name: status/done
color: edb3a6
description: "Status: Done"
- name: status/deferred
color: E0A699
description: "Status: Deferred"
- name: status/in-progress
color: D49A8D
description: "Status: In Progress"
- name: status/blocked
color: C78D80
description: "Status: Blocked"
- name: status/inactive
color: BA8073
description: "Status: Inactive"
- name: status/waiting
color: AD7366
description: "Status: Waiting"
- name: status/rotten
color: 7A4033
description: "Status: Rotten"
- name: status/discarded
color: 6D3326
description: "Status: Discarded / Won't fix"

.github/workflows/build.yml

@ -0,0 +1,31 @@
name: Build
on:
pull_request:
push:
branches:
- master
- release/*
workflow_dispatch:
defaults:
run:
shell: bash
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
permissions: {}
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- uses: ./.github/actions/install-system-dependencies
- uses: ./.github/actions/install-go
- run: make deps lotus


@ -0,0 +1,22 @@
name: Built-in Actors
on:
push:
paths:
- build/actors
- build/builtin_actors_gen.go
branches:
- release/*
permissions: {}
jobs:
release:
name: Release Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: 1.21
- run: go test -tags=release ./build

.github/workflows/check.yml

@ -0,0 +1,82 @@
name: Check
on:
pull_request:
push:
branches:
- master
- release/*
workflow_dispatch:
defaults:
run:
shell: bash
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
permissions: {}
jobs:
check-docsgen:
name: Check (docs-check)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- uses: ./.github/actions/install-system-dependencies
- uses: ./.github/actions/install-go
- run: go install golang.org/x/tools/cmd/goimports
- run: make deps
- run: make docsgen
- run: git diff --exit-code
check-gen:
name: Check (gen-check)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- uses: ./.github/actions/install-system-dependencies
- uses: ./.github/actions/install-go
- run: make deps lotus
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
- run: make gen
- run: git diff --exit-code
- run: make docsgen-cli
- run: git diff --exit-code
check-lint:
name: Check (lint-all)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- uses: ./.github/actions/install-system-dependencies
- uses: ./.github/actions/install-go
- run: go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.58.2
- run: make deps
- run: golangci-lint run -v --timeout 10m --concurrency 4
check-fmt:
name: Check (gofmt)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- uses: ./.github/actions/install-go
- run: go fmt ./...
- run: git diff --exit-code
check-mod-tidy:
name: Check (mod-tidy-check)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- uses: ./.github/actions/install-go
- run: go mod tidy -v
- run: git diff --exit-code


@ -1,73 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches:
- master
- 'release/*'
pull_request:
# The branches below must be a subset of the branches above
branches:
- master
- 'release/*'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '1.18.8'
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: go
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

.github/workflows/docker.yml

@ -0,0 +1,94 @@
name: Docker
on:
push:
branches:
- master
- release/*
tags:
- v*
schedule:
- cron: '0 0 * * *'
workflow_dispatch:
defaults:
run:
shell: bash
permissions: {}
jobs:
docker:
name: Docker (${{ matrix.image }} / ${{ matrix.network }}) [publish=${{ github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/') }}]
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
image:
- lotus-all-in-one
network:
- mainnet
- butterflynet
- calibnet
- debug
include:
- image: lotus
network: mainnet
env:
PUBLISH: ${{ github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/tags/') }}
steps:
- id: channel
env:
IS_MASTER: ${{ github.ref == 'refs/heads/master' }}
IS_TAG: ${{ startsWith(github.ref, 'refs/tags/') }}
IS_RC: ${{ endsWith(github.ref, '-rc') }}
IS_SCHEDULED: ${{ github.event_name == 'schedule' }}
run: |
channel=''
if [[ "$IS_MASTER" == 'true' ]]; then
if [[ "$IS_SCHEDULED" == 'true' ]]; then
channel=nightly
else
channel=master
fi
elif [[ "$IS_TAG" == 'true' ]]; then
if [[ "$IS_RC" == 'true' ]]; then
channel=candidate
else
channel=stable
fi
fi
echo "channel=$channel" | tee -a $GITHUB_ENV
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: filecoin/${{ matrix.image }}
tags: |
type=schedule
type=raw,enable=${{ github.event_name != 'schedule' && steps.channel.outputs.channel != '' }},value=${{ steps.channel.outputs.channel }}
type=ref,event=tag
type=sha,prefix=
flavor: |
latest=false
suffix=${{ matrix.network != 'mainnet' && format('-{0}', matrix.network) || '' }}
- if: env.PUBLISH == 'true'
name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ vars.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push if channel is set (channel=${{ steps.channel.outputs.channel }})
uses: docker/build-push-action@v5
with:
context: .
push: ${{ env.PUBLISH == 'true' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
${{ matrix.network != 'mainnet' && format('GOFLAGS=-tags={0}', matrix.network) || ''}}
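For clarity, the channel step above maps the triggering ref and event to a release channel: pushes to master publish the master channel (nightly when triggered by the schedule), tags publish candidate for release candidates and stable otherwise, and any other ref leaves the channel empty so the channel-based tag is disabled. A hedged sketch of that same mapping as a plain function (illustrative only, not part of the workflow):

package main

import "fmt"

// channel mirrors the shell logic in the docker workflow's channel step.
func channel(isMaster, isScheduled, isTag, isRC bool) string {
	switch {
	case isMaster && isScheduled:
		return "nightly"
	case isMaster:
		return "master"
	case isTag && isRC:
		return "candidate"
	case isTag:
		return "stable"
	default:
		return "" // no channel: the image is still built, but the channel tag is disabled
	}
}

func main() {
	fmt.Println(channel(false, false, true, true)) // prints: candidate
}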


@ -1,17 +0,0 @@
name: Label syncer
on:
push:
paths:
- '.github/labels.yml'
branches:
- master
jobs:
build:
name: Sync labels
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1.0.0
- uses: micnncim/action-label-syncer@v1.0.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/release.yml

@ -0,0 +1,105 @@
name: Release
on:
push:
branches:
- ci/*
- release/*
tags:
- v*
workflow_dispatch:
defaults:
run:
shell: bash
permissions: {}
jobs:
build:
name: Build (${{ matrix.os }}/${{ matrix.arch }})
runs-on: ${{ matrix.runner }}
strategy:
fail-fast: false
matrix:
include:
- runner: ubuntu-latest
os: Linux
arch: X64
- runner: macos-13
os: macOS
arch: X64
- runner: macos-14
os: macOS
arch: ARM64
steps:
- env:
OS: ${{ matrix.os }}
ARCH: ${{ matrix.arch }}
run: |
if [[ "$OS" != "$RUNNER_OS" || "$ARCH" != "$RUNNER_ARCH" ]]; then
echo "::error title=Unexpected Runner::Expected $OS/$ARCH, got $RUNNER_OS/$RUNNER_ARCH"
exit 1
fi
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- uses: ./.github/actions/install-system-dependencies
- uses: ./.github/actions/install-go
- env:
GITHUB_TOKEN: ${{ github.token }}
run: make deps lotus lotus-miner lotus-worker
- if: runner.os == 'macOS'
run: otool -hv lotus
- run: ./scripts/version-check.sh ./lotus
- uses: actions/upload-artifact@v4
with:
name: lotus-${{ matrix.os }}-${{ matrix.arch }}
path: |
lotus
lotus-miner
lotus-worker
release:
name: Release [publish=${{ startsWith(github.ref, 'refs/tags/') }}]
permissions:
# This enables the job to create and/or update GitHub releases
contents: write
runs-on: ubuntu-latest
needs: [build]
env:
PUBLISH: ${{ startsWith(github.ref, 'refs/tags/') }}
steps:
- uses: actions/checkout@v4
with:
submodules: 'recursive'
fetch-depth: 0
- uses: actions/download-artifact@v4
with:
name: lotus-Linux-X64
path: linux_amd64_v1
- uses: actions/download-artifact@v4
with:
name: lotus-macOS-X64
path: darwin_amd64_v1
- uses: actions/download-artifact@v4
with:
name: lotus-macOS-ARM64
path: darwin_arm64
- uses: ./.github/actions/install-go
- uses: ipfs/download-ipfs-distribution-action@v1
with:
name: kubo
version: v0.16.0
- uses: goreleaser/goreleaser-action@7ec5c2b0c6cdda6e8bbb49444bc797dd33d74dd8 # v5.0.0
with:
distribution: goreleaser-pro
version: latest
args: release --clean --debug ${{ env.PUBLISH == 'false' && '--snapshot' || '' }}
env:
GITHUB_TOKEN: ${{ env.PUBLISH == 'true' && github.token || '' }}
GORELEASER_KEY: ${{ env.PUBLISH == 'true' && secrets.GORELEASER_KEY || '' }}
- run: ./scripts/generate-checksums.sh
- if: env.PUBLISH == 'true'
env:
GITHUB_TOKEN: ${{ github.token }}
run: ./scripts/publish-checksums.sh

.github/workflows/sorted-pr-checks.yml

@ -0,0 +1,32 @@
name: Comment with sorted PR checks
on:
workflow_dispatch:
inputs:
pull_number:
description: 'Pull request number'
required: true
workflow_run:
workflows:
- Build
- Check
- CodeQL
- Test
types:
- requested
- completed
permissions:
pull-requests: write
concurrency:
group: ${{ github.workflow }}-${{ github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0].number || 'unknown' }}
cancel-in-progress: true
jobs:
comment:
if: github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0]
uses: ipdxco/sorted-pr-checks/.github/workflows/comment.yml@v1
with:
pull_number: ${{ github.event.inputs.pull_number || github.event.workflow_run.pull_requests[0].number }}
template: unsuccessful_only


@ -4,18 +4,18 @@ on:
schedule:
- cron: '0 12 * * *'
+permissions: {}
jobs:
stale:
-runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
+runs-on: ubuntu-latest
steps:
-- uses: actions/stale@v3
+- uses: actions/stale@v9
with:
-repo-token: ${{ secrets.GITHUB_TOKEN }}
+repo-token: ${{ github.token }}
stale-issue-message: 'Oops, seems like we needed more information for this issue, please comment with more details or this issue will be closed in 24 hours.'
close-issue-message: 'This issue was closed because it is missing author input.'
stale-pr-message: 'Thank you for submitting the PR and contributing to lotus! Lotus maintainers need more of your input before merging it, please address the suggested changes or reply to the comments or this PR will be closed in 48 hours. You are always more than welcome to reopen the PR later as well!'
@ -29,5 +29,3 @@ jobs:
days-before-pr-close: 2
remove-stale-when-updated: true
enable-statistics: true


@ -1,13 +1,19 @@
name: sync-master-main
on:
push:
branches:
- master
+permissions: {}
jobs:
sync:
+permissions:
+contents: write
runs-on: ubuntu-latest
steps:
-- uses: actions/checkout@v2
+- uses: actions/checkout@v4
- name: update remote branch main
run: |
# overrides the remote branch (origin:github) `main`

.github/workflows/test.yml

@ -0,0 +1,306 @@
name: Test
on:
pull_request:
push:
branches:
- master
- release/*
workflow_dispatch:
defaults:
run:
shell: bash
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: ${{ github.event_name == 'pull_request' }}
permissions: {}
jobs:
discover:
name: Discover Test Groups
runs-on: ubuntu-latest
outputs:
groups: ${{ steps.test.outputs.groups }}
steps:
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- id: test
env:
# Unit test groups other than unit-rest
utests: |
[
{"name": "unit-cli", "packages": ["./cli/...", "./cmd/...", "./api/..."]},
{"name": "unit-storage", "packages": ["./storage/...", "./extern/..."]},
{"name": "unit-node", "packages": ["./node/..."]}
]
# Other tests that require special configuration
otests: |
[
{
"name": "multicore-sdr",
"packages": ["./storage/sealer/ffiwrapper"],
"go_test_flags": "-run=TestMulticoreSDR",
"test_rustproofs_logs": "1"
}, {
"name": "conformance",
"packages": ["./conformance"],
"go_test_flags": "-run=TestConformance",
"skip_conformance": "0"
}
]
# Mapping from test group names to custom runner labels
# The jobs default to running on the default hosted runners (4 CPU, 16 RAM).
# We use self-hosted xlarge (4 CPU, 8 RAM; and large - 2 CPU, 4 RAM) runners
# to extend the available runner capacity (60 default hosted runners).
# We use self-hosted 4xlarge (16 CPU, 32 RAM; and 2xlarge - 8 CPU, 16 RAM) runners
# to support resource-intensive jobs.
runners: |
{
"itest-deals_concurrent": ["self-hosted", "linux", "x64", "4xlarge"],
"itest-sector_pledge": ["self-hosted", "linux", "x64", "4xlarge"],
"itest-worker": ["self-hosted", "linux", "x64", "4xlarge"],
"itest-gateway": ["self-hosted", "linux", "x64", "2xlarge"],
"itest-sector_import_full": ["self-hosted", "linux", "x64", "2xlarge"],
"itest-sector_import_simple": ["self-hosted", "linux", "x64", "2xlarge"],
"itest-wdpost": ["self-hosted", "linux", "x64", "2xlarge"],
"unit-storage": ["self-hosted", "linux", "x64", "2xlarge"],
"itest-batch_deal": ["self-hosted", "linux", "x64", "xlarge"],
"itest-cli": ["self-hosted", "linux", "x64", "xlarge"],
"itest-deals_512mb": ["self-hosted", "linux", "x64", "xlarge"],
"itest-deals_anycid": ["self-hosted", "linux", "x64", "xlarge"],
"itest-deals_invalid_utf8_label": ["self-hosted", "linux", "x64", "xlarge"],
"itest-deals_max_staging_deals": ["self-hosted", "linux", "x64", "xlarge"],
"itest-deals_partial_retrieval": ["self-hosted", "linux", "x64", "xlarge"],
"itest-deals_publish": ["self-hosted", "linux", "x64", "xlarge"],
"itest-deals_remote_retrieval": ["self-hosted", "linux", "x64", "xlarge"],
"itest-decode_params": ["self-hosted", "linux", "x64", "xlarge"],
"itest-dup_mpool_messages": ["self-hosted", "linux", "x64", "xlarge"],
"itest-eth_account_abstraction": ["self-hosted", "linux", "x64", "xlarge"],
"itest-eth_api": ["self-hosted", "linux", "x64", "xlarge"],
"itest-eth_balance": ["self-hosted", "linux", "x64", "xlarge"],
"itest-eth_bytecode": ["self-hosted", "linux", "x64", "xlarge"],
"itest-eth_config": ["self-hosted", "linux", "x64", "xlarge"],
"itest-eth_conformance": ["self-hosted", "linux", "x64", "xlarge"],
"itest-eth_deploy": ["self-hosted", "linux", "x64", "xlarge"],
"itest-eth_fee_history": ["self-hosted", "linux", "x64", "xlarge"],
"itest-eth_transactions": ["self-hosted", "linux", "x64", "xlarge"],
"itest-fevm_address": ["self-hosted", "linux", "x64", "xlarge"],
"itest-fevm_events": ["self-hosted", "linux", "x64", "xlarge"],
"itest-gas_estimation": ["self-hosted", "linux", "x64", "xlarge"],
"itest-get_messages_in_ts": ["self-hosted", "linux", "x64", "xlarge"],
"itest-lite_migration": ["self-hosted", "linux", "x64", "xlarge"],
"itest-lookup_robust_address": ["self-hosted", "linux", "x64", "xlarge"],
"itest-mempool": ["self-hosted", "linux", "x64", "xlarge"],
"itest-mpool_msg_uuid": ["self-hosted", "linux", "x64", "xlarge"],
"itest-mpool_push_with_uuid": ["self-hosted", "linux", "x64", "xlarge"],
"itest-msgindex": ["self-hosted", "linux", "x64", "xlarge"],
"itest-multisig": ["self-hosted", "linux", "x64", "xlarge"],
"itest-net": ["self-hosted", "linux", "x64", "xlarge"],
"itest-nonce": ["self-hosted", "linux", "x64", "xlarge"],
"itest-path_detach_redeclare": ["self-hosted", "linux", "x64", "xlarge"],
"itest-pending_deal_allocation": ["self-hosted", "linux", "x64", "xlarge"],
"itest-remove_verifreg_datacap": ["self-hosted", "linux", "x64", "xlarge"],
"itest-sector_miner_collateral": ["self-hosted", "linux", "x64", "xlarge"],
"itest-sector_numassign": ["self-hosted", "linux", "x64", "xlarge"],
"itest-self_sent_txn": ["self-hosted", "linux", "x64", "xlarge"],
"itest-verifreg": ["self-hosted", "linux", "x64", "xlarge"],
"multicore-sdr": ["self-hosted", "linux", "x64", "xlarge"],
"unit-node": ["self-hosted", "linux", "x64", "xlarge"]
}
# A list of test groups that require YugabyteDB to be running
yugabytedb: |
["itest-harmonydb", "itest-harmonytask", "itest-curio"]
# A list of test groups that require Proof Parameters to be fetched
parameters: |
[
"conformance",
"itest-api",
"itest-deals_offline",
"itest-deals_padding",
"itest-deals_partial_retrieval_dm-level",
"itest-deals_pricing",
"itest-deals",
"itest-direct_data_onboard_verified",
"itest-direct_data_onboard",
"itest-net",
"itest-path_detach_redeclare",
"itest-path_type_filters",
"itest-sealing_resources",
"itest-sector_finalize_early",
"itest-sector_import_full",
"itest-sector_import_simple",
"itest-sector_pledge",
"itest-sector_unseal",
"itest-wdpost_no_miner_storage",
"itest-wdpost_worker_config",
"itest-wdpost",
"itest-worker_upgrade",
"itest-worker",
"multicore-sdr",
"unit-cli",
"unit-storage"
]
run: |
# Create a list of integration test groups
itests="$(
find ./itests -name "*_test.go" | \
jq -R '{
"name": "itest-\(. | split("/") | .[2] | sub("_test.go$";""))",
"packages": [.]
}' | jq -s
)"
# Create a list of packages that are covered by the integration and unit tests
packages="$(jq -n --argjson utests "$utests" '$utests | map(.packages) | flatten | . + ["./itests/..."]')"
# Create a new group for the unit tests that are not yet covered
rest="$(
find . -name "*_test.go" | cut -d/ -f2 | sort | uniq | \
jq -R '"./\(.)/..."' | \
jq -s --argjson p "$packages" '{"name": "unit-rest", "packages": (. - $p)}'
)"
# Combine the groups for integration tests, unit tests, the new unit-rest group, and the other tests
groups="$(jq -n --argjson i "$itests" --argjson u "$utests" --argjson r "$rest" --argjson o "$otests" '$i + $u + [$r] + $o')"
# Apply custom runner labels to the groups
groups="$(jq -n --argjson g "$groups" --argjson r "$runners" '$g | map(. + {"runner": (.name as $n | $r | .[$n]) })')"
# Apply the needs_yugabytedb flag to the groups
groups="$(jq -n --argjson g "$groups" --argjson y "$yugabytedb" '$g | map(. + {"needs_yugabytedb": ([.name] | inside($y)) })')"
# Apply the needs_parameters flag to the groups
groups="$(jq -n --argjson g "$groups" --argjson p "$parameters" '$g | map(. + {"needs_parameters": ([.name] | inside($p)) })')"
# Output the groups
echo "groups=$groups"
echo "groups=$(jq -nc --argjson g "$groups" '$g')" >> $GITHUB_OUTPUT
cache:
name: Cache Dependencies
runs-on: ubuntu-latest
outputs:
fetch_params_key: ${{ steps.fetch_params.outputs.key }}
fetch_params_path: ${{ steps.fetch_params.outputs.path }}
make_deps_key: ${{ steps.make_deps.outputs.key }}
make_deps_path: ${{ steps.make_deps.outputs.path }}
steps:
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- id: fetch_params
env:
CACHE_KEY: fetch-params-${{ hashFiles('./extern/filecoin-ffi/parameters.json') }}
CACHE_PATH: |
/var/tmp/filecoin-proof-parameters/
run: |
echo -e "key=$CACHE_KEY" | tee -a $GITHUB_OUTPUT
echo -e "path<<EOF\n$CACHE_PATH\nEOF" | tee -a $GITHUB_OUTPUT
- id: make_deps
env:
CACHE_KEY: ${{ runner.os }}-${{ runner.arch }}-make-deps-${{ hashFiles('./extern/filecoin-ffi/install-filcrypto') }}-${{ hashFiles('./extern/filecoin-ffi/rust/rustc-target-features-optimized.json') }}
CACHE_PATH: |
./extern/filecoin-ffi/filcrypto.h
./extern/filecoin-ffi/libfilcrypto.a
./extern/filecoin-ffi/filcrypto.pc
run: |
echo -e "key=$CACHE_KEY" | tee -a $GITHUB_OUTPUT
echo -e "path<<EOF\n$CACHE_PATH\nEOF" | tee -a $GITHUB_OUTPUT
- id: restore_fetch_params
uses: actions/cache/restore@v4
with:
key: ${{ steps.fetch_params.outputs.key }}
path: ${{ steps.fetch_params.outputs.path }}
lookup-only: true
- id: restore_make_deps
uses: actions/cache/restore@v4
with:
key: ${{ steps.make_deps.outputs.key }}
path: ${{ steps.make_deps.outputs.path }}
lookup-only: true
- if: steps.restore_fetch_params.outputs.cache-hit != 'true'
uses: ./.github/actions/install-system-dependencies
- if: steps.restore_fetch_params.outputs.cache-hit != 'true'
uses: ./.github/actions/install-go
- if: steps.restore_fetch_params.outputs.cache-hit != 'true' || steps.restore_make_deps.outputs.cache-hit != 'true'
env:
GITHUB_TOKEN: ${{ github.token }}
run: make deps
- if: steps.restore_fetch_params.outputs.cache-hit != 'true'
run: make lotus
- if: steps.restore_fetch_params.outputs.cache-hit != 'true'
run: ./lotus fetch-params 2048
- if: steps.restore_fetch_params.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
key: ${{ steps.fetch_params.outputs.key }}
path: ${{ steps.fetch_params.outputs.path }}
- if: steps.restore_make_deps.outputs.cache-hit != 'true'
uses: actions/cache/save@v4
with:
key: ${{ steps.make_deps.outputs.key }}
path: ${{ steps.make_deps.outputs.path }}
test:
needs: [discover, cache]
name: Test (${{ matrix.name }})
runs-on: ${{ github.repository == 'filecoin-project/lotus' && matrix.runner || 'ubuntu-latest' }}
strategy:
fail-fast: false
matrix:
include: ${{ fromJson(needs.discover.outputs.groups) }}
steps:
- uses: actions/checkout@v4
with:
submodules: 'recursive'
- uses: ./.github/actions/install-system-dependencies
- uses: ./.github/actions/install-go
- run: go install gotest.tools/gotestsum@latest
- name: Restore cached make deps outputs
uses: actions/cache/restore@v4
with:
key: ${{ needs.cache.outputs.make_deps_key }}
path: ${{ needs.cache.outputs.make_deps_path }}
fail-on-cache-miss: true
- if: ${{ matrix.needs_parameters }}
name: Restore cached fetch params outputs
uses: actions/cache/restore@v4
with:
key: ${{ needs.cache.outputs.fetch_params_key }}
path: ${{ needs.cache.outputs.fetch_params_path }}
fail-on-cache-miss: true
- if: ${{ matrix.needs_yugabytedb }}
uses: ./.github/actions/start-yugabytedb
timeout-minutes: 3
# TODO: Install statediff (used to be used for conformance)
- id: reports
run: mktemp -d | xargs -0 -I{} echo "path={}" | tee -a $GITHUB_OUTPUT
# TODO: Track coverage (used to be tracked for conformance)
- env:
NAME: ${{ matrix.name }}
LOTUS_SRC_DIR: ${{ github.workspace }}
LOTUS_HARMONYDB_HOSTS: 127.0.0.1
REPORTS_PATH: ${{ steps.reports.outputs.path }}
SKIP_CONFORMANCE: ${{ matrix.skip_conformance || '1' }}
TEST_RUSTPROOFS_LOGS: ${{ matrix.test_rustproofs_logs || '0' }}
FORMAT: ${{ matrix.format || 'standard-verbose' }}
PACKAGES: ${{ join(matrix.packages, ' ') }}
run: |
gotestsum \
--format "$FORMAT" \
--junitfile "$REPORTS_PATH/$NAME.xml" \
--jsonfile "$REPORTS_PATH/$NAME.json" \
--packages="$PACKAGES" \
-- ${{ matrix.go_test_flags || '' }}
- if: success() || failure()
uses: actions/upload-artifact@v4
with:
name: ${{ matrix.name }}
path: |
${{ steps.reports.outputs.path }}/${{ matrix.name }}.xml
${{ steps.reports.outputs.path }}/${{ matrix.name }}.json
continue-on-error: true
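For reference, each element of the groups JSON assembled by the discover job carries the fields the matrix entries consume: name, packages, optional runner labels, the needs_yugabytedb and needs_parameters flags, and, for the special groups, go_test_flags, test_rustproofs_logs and skip_conformance. A hedged sketch of that record shape (field set inferred from the jq pipeline and matrix usage above; the Go struct and the sample values are illustrative only):

package main

import (
	"encoding/json"
	"fmt"
)

// testGroup approximates one entry of the discover job's `groups` output.
type testGroup struct {
	Name               string   `json:"name"`
	Packages           []string `json:"packages"`
	Runner             []string `json:"runner,omitempty"` // custom runner labels, absent for default runners
	NeedsYugabyteDB    bool     `json:"needs_yugabytedb"`
	NeedsParameters    bool     `json:"needs_parameters"`
	GoTestFlags        string   `json:"go_test_flags,omitempty"`
	TestRustproofsLogs string   `json:"test_rustproofs_logs,omitempty"`
	SkipConformance    string   `json:"skip_conformance,omitempty"`
}

func main() {
	// Example modelled on the unit-storage group defined above.
	g := testGroup{
		Name:            "unit-storage",
		Packages:        []string{"./storage/...", "./extern/..."},
		Runner:          []string{"self-hosted", "linux", "x64", "2xlarge"},
		NeedsParameters: true,
	}
	b, _ := json.MarshalIndent(g, "", "  ")
	fmt.Println(string(b))
}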


@ -1,29 +0,0 @@
---
name: Testground PR Checker
on: [push]
jobs:
testground:
runs-on: ubuntu-latest
name: ${{ matrix.composition_file }}
strategy:
matrix:
include:
- backend_addr: ci.testground.ipfs.team
backend_proto: https
plan_directory: testplans/lotus-soup
composition_file: testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml
- backend_addr: ci.testground.ipfs.team
backend_proto: https
plan_directory: testplans/lotus-soup
composition_file: testplans/lotus-soup/_compositions/paych-stress-k8s.toml
steps:
- uses: actions/checkout@v2
- name: testground run
uses: testground/testground-github-action@v1
with:
backend_addr: ${{ matrix.backend_addr }}
backend_proto: ${{ matrix.backend_proto }}
plan_directory: ${{ matrix.plan_directory }}
composition_file: ${{ matrix.composition_file }}

.gitignore

@ -6,7 +6,8 @@
/lotus-chainwatch
/lotus-shed
/lotus-sim
-/lotus-provider
+/curio
+/sptool
/lotus-townhall
/lotus-fountain
/lotus-stats
@ -36,6 +37,9 @@ build/paramfetch.sh
/darwin
/linux
*.snap
+devgen.car
+localnet.json
+/*.ndjson
*-fuzz.zip
/chain/types/work_msg/


@ -5,15 +5,12 @@ linters:
- govet
- goimports
- misspell
-- goconst
-- golint
+- revive
- errcheck
- gosec
- unconvert
- staticcheck
-- varcheck
-- deadcode
-- scopelint
+- exportloopref
- unused
# We don't want to skip builtin/
@ -25,37 +22,36 @@ skip-dirs:
issues:
exclude:
-- "by other packages, and that stutters; consider calling this"
-- "Potential file inclusion via variable"
-- "should have( a package)? comment"
-- "Error return value of `logging.SetLogLevel` is not checked"
-- "comment on exported"
-- "(func|method) \\w+ should be \\w+"
-- "(type|var|struct field|(method|func) parameter) `\\w+` should be `\\w+`"
-- "(G306|G301|G307|G108|G302|G204|G104)"
-- "don't use ALL_CAPS in Go names"
-- "string .* has .* occurrences, make it a constant"
-- "a blank import should be only in a main or test package, or have a comment justifying it"
-- "package comment should be of the form"
-- "Potential hardcoded credentials"
-- "Use of weak random number generator"
-- "xerrors.* is deprecated"
+# gosec
+- "^G101: Potential hardcoded credentials"
+- "^G108: Profiling endpoint is automatically exposed on /debug/pprof"
+- "^G204: Subprocess launched with (variable|a potential tainted input or cmd arguments)"
+- "^G301: Expect directory permissions to be 0750 or less"
+- "^G302: Expect file permissions to be 0600 or less"
+- "^G304: Potential file inclusion via variable"
+- "^G306: Expect WriteFile permissions to be 0600 or less"
+- "^G404: Use of weak random number generator"
+# staticcheck
+- "^SA1019: xerrors.* is deprecated: As of Go 1.13, use errors"
+# revive
+- "^blank-imports: a blank import should be only in a main or test package, or have a comment justifying it"
+- "^dot-imports: should not use dot imports"
+- "^exported: (func|type) name will be used as [^\\s]+ by other packages, and that stutters; consider calling this \\w+"
+- "^exported: comment on exported (const|function|method|type|var) [^\\s]+ should be of the form \"\\w+ ...\""
+- "^exported: exported (const|function|method|type|var) [^\\s]+ should have comment (\\(or a comment on this block\\) )?or be unexported"
+- "^indent-error-flow: if block ends with a return statement, so drop this else and outdent its block \\(move short variable declaration to its own line if necessary\\)"
+- "^package-comments: package comment should be of the form \"Package \\w+ ...\""
+- "^package-comments: should have a package comment"
+- "^unexported-return: exported func \\w+ returns unexported type [^\\s]+, which can be annoying to use"
+- "^unused-parameter: parameter '\\w+' seems to be unused, consider removing or renaming it as _"
+- "^var-naming: (const|func|type|var|struct field|(method|func|interface method) parameter) [A-Z]\\w+ should be"
+- "^var-naming: (method|range var) \\w*(Api|Http|Id|Rpc|Url)[^\\s]* should be \\w*(API|HTTP|ID|RPC|URL)"
+- "^var-naming: don't use underscores in Go names"
+- "^var-naming: don't use ALL_CAPS in Go names; use CamelCase"
exclude-use-default: false
exclude-rules:
-- path: node/modules/lp2p
-linters:
-- golint
-- path: build/params_.*\.go
-linters:
-- golint
-- path: api/apistruct/struct.go
-linters:
-- golint
- path: .*_test.go
linters:
- gosec
@ -67,12 +63,3 @@ issues:
- path: cmd/lotus-bench/.*
linters:
- gosec
-- path: api/test/.*
-text: "context.Context should be the first parameter"
-linters:
-- golint
-linters-settings:
-goconst:
-min-occurrences: 6

.goreleaser.yaml

@ -27,7 +27,7 @@ builds:
- goos: linux - goos: linux
goarch: arm64 goarch: arm64
prebuilt: prebuilt:
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus path: '{{ .Env.GITHUB_WORKSPACE }}/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus'
- id: lotus-miner - id: lotus-miner
binary: lotus-miner binary: lotus-miner
builder: prebuilt builder: prebuilt
@ -43,7 +43,7 @@ builds:
- goos: linux - goos: linux
goarch: arm64 goarch: arm64
prebuilt: prebuilt:
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-miner path: '{{ .Env.GITHUB_WORKSPACE }}/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-miner'
- id: lotus-worker - id: lotus-worker
binary: lotus-worker binary: lotus-worker
builder: prebuilt builder: prebuilt
@ -59,7 +59,7 @@ builds:
- goos: linux - goos: linux
goarch: arm64 goarch: arm64
prebuilt: prebuilt:
path: /tmp/workspace/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-worker path: '{{ .Env.GITHUB_WORKSPACE }}/{{ .Os }}_{{ .Arch }}{{ with .Amd64 }}_{{ . }}{{ end }}/lotus-worker'
archives: archives:
- id: primary - id: primary

CHANGELOG.md

@ -6,6 +6,219 @@
## Improvements ## Improvements
# v1.27.0 / 2024-05-27
This is an optional feature release of Lotus. Lotus v1.27.0 includes numerous improvements, bugfixes and enhancements for node operators, RPC- and ETH RPC-providers. This feature release also introduces Curio in a Beta release. Check out the Curio Beta release section for how you can get started with Curio.
## ☢️ Upgrade Warnings ☢️
- This feature release drops the Raft cluster code experiment from the codebase. The Raft cluster never graduated beyond an experiment, had poor UX (e.g. there was no way to manage a running cluster, so it did not provide high availability), and pulled in a lot of heavy dependencies. We keep the multi-node RPC feature; it is not perfect, but it is useful.
- Event Database: Two sequential migrations will adjust indexes without altering data or columns, ensuring minimal invasiveness when upgrading to this release. However, these migrations may be time-consuming for nodes with extensive event databases.
## Indexers, RPC- and ETH RPC-providers improvements
This release includes many improvements and fixes for indexers, RPC- and ETH RPC-providers. Specifically, these PRs:
- [Significant performance improvements of eth_getLog](https://github.com/filecoin-project/lotus/pull/11477)
- [Return the correct block gas limit in the EthAPI](https://github.com/filecoin-project/lotus/pull/11747)
- [Accept input data in call arguments under field 'input'](https://github.com/filecoin-project/lotus/pull/11505)
- [Length check the array sent to eth_feeHistory RPC](https://github.com/filecoin-project/lotus/pull/11696)
- [ETH subscribe tipsets API should only return tipsets that have been executed](https://github.com/filecoin-project/lotus/pull/11858)
- [Adjust indexes in event index db to match query patterns](https://github.com/filecoin-project/lotus/pull/11934)
## ⭐️ Curio Beta Release ⭐️
**Curio** is the next generation of Lotus-Miner, also referred to as MinerV2. This release officially transitions Curio into beta and introduces a suite of powerful features designed to enhance your storage operations.
### Highlights
- **Curio as MinerV2**: Embrace the revolutionary upgrade from Lotus-Miner to Curio. This transition is not just a rebranding—it's an upgrade to a more robust, scalable, and user-friendly version.
- **High Availability**: Curio is designed for high availability. You can run multiple Curio nodes to handle the same types of tasks. The distributed scheduler and greedy worker design ensure that tasks are completed on time despite partial outages. You can safely update one of your Curio machines without disrupting the operation of the others.
- **Node Heartbeat**: Each Curio node in a cluster must post a heartbeat message in HarmonyDB every 10 minutes, updating its status. If a heartbeat is missed, the node is considered lost and its tasks can be scheduled on the remaining nodes.
- **Task Retry**: Each task in Curio has a limit on how many times it may be retried before being declared lost. This ensures that Curio does not keep retrying bad tasks indefinitely, which safeguards against wasted computation time and storage.
- **Polling**: Curio uses a polling system to avoid overloading nodes: each node checks for tasks it can handle, and idle nodes are prioritized for even workload distribution.
- **Simple Configuration Management**: The configuration is stored in the database in the form of layers. These layers can be stacked on top of each other to create the final configuration. Users can reuse layers to control the behavior of multiple machines without having to maintain each node's configuration separately. Start the binary with the appropriate flags to connect to YugabyteDB and to select which configuration layers to apply; a minimal sketch follows.
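The example below is a minimal sketch rather than canonical documentation: the flag names are those printed by `curio run --help` at the time of writing and may change, the YugabyteDB credentials are the defaults of a local test install, and `seal-gpu` is a hypothetical user-defined layer used purely for illustration.

```bash
# Connect to a local YugabyteDB and run with the bundled 'gui' layer plus a
# hypothetical user-defined 'seal-gpu' layer stacked on top of the base config.
curio run \
  --db-host=127.0.0.1 \
  --db-name=yugabyte \
  --db-user=yugabyte \
  --db-password=yugabyte \
  --layers=gui,seal-gpu
```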
### Getting Started with Curio
```bash
cd lotus
git pull
make clean deps all
sudo make install
```
On your local-dev-net or calibrationnet lotus-miner machine, initiate:
`curio guided-setup`
### Need More Info?
For detailed documentation and additional information on Curio:
- Curio Overview [link to be inserted]
- Curio official website [link to be inserted]
❗Curio is in a Beta state, and we recommend that users run Curio in a testing environment or on the Calibration network for the time being.
## New features
- feat: exchange: change GetBlocks to always fetch the requested number of tipsets ([filecoin-project/lotus#11565](https://github.com/filecoin-project/lotus/pull/11565))
- feat: syncer: optimize syncFork for one-epoch forks ([filecoin-project/lotus#11533](https://github.com/filecoin-project/lotus/pull/11533))
- feat: api: improve the correctness of Eth's trace_block (#11609) ([filecoin-project/lotus#11609](https://github.com/filecoin-project/lotus/pull/11609))
- perf: api: add indexes to event topics and emitter addr (#11477) ([filecoin-project/lotus#11477](https://github.com/filecoin-project/lotus/pull/11477))
- feat: drand: refactor round verification ([filecoin-project/lotus#11598](https://github.com/filecoin-project/lotus/pull/11598))
- feat: add Forest bootstrap nodes (#11636) ([filecoin-project/lotus#11636](https://github.com/filecoin-project/lotus/pull/11636))
- feat: curio: add miner init (#11775) ([filecoin-project/lotus#11775](https://github.com/filecoin-project/lotus/pull/11775))
- feat: curio: sectors UI (#11869) ([filecoin-project/lotus#11869](https://github.com/filecoin-project/lotus/pull/11869))
- feat: curio: storage index gc task (#11884) ([filecoin-project/lotus#11884](https://github.com/filecoin-project/lotus/pull/11884))
- feat: curio: web based config edit (#11822) ([filecoin-project/lotus#11822](https://github.com/filecoin-project/lotus/pull/11822))
- feat: spcli: sectors extend improvements (#11798) ([filecoin-project/lotus#11798](https://github.com/filecoin-project/lotus/pull/11798))
- feat: curio: Add schemas for DDO deal support (#11805) ([filecoin-project/lotus#11805](https://github.com/filecoin-project/lotus/pull/11805))
- feat: curioweb: add favicon (#11804) ([filecoin-project/lotus#11804](https://github.com/filecoin-project/lotus/pull/11804))
- feat: lotus-provider: Fetch params on startup when needed ([filecoin-project/lotus#11650](https://github.com/filecoin-project/lotus/pull/11650))
- feat: mpool: Cache actors in lite mode (#11668) ([filecoin-project/lotus#11668](https://github.com/filecoin-project/lotus/pull/11668))
- feat: curio: simpler reservation release logic (#11900) ([filecoin-project/lotus#11900](https://github.com/filecoin-project/lotus/pull/11900))
- feat: curio: add StorageInit api (#11918) ([filecoin-project/lotus#11918](https://github.com/filecoin-project/lotus/pull/11918))
- feat: lotus-provider: SDR Sealing pipeline ([filecoin-project/lotus#11534](https://github.com/filecoin-project/lotus/pull/11534))
- feat: curioweb: Sector info page (#11846) ([filecoin-project/lotus#11846](https://github.com/filecoin-project/lotus/pull/11846))
- feat: curio web: node info page (#11745) ([filecoin-project/lotus#11745](https://github.com/filecoin-project/lotus/pull/11745))
- feat: fvm: optimize FVM lanes a bit (#11875) ([filecoin-project/lotus#11875](https://github.com/filecoin-project/lotus/pull/11875))
- feat: Gateway API: Add ETH -> FIL and FIL -> ETH address conversion APIs to the Gateway (#11979) ([filecoin-project/lotus#11979](https://github.com/filecoin-project/lotus/pull/11979))
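The Gateway address-conversion APIs in the last item above are exposed as ordinary JSON-RPC methods. The following is a hedged sketch of calling one of them with curl; the host, port, RPC path and example address are assumptions, so substitute the values from your own gateway's `--listen` flag.

```bash
# Assumed local lotus-gateway endpoint and an arbitrary example 0x address.
curl -s -X POST http://127.0.0.1:2346/rpc/v1 \
  -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","id":1,"method":"Filecoin.EthAddressToFilecoinAddress","params":["0xd4c5fb16488aa48081296299d54b0c648c9333da"]}'
```

The reverse method, `Filecoin.FilecoinAddressToEthAddress`, takes a Filecoin address string as its parameter in the same way.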
## Improvements
- fix: api: return the correct block gas limit in the EthAPI (#11747) ([filecoin-project/lotus#11747](https://github.com/filecoin-project/lotus/pull/11747))
- fix: exchange: explicitly cast the block message limit const (#11511) ([filecoin-project/lotus#11511](https://github.com/filecoin-project/lotus/pull/11511))
- fix: Eth API: accept input data in call arguments under field 'input' (#11505) ([filecoin-project/lotus#11505](https://github.com/filecoin-project/lotus/pull/11505))
- fix: api: Length check the array sent to eth_feeHistory RPC (#11696) ([filecoin-project/lotus#11696](https://github.com/filecoin-project/lotus/pull/11696))
- fix: api: fix EthSubscribe tipsets off by one (#11858) ([filecoin-project/lotus#11858](https://github.com/filecoin-project/lotus/pull/11858))
- fix: lotus-provider: Fix log output format in wdPostTaskCmd ([filecoin-project/lotus#11504](https://github.com/filecoin-project/lotus/pull/11504))
- fix: lmcli: make 'sectors list' DDO-aware (#11839) ([filecoin-project/lotus#11839](https://github.com/filecoin-project/lotus/pull/11839))
- fix: lpwinning: Fix MiningBase.afterPropDelay ([filecoin-project/lotus#11654](https://github.com/filecoin-project/lotus/pull/11654))
- fix: exchange: allow up to 10k messages per block ([filecoin-project/lotus#11506](https://github.com/filecoin-project/lotus/pull/11506))
- fix: harmony: Fix task reclaim on restart ([filecoin-project/lotus#11498](https://github.com/filecoin-project/lotus/pull/11498))
- fix: lotus-provider: Wait for the correct taskID ([filecoin-project/lotus#11493](https://github.com/filecoin-project/lotus/pull/11493))
- fix: lotus-provider: show addresses in log ([filecoin-project/lotus#11490](https://github.com/filecoin-project/lotus/pull/11490))
- fix: sql Scan cannot write to an object ([filecoin-project/lotus#11485](https://github.com/filecoin-project/lotus/pull/11485))
- fix: lotus-provider: Fix winning PoSt ([filecoin-project/lotus#11482](https://github.com/filecoin-project/lotus/pull/11482))
- fix: lotus-provider: lotus-provider msg sending ([filecoin-project/lotus#11480](https://github.com/filecoin-project/lotus/pull/11480))
- fix: chain: use latest go-state-types types for miner UI ([filecoin-project/lotus#11566](https://github.com/filecoin-project/lotus/pull/11566))
- fix: Dockerfile non-interactive snapshot import (#11579) ([filecoin-project/lotus#11579](https://github.com/filecoin-project/lotus/pull/11579))
- fix: daemon: avoid prompting to remove chain when noninteractive (#11582) ([filecoin-project/lotus#11582](https://github.com/filecoin-project/lotus/pull/11582))
- fix: (events): check for sync-in-progress (#11932) ([filecoin-project/lotus#11932](https://github.com/filecoin-project/lotus/pull/11932))
- fix: curio: common commands (#11879) ([filecoin-project/lotus#11879](https://github.com/filecoin-project/lotus/pull/11879))
- fix: curio: fix incorrect null check for varchar column (#11881) ([filecoin-project/lotus#11881](https://github.com/filecoin-project/lotus/pull/11881))
- fix: local storage reservations fixes (#11866) ([filecoin-project/lotus#11866](https://github.com/filecoin-project/lotus/pull/11866))
- fix: curio: Check deal start epoch passed in PrecommitSubmit (#11873) ([filecoin-project/lotus#11873](https://github.com/filecoin-project/lotus/pull/11873))
- fix: curio: base config by default (#11676) ([filecoin-project/lotus#11676](https://github.com/filecoin-project/lotus/pull/11676))
- fix: curio: Start BoostAdapters before blocking rpc serve (#11871) ([filecoin-project/lotus#11871](https://github.com/filecoin-project/lotus/pull/11871))
- fix: cli: json flag (#11868) ([filecoin-project/lotus#11868](https://github.com/filecoin-project/lotus/pull/11868))
- feat: curio/lmrpc: Ingest backpressure (#11865) ([filecoin-project/lotus#11865](https://github.com/filecoin-project/lotus/pull/11865))
- feat: curio: Cleanup data copies after seal ops (#11847) ([filecoin-project/lotus#11847](https://github.com/filecoin-project/lotus/pull/11847))
- fix: spcli: add reference to the terminate command (#11851) ([filecoin-project/lotus#11851](https://github.com/filecoin-project/lotus/pull/11851))
- fix: sealing: improve gasEstimate logging (#11840) ([filecoin-project/lotus#11840](https://github.com/filecoin-project/lotus/pull/11840))
- fix: harmony: Try other tasks when storage claim fails
- fix: test: TestForkPreMigration hanging when env-var is set (#11838) ([filecoin-project/lotus#11838](https://github.com/filecoin-project/lotus/pull/11838))
- fix: piece: Don't return StartEpoch in PieceDealInfo.EndEpoch (#11832) ([filecoin-project/lotus#11832](https://github.com/filecoin-project/lotus/pull/11832))
- fix: paths/local: Fix on-disk storage accounting in new reservations (#11825) ([filecoin-project/lotus#11825](https://github.com/filecoin-project/lotus/pull/11825))
- fix: sealing pipeline: Fix panic on padding pieces in WaitDeals (#11708) ([filecoin-project/lotus#11708](https://github.com/filecoin-project/lotus/pull/11708))
- feat: ipfs: remove IPFS client backend (#11661) ([filecoin-project/lotus#11661](https://github.com/filecoin-project/lotus/pull/11661))
- fix: docs: Modify generate-lotus-cli.py to ignoring aliases. ([filecoin-project/lotus#11535](https://github.com/filecoin-project/lotus/pull/11535))
- fix: eth: decode as actor creation iff "to" is the EAM (#11520) ([filecoin-project/lotus#11520](https://github.com/filecoin-project/lotus/pull/11520))
- fix(events): properly decorate events db errors (#11856) ([filecoin-project/lotus#11856](https://github.com/filecoin-project/lotus/pull/11856))
- fix: CLI: adjust TermMax for extend-claim used by a different client (#11764) ([filecoin-project/lotus#11764](https://github.com/filecoin-project/lotus/pull/11764))
- fix: copy Flags field from SectorOnChainInfo (#11963) ([filecoin-project/lotus#11963](https://github.com/filecoin-project/lotus/pull/11963))
- feat: libp2p: Lotus stream cleanup (#11993) ([filecoin-project/lotus#11993](https://github.com/filecoin-project/lotus/pull/11993))
## Dependencies
- chore: update deps (#11819) ([filecoin-project/lotus#11819](https://github.com/filecoin-project/lotus/pull/11819))
- chore: mod: use upstream poseidon ([filecoin-project/lotus#11557](https://github.com/filecoin-project/lotus/pull/11557))
- deps: multiaddress ([filecoin-project/lotus#11558](https://github.com/filecoin-project/lotus/pull/11558))
- chore:libp2p: update libp2p deps in master ([filecoin-project/lotus#11522](https://github.com/filecoin-project/lotus/pull/11522))
- dep: go-multi-address ([filecoin-project/lotus#11563](https://github.com/filecoin-project/lotus/pull/11563))
- chore: update go-data-transfer and go-graphsync (#12000) ([filecoin-project/lotus#12000](https://github.com/filecoin-project/lotus/pull/12000))
- chore: update drand (#12021) ([filecoin-project/lotus#12021](https://github.com/filecoin-project/lotus/pull/12021))
- chore: libp2p: update to v0.34.1 (#12027) ([filecoin-project/lotus#12027](https://github.com/filecoin-project/lotus/pull/12027))
- github.com/filecoin-project/go-amt-ipld/ (v4.2.0 -> v4.3.0)
- github.com/filecoin-project/go-state-types (v0.13.1 -> v0.13.3)
- github.com/libp2p/go-libp2p-pubsub (v0.10.0 -> v0.10.1)
- github.com/libp2p/go-libp2p (v0.33.2 -> v0.34.1)
## Others
- ci: create gh workflow that runs go checks (#11761) ([filecoin-project/lotus#11761](https://github.com/filecoin-project/lotus/pull/11761))
- ci: create gh workflow that runs go build (#11760) ([filecoin-project/lotus#11760](https://github.com/filecoin-project/lotus/pull/11760))
- ci: cancel in progress runs on pull requests only (#11842) ([filecoin-project/lotus#11842](https://github.com/filecoin-project/lotus/pull/11842))
- ci: list processes before calling apt-get to enable debugging (#11815) ([filecoin-project/lotus#11815](https://github.com/filecoin-project/lotus/pull/11815))
- ci: allow master main sync to write to the repository (#11784) ([filecoin-project/lotus#11784](https://github.com/filecoin-project/lotus/pull/11784))
- ci: create gh workflow that runs go tests (#11762) ([filecoin-project/lotus#11762](https://github.com/filecoin-project/lotus/pull/11762))
- ci: deprecate circle ci in favour of github actions (#11786) ([filecoin-project/lotus#11786](https://github.com/filecoin-project/lotus/pull/11786))
- misc: Drop the raft-cluster experiment ([filecoin-project/lotus#11468](https://github.com/filecoin-project/lotus/pull/11468))
- chore: fix some typos in comments (#11892) ([filecoin-project/lotus#11892](https://github.com/filecoin-project/lotus/pull/11892))
- chore: fix typos (#11848) ([filecoin-project/lotus#11848](https://github.com/filecoin-project/lotus/pull/11848))
- chore: fix typo (#11697) ([filecoin-project/lotus#11697](https://github.com/filecoin-project/lotus/pull/11697))
- chore: fix 2 typo's (#11542) ([filecoin-project/lotus#11542](https://github.com/filecoin-project/lotus/pull/11542))
- chore: calibnet: Update bootstrap peer list ([filecoin-project/lotus#11672](https://github.com/filecoin-project/lotus/pull/11672))
- chore: build: Bump version in master ([filecoin-project/lotus#11475](https://github.com/filecoin-project/lotus/pull/11475))
- chore: releases: merge releases branch to master ([filecoin-project/lotus#11578](https://github.com/filecoin-project/lotus/pull/11578))
- chore: Add systemd memory note on install and in config (#11641) ([filecoin-project/lotus#11641](https://github.com/filecoin-project/lotus/pull/11641))
- chore: switch back to upstream ledger library (#11651) ([filecoin-project/lotus#11651](https://github.com/filecoin-project/lotus/pull/11651))
- chore: build: update minimum go version to 1.21.7 (#11652) ([filecoin-project/lotus#11652](https://github.com/filecoin-project/lotus/pull/11652))
- chore: docs: nv-skeleton documentation (#11065) ([filecoin-project/lotus#11065](https://github.com/filecoin-project/lotus/pull/11065))
- chore: Add v13 support to invariants-checker (#11931) ([filecoin-project/lotus#11931](https://github.com/filecoin-project/lotus/pull/11931))
- chore: remove unmaintained bootstrappers (#11983) ([filecoin-project/lotus#11983](https://github.com/filecoin-project/lotus/pull/11983))
- chore: go mod: revert go version change as it breaks Docker build (#12050) ([filecoin-project/lotus#12050](https://github.com/filecoin-project/lotus/pull/12050))
## Contributors
| Contributor | Commits | Lines ± | Files Changed |
|-------------|---------|---------|---------------|
| Rod Vagg | 20 | +55315/-204 | 58 |
| Łukasz Magiera | 201 | +16244/-6541 | 647 |
| Andrew Jackson (Ajax) | 53 | +15293/-6764 | 394 |
| Phi-rjan | 6 | +12669/-4521 | 221 |
| LexLuthr | 20 | +5972/-2815 | 120 |
| Steven Allen | 22 | +1626/-1264 | 77 |
| Piotr Galar | 9 | +790/-412 | 33 |
| Aayush Rajasekaran | 4 | +642/-509 | 12 |
| Lee | 1 | +601/-533 | 9 |
| qwdsds | 3 | +617/-510 | 11 |
| Phi | 11 | +551/-83 | 32 |
| Jiaying Wang | 5 | +433/-20 | 13 |
| Masih H. Derkani | 4 | +350/-101 | 18 |
| Aayush | 4 | +143/-76 | 17 |
| Aarsh Shah | 3 | +63/-11 | 5 |
| jennijuju | 3 | +22/-22 | 12 |
| hunjixin | 1 | +21/-14 | 4 |
| beck | 2 | +17/-17 | 2 |
| tom123222 | 2 | +28/-4 | 2 |
| Ian Norden | 1 | +21/-1 | 1 |
| ZenGround0 | 1 | +3/-15 | 1 |
| shuangcui | 1 | +7/-7 | 6 |
| Vid Bregar | 1 | +7/-4 | 2 |
| writegr | 1 | +5/-5 | 5 |
| Nagaprasad V R | 1 | +9/-0 | 1 |
| forcedebug | 1 | +4/-4 | 4 |
| parthshah1 | 2 | +6/-1 | 2 |
| fuyangpengqi | 1 | +3/-3 | 3 |
| Samuel Arogbonlo | 1 | +6/-0 | 2 |
| GlacierWalrus | 1 | +0/-6 | 1 |
| Aloxaf | 1 | +6/-0 | 2 |
| Rob Quist | 2 | +2/-3 | 3 |
| wersfeds | 1 | +2/-2 | 1 |
| Jon | 1 | +2/-0 | 1 |
| 0x5459 | 1 | +1/-0 | 1 |
# v1.26.3 / 2024-04-22
**This is a patch release that addresses high memory load concerns for the Lotus daemon in the coming network migration for network version 22, scheduled on epoch `3855360 - 2024-04-24 - 14:00:00Z`.**
If your Lotus daemon is running on a machine with less than 160GB of combined memory and swap, you should upgrade to this patch release to ensure you do not encounter any Out-Of-Memory issues during the pre-migration.
# v1.26.2 / 2024-04-08 # v1.26.2 / 2024-04-08
**This is a mandatory patch release for the Filecoin network version 22 mainnet upgrade, for all node operators.** **This is a mandatory patch release for the Filecoin network version 22 mainnet upgrade, for all node operators.**
@ -74,6 +287,7 @@ The Filecoin network version 22 delivers the following FIPs:
lotus state actor-cids --network-version=22 lotus state actor-cids --network-version=22
Network Version: 22 Network Version: 22
Actor Version: 13 Actor Version: 13
Manifest CID: bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e Manifest CID: bafy2bzacecdhvfmtirtojwhw2tyciu4jkbpsbk5g53oe24br27oy62sn4dc4e
Actor CID Actor CID
@ -3751,7 +3965,7 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd
- Config for deal publishing control addresses ([filecoin-project/lotus#6697](https://github.com/filecoin-project/lotus/pull/6697)) - Config for deal publishing control addresses ([filecoin-project/lotus#6697](https://github.com/filecoin-project/lotus/pull/6697))
- Set `DealPublishControl` to set the wallet used for sending `PublishStorageDeals` messages, instructions [here](https://lotus.filecoin.io/storage-providers/operate/addresses/#control-addresses). - Set `DealPublishControl` to set the wallet used for sending `PublishStorageDeals` messages, instructions [here](https://lotus.filecoin.io/storage-providers/operate/addresses/#control-addresses).
- Config UX improvements ([filecoin-project/lotus#6848](https://github.com/filecoin-project/lotus/pull/6848)) - Config UX improvements ([filecoin-project/lotus#6848](https://github.com/filecoin-project/lotus/pull/6848))
- You can now preview the the default and updated node config by running `lotus/lotus-miner config default/updated` - You can now preview the default and updated node config by running `lotus/lotus-miner config default/updated`
## New Features ## New Features
- ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356)) - ⭐️⭐️⭐️ Support standalone miner-market process ([filecoin-project/lotus#6356](https://github.com/filecoin-project/lotus/pull/6356))
@ -3858,6 +4072,7 @@ This is a **highly recommended** but optional Lotus v1.11.1 release that introd
- Release Template: remove binary validation step ([filecoin-project/lotus#6709](https://github.com/filecoin-project/lotus/pull/6709)) - Release Template: remove binary validation step ([filecoin-project/lotus#6709](https://github.com/filecoin-project/lotus/pull/6709))
- Reset of the interop network ([filecoin-project/lotus#6689](https://github.com/filecoin-project/lotus/pull/6689)) - Reset of the interop network ([filecoin-project/lotus#6689](https://github.com/filecoin-project/lotus/pull/6689))
- Update version.go to 1.11.1 ([filecoin-project/lotus#6621](https://github.com/filecoin-project/lotus/pull/6621)) - Update version.go to 1.11.1 ([filecoin-project/lotus#6621](https://github.com/filecoin-project/lotus/pull/6621))
- chore: pin golanglint-ci to v1.58.2 ([filecoin-project/lotus#12054](https://github.com/filecoin-project/lotus/pull/12054))
## Contributors ## Contributors
@ -5198,7 +5413,7 @@ This consensus-breaking release of Lotus upgrades the actors version to v2.0.0.
#### Mining #### Mining
- Increased ExpectedSealDuration and and WaitDealsDelay (https://github.com/filecoin-project/lotus/pull/3743) - Increased ExpectedSealDuration and WaitDealsDelay (https://github.com/filecoin-project/lotus/pull/3743)
- Miner backup/restore commands (https://github.com/filecoin-project/lotus/pull/4133) - Miner backup/restore commands (https://github.com/filecoin-project/lotus/pull/4133)
- lotus-miner: add more help text to storage / attach (https://github.com/filecoin-project/lotus/pull/3961) - lotus-miner: add more help text to storage / attach (https://github.com/filecoin-project/lotus/pull/3961)
- Reject deals that are > 7 days in the future in the BasicDealFilter (https://github.com/filecoin-project/lotus/pull/4173) - Reject deals that are > 7 days in the future in the BasicDealFilter (https://github.com/filecoin-project/lotus/pull/4173)

Dockerfile

@ -73,7 +73,7 @@ COPY --from=lotus-builder /opt/filecoin/lotus /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-shed /usr/local/bin/
COPY scripts/docker-lotus-entrypoint.sh / COPY scripts/docker-lotus-entrypoint.sh /
ARG DOCKER_LOTUS_IMPORT_SNAPSHOT https://snapshots.mainnet.filops.net/minimal/latest ARG DOCKER_LOTUS_IMPORT_SNAPSHOT=https://forest-archive.chainsafe.dev/latest/mainnet/
ENV DOCKER_LOTUS_IMPORT_SNAPSHOT ${DOCKER_LOTUS_IMPORT_SNAPSHOT} ENV DOCKER_LOTUS_IMPORT_SNAPSHOT ${DOCKER_LOTUS_IMPORT_SNAPSHOT}
ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters
ENV LOTUS_PATH /var/lib/lotus ENV LOTUS_PATH /var/lib/lotus
@ -109,7 +109,7 @@ COPY --from=lotus-builder /opt/filecoin/lotus-wallet /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-gateway /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-miner /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-worker /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-provider /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/curio /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-stats /usr/local/bin/
COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/ COPY --from=lotus-builder /opt/filecoin/lotus-fountain /usr/local/bin/
@ -118,13 +118,13 @@ RUN mkdir /var/lib/lotus
RUN mkdir /var/lib/lotus-miner RUN mkdir /var/lib/lotus-miner
RUN mkdir /var/lib/lotus-worker RUN mkdir /var/lib/lotus-worker
RUN mkdir /var/lib/lotus-wallet RUN mkdir /var/lib/lotus-wallet
RUN mkdir /var/lib/lotus-provider RUN mkdir /var/lib/curio
RUN chown fc: /var/tmp/filecoin-proof-parameters RUN chown fc: /var/tmp/filecoin-proof-parameters
RUN chown fc: /var/lib/lotus RUN chown fc: /var/lib/lotus
RUN chown fc: /var/lib/lotus-miner RUN chown fc: /var/lib/lotus-miner
RUN chown fc: /var/lib/lotus-worker RUN chown fc: /var/lib/lotus-worker
RUN chown fc: /var/lib/lotus-wallet RUN chown fc: /var/lib/lotus-wallet
RUN chown fc: /var/lib/lotus-provider RUN chown fc: /var/lib/curio
VOLUME /var/tmp/filecoin-proof-parameters VOLUME /var/tmp/filecoin-proof-parameters
@ -132,7 +132,7 @@ VOLUME /var/lib/lotus
VOLUME /var/lib/lotus-miner VOLUME /var/lib/lotus-miner
VOLUME /var/lib/lotus-worker VOLUME /var/lib/lotus-worker
VOLUME /var/lib/lotus-wallet VOLUME /var/lib/lotus-wallet
VOLUME /var/lib/lotus-provider VOLUME /var/lib/curio
EXPOSE 1234 EXPOSE 1234
EXPOSE 2345 EXPOSE 2345

Makefile (125 changed lines)

@ -66,7 +66,7 @@ CLEAN+=build/.update-modules
deps: $(BUILD_DEPS) deps: $(BUILD_DEPS)
.PHONY: deps .PHONY: deps
build-devnets: build lotus-seed lotus-shed lotus-provider build-devnets: build lotus-seed lotus-shed curio sptool
.PHONY: build-devnets .PHONY: build-devnets
debug: GOFLAGS+=-tags=debug debug: GOFLAGS+=-tags=debug
@ -97,14 +97,20 @@ lotus-miner: $(BUILD_DEPS)
.PHONY: lotus-miner .PHONY: lotus-miner
BINS+=lotus-miner BINS+=lotus-miner
lotus-provider: $(BUILD_DEPS) curio: $(BUILD_DEPS)
rm -f lotus-provider rm -f curio
$(GOCC) build $(GOFLAGS) -o lotus-provider ./cmd/lotus-provider $(GOCC) build $(GOFLAGS) -o curio ./cmd/curio
.PHONY: lotus-provider .PHONY: curio
BINS+=lotus-provider BINS+=curio
lp2k: GOFLAGS+=-tags=2k cu2k: GOFLAGS+=-tags=2k
lp2k: lotus-provider cu2k: curio
sptool: $(BUILD_DEPS)
rm -f sptool
$(GOCC) build $(GOFLAGS) -o sptool ./cmd/sptool
.PHONY: sptool
BINS+=sptool
lotus-worker: $(BUILD_DEPS) lotus-worker: $(BUILD_DEPS)
rm -f lotus-worker rm -f lotus-worker
@ -124,13 +130,13 @@ lotus-gateway: $(BUILD_DEPS)
.PHONY: lotus-gateway .PHONY: lotus-gateway
BINS+=lotus-gateway BINS+=lotus-gateway
build: lotus lotus-miner lotus-worker lotus-provider build: lotus lotus-miner lotus-worker curio sptool
@[[ $$(type -P "lotus") ]] && echo "Caution: you have \ @[[ $$(type -P "lotus") ]] && echo "Caution: you have \
an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true
.PHONY: build .PHONY: build
install: install-daemon install-miner install-worker install-provider install: install-daemon install-miner install-worker install-curio install-sptool
install-daemon: install-daemon:
install -C ./lotus /usr/local/bin/lotus install -C ./lotus /usr/local/bin/lotus
@ -138,8 +144,11 @@ install-daemon:
install-miner: install-miner:
install -C ./lotus-miner /usr/local/bin/lotus-miner install -C ./lotus-miner /usr/local/bin/lotus-miner
install-provider: install-curio:
install -C ./lotus-provider /usr/local/bin/lotus-provider install -C ./curio /usr/local/bin/curio
install-sptool:
install -C ./sptool /usr/local/bin/sptool
install-worker: install-worker:
install -C ./lotus-worker /usr/local/bin/lotus-worker install -C ./lotus-worker /usr/local/bin/lotus-worker
@ -156,8 +165,11 @@ uninstall-daemon:
uninstall-miner: uninstall-miner:
rm -f /usr/local/bin/lotus-miner rm -f /usr/local/bin/lotus-miner
uninstall-provider: uninstall-curio:
rm -f /usr/local/bin/lotus-provider rm -f /usr/local/bin/curio
uninstall-sptool:
rm -f /usr/local/bin/sptool
uninstall-worker: uninstall-worker:
rm -f /usr/local/bin/lotus-worker rm -f /usr/local/bin/lotus-worker
@ -246,7 +258,9 @@ install-daemon-service: install-daemon
install -C -m 0644 ./scripts/lotus-daemon.service /etc/systemd/system/lotus-daemon.service install -C -m 0644 ./scripts/lotus-daemon.service /etc/systemd/system/lotus-daemon.service
systemctl daemon-reload systemctl daemon-reload
@echo @echo
@echo "lotus-daemon service installed. Don't forget to run 'sudo systemctl start lotus-daemon' to start it and 'sudo systemctl enable lotus-daemon' for it to be enabled on startup." @echo "lotus-daemon service installed."
@echo "To start the service, run: 'sudo systemctl start lotus-daemon'"
@echo "To enable the service on startup, run: 'sudo systemctl enable lotus-daemon'"
install-miner-service: install-miner install-daemon-service install-miner-service: install-miner install-daemon-service
mkdir -p /etc/systemd/system mkdir -p /etc/systemd/system
@ -254,15 +268,17 @@ install-miner-service: install-miner install-daemon-service
install -C -m 0644 ./scripts/lotus-miner.service /etc/systemd/system/lotus-miner.service install -C -m 0644 ./scripts/lotus-miner.service /etc/systemd/system/lotus-miner.service
systemctl daemon-reload systemctl daemon-reload
@echo @echo
@echo "lotus-miner service installed. Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup." @echo "lotus-miner service installed."
@echo "To start the service, run: 'sudo systemctl start lotus-miner'"
@echo "To enable the service on startup, run: 'sudo systemctl enable lotus-miner'"
install-provider-service: install-provider install-daemon-service install-curio-service: install-curio install-sptool install-daemon-service
mkdir -p /etc/systemd/system mkdir -p /etc/systemd/system
mkdir -p /var/log/lotus mkdir -p /var/log/lotus
install -C -m 0644 ./scripts/lotus-provider.service /etc/systemd/system/lotus-provider.service install -C -m 0644 ./scripts/curio.service /etc/systemd/system/curio.service
systemctl daemon-reload systemctl daemon-reload
@echo @echo
@echo "lotus-provider service installed. Don't forget to run 'sudo systemctl start lotus-provider' to start it and 'sudo systemctl enable lotus-provider' for it to be enabled on startup." @echo "Curio service installed. Don't forget to run 'sudo systemctl start curio' to start it and 'sudo systemctl enable curio' for it to be enabled on startup."
install-main-services: install-miner-service install-main-services: install-miner-service
@ -282,10 +298,10 @@ clean-miner-service:
rm -f /etc/systemd/system/lotus-miner.service rm -f /etc/systemd/system/lotus-miner.service
systemctl daemon-reload systemctl daemon-reload
clean-provider-service: clean-curio-service:
-systemctl stop lotus-provider -systemctl stop curio
-systemctl disable lotus-provider -systemctl disable curio
rm -f /etc/systemd/system/lotus-provider.service rm -f /etc/systemd/system/curio.service
systemctl daemon-reload systemctl daemon-reload
clean-main-services: clean-daemon-service clean-main-services: clean-daemon-service
@ -303,6 +319,10 @@ install-completions:
install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus
install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus
unittests:
@$(GOCC) test $(shell go list ./... | grep -v /lotus/itests)
.PHONY: unittests
clean: clean:
rm -rf $(CLEAN) $(BINS) rm -rf $(CLEAN) $(BINS)
-$(MAKE) -C $(FFI_PATH) clean -$(MAKE) -C $(FFI_PATH) clean
@ -324,7 +344,7 @@ actors-code-gen:
$(GOCC) fmt ./... $(GOCC) fmt ./...
actors-gen: actors-code-gen actors-gen: actors-code-gen
./scripts/fiximports $(GOCC) run ./scripts/fiximports
.PHONY: actors-gen .PHONY: actors-gen
bundle-gen: bundle-gen:
@ -358,7 +378,7 @@ docsgen-md-bin: api-gen actors-gen
docsgen-openrpc-bin: api-gen actors-gen docsgen-openrpc-bin: api-gen actors-gen
$(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd $(GOCC) build $(GOFLAGS) -o docgen-openrpc ./api/docgen-openrpc/cmd
docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-provider docsgen-md: docsgen-md-full docsgen-md-storage docsgen-md-worker docsgen-md-curio
docsgen-md-full: docsgen-md-bin docsgen-md-full: docsgen-md-bin
./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md ./docgen-md "api/api_full.go" "FullNode" "api" "./api" > documentation/en/api-v1-unstable-methods.md
@ -367,46 +387,73 @@ docsgen-md-storage: docsgen-md-bin
./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md ./docgen-md "api/api_storage.go" "StorageMiner" "api" "./api" > documentation/en/api-v0-methods-miner.md
docsgen-md-worker: docsgen-md-bin docsgen-md-worker: docsgen-md-bin
./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md ./docgen-md "api/api_worker.go" "Worker" "api" "./api" > documentation/en/api-v0-methods-worker.md
docsgen-md-provider: docsgen-md-bin docsgen-md-curio: docsgen-md-bin
./docgen-md "api/api_lp.go" "Provider" "api" "./api" > documentation/en/api-v0-methods-provider.md ./docgen-md "api/api_curio.go" "Curio" "api" "./api" > documentation/en/api-v0-methods-curio.md
docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway docsgen-openrpc: docsgen-openrpc-full docsgen-openrpc-storage docsgen-openrpc-worker docsgen-openrpc-gateway
docsgen-openrpc-full: docsgen-openrpc-bin docsgen-openrpc-full: docsgen-openrpc-bin
./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" -gzip > build/openrpc/full.json.gz ./docgen-openrpc "api/api_full.go" "FullNode" "api" "./api" > build/openrpc/full.json
docsgen-openrpc-storage: docsgen-openrpc-bin docsgen-openrpc-storage: docsgen-openrpc-bin
./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" -gzip > build/openrpc/miner.json.gz ./docgen-openrpc "api/api_storage.go" "StorageMiner" "api" "./api" > build/openrpc/miner.json
docsgen-openrpc-worker: docsgen-openrpc-bin docsgen-openrpc-worker: docsgen-openrpc-bin
./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" -gzip > build/openrpc/worker.json.gz ./docgen-openrpc "api/api_worker.go" "Worker" "api" "./api" > build/openrpc/worker.json
docsgen-openrpc-gateway: docsgen-openrpc-bin docsgen-openrpc-gateway: docsgen-openrpc-bin
./docgen-openrpc "api/api_gateway.go" "Gateway" "api" "./api" -gzip > build/openrpc/gateway.json.gz ./docgen-openrpc "api/api_gateway.go" "Gateway" "api" "./api" > build/openrpc/gateway.json
.PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin .PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin
fiximports: fiximports:
./scripts/fiximports $(GOCC) run ./scripts/fiximports
gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen
./scripts/fiximports $(GOCC) run ./scripts/fiximports
@echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'" @echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'"
.PHONY: gen .PHONY: gen
jen: gen jen: gen
snap: lotus lotus-miner lotus-worker lotus-provider snap: lotus lotus-miner lotus-worker curio sptool
snapcraft snapcraft
# snapcraft upload ./lotus_*.snap # snapcraft upload ./lotus_*.snap
# separate from gen because it needs binaries # separate from gen because it needs binaries
docsgen-cli: lotus lotus-miner lotus-worker lotus-provider docsgen-cli: lotus lotus-miner lotus-worker curio sptool
python3 ./scripts/generate-lotus-cli.py python3 ./scripts/generate-lotus-cli.py
./lotus config default > documentation/en/default-lotus-config.toml ./lotus config default > documentation/en/default-lotus-config.toml
./lotus-miner config default > documentation/en/default-lotus-miner-config.toml ./lotus-miner config default > documentation/en/default-lotus-miner-config.toml
./lotus-provider config default > documentation/en/default-lotus-provider-config.toml ./curio config default > documentation/en/default-curio-config.toml
.PHONY: docsgen-cli .PHONY: docsgen-cli
print-%: print-%:
@echo $*=$($*) @echo $*=$($*)
circleci: ### Curio devnet images
go generate -x ./.circleci curio_docker_user?=curio
curio_base_image=$(curio_docker_user)/curio-all-in-one:latest-debug
ffi_from_source?=0
curio-devnet: lotus lotus-miner lotus-shed lotus-seed curio sptool
.PHONY: curio-devnet
curio_docker_build_cmd=docker build --build-arg CURIO_TEST_IMAGE=$(curio_base_image) \
--build-arg FFI_BUILD_FROM_SOURCE=$(ffi_from_source) $(docker_args)
docker/curio-all-in-one:
$(curio_docker_build_cmd) -f Dockerfile.curio --target curio-all-in-one \
-t $(curio_base_image) --build-arg GOFLAGS=-tags=debug .
.PHONY: docker/curio-all-in-one
docker/%:
cd curiosrc/docker/$* && DOCKER_BUILDKIT=1 $(curio_docker_build_cmd) -t $(curio_docker_user)/$*-dev:dev \
--build-arg BUILD_VERSION=dev .
docker/curio-devnet: $(lotus_build_cmd) \
docker/curio-all-in-one docker/lotus docker/lotus-miner docker/curio docker/yugabyte
.PHONY: docker/curio-devnet
curio-devnet/up:
rm -rf ./curiosrc/docker/data && docker compose -f ./curiosrc/docker/docker-compose.yaml up -d
curio-devnet/down:
docker compose -f ./curiosrc/docker/docker-compose.yaml down --rmi=local && sleep 2 && rm -rf ./curiosrc/docker/data
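For readers unfamiliar with these new devnet targets, the intended flow is roughly the sketch below; it assumes Docker with BuildKit and the compose plugin are available locally, and the target names are taken from the Makefile changes above.

```bash
make docker/curio-devnet   # build the curio-all-in-one, lotus, lotus-miner, curio and yugabyte images
make curio-devnet/up       # start the devnet via docker compose
# ...use the devnet...
make curio-devnet/down     # tear it down and remove the local data directory
```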

README.md

@ -7,10 +7,12 @@
<h1 align="center">Project Lotus - 莲</h1> <h1 align="center">Project Lotus - 莲</h1>
<p align="center"> <p align="center">
<a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a> ![example workflow](https://github.com/github/docs/actions/workflows/main.yml/badge.svg)
<a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a> <a href="https://github.com/filecoin-project/lotus/actions/workflows/build.yml"><img src="https://github.com/filecoin-project/lotus/actions/workflows/build.yml/badge.svg"></a>
<a href="https://github.com/filecoin-project/lotus/actions/workflows/check.yml"><img src="https://github.com/filecoin-project/lotus/actions/workflows/check.yml/badge.svg"></a>
<a href="https://github.com/filecoin-project/lotus/actions/workflows/test.yml"><img src="https://github.com/filecoin-project/lotus/actions/workflows/test.yml/badge.svg"></a>
<a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a> <a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.18.8-blue.svg" /></a> <a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.21.7-blue.svg" /></a>
<br> <br>
</p> </p>

api/api_curio.go (new file, 34 lines)

@ -0,0 +1,34 @@
package api
import (
"context"
"net/http"
"net/url"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/storage/sealer/fsutil"
"github.com/filecoin-project/lotus/storage/sealer/storiface"
)
type Curio interface {
Version(context.Context) (Version, error) //perm:admin
AllocatePieceToSector(ctx context.Context, maddr address.Address, piece PieceDealInfo, rawSize int64, source url.URL, header http.Header) (SectorOffset, error) //perm:write
StorageInit(ctx context.Context, path string, opts storiface.LocalStorageMeta) error //perm:admin
StorageAddLocal(ctx context.Context, path string) error //perm:admin
StorageDetachLocal(ctx context.Context, path string) error //perm:admin
StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin
StorageLocal(ctx context.Context) (map[storiface.ID]string, error) //perm:admin
StorageStat(ctx context.Context, id storiface.ID) (fsutil.FsStat, error) //perm:admin
StorageInfo(context.Context, storiface.ID) (storiface.StorageInfo, error) //perm:admin
StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]storiface.SectorStorageInfo, error) //perm:admin
LogList(ctx context.Context) ([]string, error) //perm:read
LogSetLevel(ctx context.Context, subsystem, level string) error //perm:admin
// Trigger shutdown
Shutdown(context.Context) error //perm:admin
}

api/api_full.go

@ -335,7 +335,7 @@ type FullNode interface {
WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
// WalletDefaultAddress returns the address marked as default in the wallet. // WalletDefaultAddress returns the address marked as default in the wallet.
WalletDefaultAddress(context.Context) (address.Address, error) //perm:write WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
// WalletSetDefault marks the given address as as the default one. // WalletSetDefault marks the given address as the default one.
WalletSetDefault(context.Context, address.Address) error //perm:write WalletSetDefault(context.Context, address.Address) error //perm:write
// WalletExport returns the private key of an address in the wallet. // WalletExport returns the private key of an address in the wallet.
WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
@ -904,9 +904,6 @@ type FullNode interface {
// the path specified when calling CreateBackup is within the base path // the path specified when calling CreateBackup is within the base path
CreateBackup(ctx context.Context, fpath string) error //perm:admin CreateBackup(ctx context.Context, fpath string) error //perm:admin
RaftState(ctx context.Context) (*RaftStateData, error) //perm:read
RaftLeader(ctx context.Context) (peer.ID, error) //perm:read
// Actor events // Actor events
// GetActorEventsRaw returns all user-programmed and built-in actor events that match the given // GetActorEventsRaw returns all user-programmed and built-in actor events that match the given

api/api_gateway.go

@ -77,6 +77,7 @@ type Gateway interface {
StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (MarketBalance, error)
StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*MarketDeal, error) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*MarketDeal, error)
StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (MinerInfo, error) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (MinerInfo, error)
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error)
StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error)
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
StateNetworkName(context.Context) (dtypes.NetworkName, error) StateNetworkName(context.Context) (dtypes.NetworkName, error)
@ -90,6 +91,8 @@ type Gateway interface {
Version(context.Context) (APIVersion, error) Version(context.Context) (APIVersion, error)
Discover(context.Context) (apitypes.OpenRPCDocument, error) Discover(context.Context) (apitypes.OpenRPCDocument, error)
EthAddressToFilecoinAddress(ctx context.Context, ethAddress ethtypes.EthAddress) (address.Address, error)
FilecoinAddressToEthAddress(ctx context.Context, filecoinAddress address.Address) (ethtypes.EthAddress, error)
EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error)
EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error) EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error)
EthGetBlockTransactionCountByNumber(ctx context.Context, blkNum ethtypes.EthUint64) (ethtypes.EthUint64, error) EthGetBlockTransactionCountByNumber(ctx context.Context, blkNum ethtypes.EthUint64) (ethtypes.EthUint64, error)

api/api_lp.go (deleted)

@ -1,10 +0,0 @@
package api
import "context"
type LotusProvider interface {
Version(context.Context) (Version, error) //perm:admin
// Trigger shutdown
Shutdown(context.Context) error //perm:admin
}

api/api_storage.go

@ -200,7 +200,7 @@ type StorageMiner interface {
// StorageBestAlloc returns list of paths where sector files of the specified type can be allocated, ordered by preference. // StorageBestAlloc returns list of paths where sector files of the specified type can be allocated, ordered by preference.
// Paths with more weight and more % of free space are preferred. // Paths with more weight and more % of free space are preferred.
// Note: This method doesn't filter paths based on AllowTypes/DenyTypes. // Note: This method doesn't filter paths based on AllowTypes/DenyTypes.
StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]storiface.StorageInfo, error) //perm:admin StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType, miner abi.ActorID) ([]storiface.StorageInfo, error) //perm:admin
StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin
StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin
StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin StorageList(ctx context.Context) (map[storiface.ID][]storiface.Decl, error) //perm:admin

api/client/client.go

@ -15,9 +15,9 @@ import (
"github.com/filecoin-project/lotus/lib/rpcenc" "github.com/filecoin-project/lotus/lib/rpcenc"
) )
// NewProviderRpc creates a new http jsonrpc client. // NewCurioRpc creates a new http jsonrpc client.
func NewProviderRpc(ctx context.Context, addr string, requestHeader http.Header) (api.LotusProvider, jsonrpc.ClientCloser, error) { func NewCurioRpc(ctx context.Context, addr string, requestHeader http.Header) (api.Curio, jsonrpc.ClientCloser, error) {
var res v1api.LotusProviderStruct var res v1api.CurioStruct
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin", closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors)) api.GetInternalStructs(&res), requestHeader, jsonrpc.WithErrors(api.RPCErrors))
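As a usage illustration (not part of this diff), a caller could obtain an `api.Curio` client through the renamed constructor roughly as follows. This is a minimal sketch: it assumes the constructor lives in the `api/client` package as before, and the listen address, RPC path and auth token are placeholders that depend on how the Curio node is configured.

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Hypothetical endpoint and token; substitute your node's actual values.
	header := http.Header{}
	header.Set("Authorization", "Bearer <admin-token>")

	curio, closer, err := client.NewCurioRpc(ctx, "ws://127.0.0.1:12300/rpc/v0", header)
	if err != nil {
		panic(err)
	}
	defer closer()

	// Exercise one of the methods from the Curio interface shown earlier.
	v, err := curio.Version(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("connected to Curio, version:", v)
}
```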

api/docgen/docgen.go

@ -146,6 +146,7 @@ func init() {
allocationId := verifreg.AllocationId(0) allocationId := verifreg.AllocationId(0)
addExample(allocationId) addExample(allocationId)
addExample(&allocationId) addExample(&allocationId)
addExample(miner.SectorOnChainInfoFlags(0))
addExample(map[verifreg.AllocationId]verifreg.Allocation{}) addExample(map[verifreg.AllocationId]verifreg.Allocation{})
claimId := verifreg.ClaimId(0) claimId := verifreg.ClaimId(0)
addExample(claimId) addExample(claimId)
@ -356,10 +357,6 @@ func init() {
addExample(map[string]bitfield.BitField{ addExample(map[string]bitfield.BitField{
"": bitfield.NewFromSet([]uint64{5, 6, 7, 10}), "": bitfield.NewFromSet([]uint64{5, 6, 7, 10}),
}) })
addExample(&api.RaftStateData{
NonceMap: make(map[address.Address]uint64),
MsgUuids: make(map[uuid.UUID]*types.SignedMessage),
})
addExample(http.Header{ addExample(http.Header{
"Authorization": []string{"Bearer ey.."}, "Authorization": []string{"Bearer ey.."},
@ -459,10 +456,10 @@ func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []r
i = &api.GatewayStruct{} i = &api.GatewayStruct{}
t = reflect.TypeOf(new(struct{ api.Gateway })).Elem() t = reflect.TypeOf(new(struct{ api.Gateway })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal)) permStruct = append(permStruct, reflect.TypeOf(api.GatewayStruct{}.Internal))
case "Provider": case "Curio":
i = &api.LotusProviderStruct{} i = &api.CurioStruct{}
t = reflect.TypeOf(new(struct{ api.LotusProvider })).Elem() t = reflect.TypeOf(new(struct{ api.Curio })).Elem()
permStruct = append(permStruct, reflect.TypeOf(api.LotusProviderStruct{}.Internal)) permStruct = append(permStruct, reflect.TypeOf(api.CurioStruct{}.Internal))
default: default:
panic("unknown type") panic("unknown type")
} }

api/mocks/mock_full.go

@ -27,8 +27,9 @@ import (
auth "github.com/filecoin-project/go-jsonrpc/auth" auth "github.com/filecoin-project/go-jsonrpc/auth"
abi "github.com/filecoin-project/go-state-types/abi" abi "github.com/filecoin-project/go-state-types/abi"
big "github.com/filecoin-project/go-state-types/big" big "github.com/filecoin-project/go-state-types/big"
miner "github.com/filecoin-project/go-state-types/builtin/v13/miner"
paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" paych "github.com/filecoin-project/go-state-types/builtin/v8/paych"
miner "github.com/filecoin-project/go-state-types/builtin/v9/miner" miner0 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
crypto "github.com/filecoin-project/go-state-types/crypto" crypto "github.com/filecoin-project/go-state-types/crypto"
dline "github.com/filecoin-project/go-state-types/dline" dline "github.com/filecoin-project/go-state-types/dline"
@ -36,7 +37,7 @@ import (
api "github.com/filecoin-project/lotus/api" api "github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types" apitypes "github.com/filecoin-project/lotus/api/types"
miner0 "github.com/filecoin-project/lotus/chain/actors/builtin/miner" miner1 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
types "github.com/filecoin-project/lotus/chain/types" types "github.com/filecoin-project/lotus/chain/types"
ethtypes "github.com/filecoin-project/lotus/chain/types/ethtypes" ethtypes "github.com/filecoin-project/lotus/chain/types/ethtypes"
alerting "github.com/filecoin-project/lotus/journal/alerting" alerting "github.com/filecoin-project/lotus/journal/alerting"
@ -2934,36 +2935,6 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, a
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4)
} }
// RaftLeader mocks base method.
func (m *MockFullNode) RaftLeader(arg0 context.Context) (peer.ID, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RaftLeader", arg0)
ret0, _ := ret[0].(peer.ID)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RaftLeader indicates an expected call of RaftLeader.
func (mr *MockFullNodeMockRecorder) RaftLeader(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftLeader", reflect.TypeOf((*MockFullNode)(nil).RaftLeader), arg0)
}
// RaftState mocks base method.
func (m *MockFullNode) RaftState(arg0 context.Context) (*api.RaftStateData, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RaftState", arg0)
ret0, _ := ret[0].(*api.RaftStateData)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// RaftState indicates an expected call of RaftState.
func (mr *MockFullNodeMockRecorder) RaftState(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RaftState", reflect.TypeOf((*MockFullNode)(nil).RaftState), arg0)
}
// Session mocks base method. // Session mocks base method.
func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) { func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
@ -3639,7 +3610,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{})
} }
// StateMinerInitialPledgeCollateral mocks base method. // StateMinerInitialPledgeCollateral mocks base method.
func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(big.Int) ret0, _ := ret[0].(big.Int)
@ -3684,7 +3655,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}
} }
// StateMinerPreCommitDepositForPower mocks base method. // StateMinerPreCommitDepositForPower mocks base method.
func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(big.Int) ret0, _ := ret[0].(big.Int)
@ -3849,10 +3820,10 @@ func (mr *MockFullNodeMockRecorder) StateSearchMsg(arg0, arg1, arg2, arg3, arg4
} }
// StateSectorExpiration mocks base method. // StateSectorExpiration mocks base method.
func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorExpiration, error) { func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorExpiration, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*miner0.SectorExpiration) ret0, _ := ret[0].(*miner1.SectorExpiration)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
@ -3879,10 +3850,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 in
} }
// StateSectorPartition mocks base method. // StateSectorPartition mocks base method.
func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorLocation, error) { func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorLocation, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*miner0.SectorLocation) ret0, _ := ret[0].(*miner1.SectorLocation)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
@ -3894,10 +3865,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3
} }
// StateSectorPreCommitInfo mocks base method. // StateSectorPreCommitInfo mocks base method.
func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner.SectorPreCommitOnChainInfo, error) { func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorPreCommitOnChainInfo, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*miner.SectorPreCommitOnChainInfo) ret0, _ := ret[0].(*miner0.SectorPreCommitOnChainInfo)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }

View File

@ -5,6 +5,8 @@ package api
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"net/http"
"net/url"
"time" "time"
"github.com/google/uuid" "github.com/google/uuid"
@ -113,6 +115,41 @@ type CommonNetStub struct {
NetStub NetStub
} }
type CurioStruct struct {
Internal CurioMethods
}
type CurioMethods struct {
AllocatePieceToSector func(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) `perm:"write"`
LogList func(p0 context.Context) ([]string, error) `perm:"read"`
LogSetLevel func(p0 context.Context, p1 string, p2 string) error `perm:"admin"`
Shutdown func(p0 context.Context) error `perm:"admin"`
StorageAddLocal func(p0 context.Context, p1 string) error `perm:"admin"`
StorageDetachLocal func(p0 context.Context, p1 string) error `perm:"admin"`
StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) `perm:"admin"`
StorageInfo func(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) `perm:"admin"`
StorageInit func(p0 context.Context, p1 string, p2 storiface.LocalStorageMeta) error `perm:"admin"`
StorageList func(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) `perm:"admin"`
StorageLocal func(p0 context.Context) (map[storiface.ID]string, error) `perm:"admin"`
StorageStat func(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) `perm:"admin"`
Version func(p0 context.Context) (Version, error) `perm:"admin"`
}
type CurioStub struct {
}
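
The CurioMethods table above is the new RPC surface for Curio, and CurioStruct dispatches each call through its Internal function table, returning ErrNotSupported for anything left nil. A minimal sketch of that dispatch behaviour; the LogList body and its return values are illustrative assumptions, only the types come from this diff:

package main

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	ctx := context.Background()

	// Wire up only LogList; every other method stays nil.
	var curio api.CurioStruct
	curio.Internal.LogList = func(ctx context.Context) ([]string, error) {
		return []string{"chain", "miner"}, nil // hypothetical subsystem names
	}

	subs, err := curio.LogList(ctx)
	fmt.Println(subs, err) // [chain miner] <nil>

	// Methods that were not wired up return api.ErrNotSupported instead of panicking.
	_, err = curio.Version(ctx)
	fmt.Println(err)
}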
type EthSubscriberStruct struct { type EthSubscriberStruct struct {
Internal EthSubscriberMethods Internal EthSubscriberMethods
} }
@ -457,10 +494,6 @@ type FullNodeMethods struct {
PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"` PaychVoucherSubmit func(p0 context.Context, p1 address.Address, p2 *paych.SignedVoucher, p3 []byte, p4 []byte) (cid.Cid, error) `perm:"sign"`
RaftLeader func(p0 context.Context) (peer.ID, error) `perm:"read"`
RaftState func(p0 context.Context) (*RaftStateData, error) `perm:"read"`
StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"` StateAccountKey func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) `perm:"read"`
StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"` StateActorCodeCIDs func(p0 context.Context, p1 abinetwork.Version) (map[string]cid.Cid, error) `perm:"read"`
@ -687,6 +720,8 @@ type GatewayMethods struct {
EthAccounts func(p0 context.Context) ([]ethtypes.EthAddress, error) `` EthAccounts func(p0 context.Context) ([]ethtypes.EthAddress, error) ``
EthAddressToFilecoinAddress func(p0 context.Context, p1 ethtypes.EthAddress) (address.Address, error) ``
EthBlockNumber func(p0 context.Context) (ethtypes.EthUint64, error) `` EthBlockNumber func(p0 context.Context) (ethtypes.EthUint64, error) ``
EthCall func(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) `` EthCall func(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) ``
@ -757,6 +792,8 @@ type GatewayMethods struct {
EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `` EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) ``
FilecoinAddressToEthAddress func(p0 context.Context, p1 address.Address) (ethtypes.EthAddress, error) ``
GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) `` GasEstimateGasPremium func(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) ``
GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `` GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) ``
@ -811,6 +848,8 @@ type GatewayMethods struct {
StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) `` StateMarketStorageDeal func(p0 context.Context, p1 abi.DealID, p2 types.TipSetKey) (*MarketDeal, error) ``
StateMinerDeadlines func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) ``
StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerInfo, error) `` StateMinerInfo func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerInfo, error) ``
StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) `` StateMinerPower func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*MinerPower, error) ``
@ -849,19 +888,6 @@ type GatewayMethods struct {
type GatewayStub struct { type GatewayStub struct {
} }
type LotusProviderStruct struct {
Internal LotusProviderMethods
}
type LotusProviderMethods struct {
Shutdown func(p0 context.Context) error `perm:"admin"`
Version func(p0 context.Context) (Version, error) `perm:"admin"`
}
type LotusProviderStub struct {
}
type NetStruct struct { type NetStruct struct {
Internal NetMethods Internal NetMethods
} }
@ -1169,7 +1195,7 @@ type StorageMinerMethods struct {
StorageAuthVerify func(p0 context.Context, p1 string) ([]auth.Permission, error) `perm:"read"` StorageAuthVerify func(p0 context.Context, p1 string) ([]auth.Permission, error) `perm:"read"`
StorageBestAlloc func(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) `perm:"admin"` StorageBestAlloc func(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType, p4 abi.ActorID) ([]storiface.StorageInfo, error) `perm:"admin"`
StorageDeclareSector func(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error `perm:"admin"` StorageDeclareSector func(p0 context.Context, p1 storiface.ID, p2 abi.SectorID, p3 storiface.SectorFileType, p4 bool) error `perm:"admin"`
@ -1472,6 +1498,149 @@ func (s *CommonStub) Version(p0 context.Context) (APIVersion, error) {
return *new(APIVersion), ErrNotSupported return *new(APIVersion), ErrNotSupported
} }
func (s *CurioStruct) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) {
if s.Internal.AllocatePieceToSector == nil {
return *new(SectorOffset), ErrNotSupported
}
return s.Internal.AllocatePieceToSector(p0, p1, p2, p3, p4, p5)
}
func (s *CurioStub) AllocatePieceToSector(p0 context.Context, p1 address.Address, p2 PieceDealInfo, p3 int64, p4 url.URL, p5 http.Header) (SectorOffset, error) {
return *new(SectorOffset), ErrNotSupported
}
func (s *CurioStruct) LogList(p0 context.Context) ([]string, error) {
if s.Internal.LogList == nil {
return *new([]string), ErrNotSupported
}
return s.Internal.LogList(p0)
}
func (s *CurioStub) LogList(p0 context.Context) ([]string, error) {
return *new([]string), ErrNotSupported
}
func (s *CurioStruct) LogSetLevel(p0 context.Context, p1 string, p2 string) error {
if s.Internal.LogSetLevel == nil {
return ErrNotSupported
}
return s.Internal.LogSetLevel(p0, p1, p2)
}
func (s *CurioStub) LogSetLevel(p0 context.Context, p1 string, p2 string) error {
return ErrNotSupported
}
func (s *CurioStruct) Shutdown(p0 context.Context) error {
if s.Internal.Shutdown == nil {
return ErrNotSupported
}
return s.Internal.Shutdown(p0)
}
func (s *CurioStub) Shutdown(p0 context.Context) error {
return ErrNotSupported
}
func (s *CurioStruct) StorageAddLocal(p0 context.Context, p1 string) error {
if s.Internal.StorageAddLocal == nil {
return ErrNotSupported
}
return s.Internal.StorageAddLocal(p0, p1)
}
func (s *CurioStub) StorageAddLocal(p0 context.Context, p1 string) error {
return ErrNotSupported
}
func (s *CurioStruct) StorageDetachLocal(p0 context.Context, p1 string) error {
if s.Internal.StorageDetachLocal == nil {
return ErrNotSupported
}
return s.Internal.StorageDetachLocal(p0, p1)
}
func (s *CurioStub) StorageDetachLocal(p0 context.Context, p1 string) error {
return ErrNotSupported
}
func (s *CurioStruct) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) {
if s.Internal.StorageFindSector == nil {
return *new([]storiface.SectorStorageInfo), ErrNotSupported
}
return s.Internal.StorageFindSector(p0, p1, p2, p3, p4)
}
func (s *CurioStub) StorageFindSector(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]storiface.SectorStorageInfo, error) {
return *new([]storiface.SectorStorageInfo), ErrNotSupported
}
func (s *CurioStruct) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) {
if s.Internal.StorageInfo == nil {
return *new(storiface.StorageInfo), ErrNotSupported
}
return s.Internal.StorageInfo(p0, p1)
}
func (s *CurioStub) StorageInfo(p0 context.Context, p1 storiface.ID) (storiface.StorageInfo, error) {
return *new(storiface.StorageInfo), ErrNotSupported
}
func (s *CurioStruct) StorageInit(p0 context.Context, p1 string, p2 storiface.LocalStorageMeta) error {
if s.Internal.StorageInit == nil {
return ErrNotSupported
}
return s.Internal.StorageInit(p0, p1, p2)
}
func (s *CurioStub) StorageInit(p0 context.Context, p1 string, p2 storiface.LocalStorageMeta) error {
return ErrNotSupported
}
func (s *CurioStruct) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
if s.Internal.StorageList == nil {
return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported
}
return s.Internal.StorageList(p0)
}
func (s *CurioStub) StorageList(p0 context.Context) (map[storiface.ID][]storiface.Decl, error) {
return *new(map[storiface.ID][]storiface.Decl), ErrNotSupported
}
func (s *CurioStruct) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
if s.Internal.StorageLocal == nil {
return *new(map[storiface.ID]string), ErrNotSupported
}
return s.Internal.StorageLocal(p0)
}
func (s *CurioStub) StorageLocal(p0 context.Context) (map[storiface.ID]string, error) {
return *new(map[storiface.ID]string), ErrNotSupported
}
func (s *CurioStruct) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) {
if s.Internal.StorageStat == nil {
return *new(fsutil.FsStat), ErrNotSupported
}
return s.Internal.StorageStat(p0, p1)
}
func (s *CurioStub) StorageStat(p0 context.Context, p1 storiface.ID) (fsutil.FsStat, error) {
return *new(fsutil.FsStat), ErrNotSupported
}
func (s *CurioStruct) Version(p0 context.Context) (Version, error) {
if s.Internal.Version == nil {
return *new(Version), ErrNotSupported
}
return s.Internal.Version(p0)
}
func (s *CurioStub) Version(p0 context.Context) (Version, error) {
return *new(Version), ErrNotSupported
}
func (s *EthSubscriberStruct) EthSubscription(p0 context.Context, p1 jsonrpc.RawParams) error { func (s *EthSubscriberStruct) EthSubscription(p0 context.Context, p1 jsonrpc.RawParams) error {
if s.Internal.EthSubscription == nil { if s.Internal.EthSubscription == nil {
return ErrNotSupported return ErrNotSupported
@ -3265,28 +3434,6 @@ func (s *FullNodeStub) PaychVoucherSubmit(p0 context.Context, p1 address.Address
return *new(cid.Cid), ErrNotSupported return *new(cid.Cid), ErrNotSupported
} }
func (s *FullNodeStruct) RaftLeader(p0 context.Context) (peer.ID, error) {
if s.Internal.RaftLeader == nil {
return *new(peer.ID), ErrNotSupported
}
return s.Internal.RaftLeader(p0)
}
func (s *FullNodeStub) RaftLeader(p0 context.Context) (peer.ID, error) {
return *new(peer.ID), ErrNotSupported
}
func (s *FullNodeStruct) RaftState(p0 context.Context) (*RaftStateData, error) {
if s.Internal.RaftState == nil {
return nil, ErrNotSupported
}
return s.Internal.RaftState(p0)
}
func (s *FullNodeStub) RaftState(p0 context.Context) (*RaftStateData, error) {
return nil, ErrNotSupported
}
func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) { func (s *FullNodeStruct) StateAccountKey(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (address.Address, error) {
if s.Internal.StateAccountKey == nil { if s.Internal.StateAccountKey == nil {
return *new(address.Address), ErrNotSupported return *new(address.Address), ErrNotSupported
@ -4464,6 +4611,17 @@ func (s *GatewayStub) EthAccounts(p0 context.Context) ([]ethtypes.EthAddress, er
return *new([]ethtypes.EthAddress), ErrNotSupported return *new([]ethtypes.EthAddress), ErrNotSupported
} }
func (s *GatewayStruct) EthAddressToFilecoinAddress(p0 context.Context, p1 ethtypes.EthAddress) (address.Address, error) {
if s.Internal.EthAddressToFilecoinAddress == nil {
return *new(address.Address), ErrNotSupported
}
return s.Internal.EthAddressToFilecoinAddress(p0, p1)
}
func (s *GatewayStub) EthAddressToFilecoinAddress(p0 context.Context, p1 ethtypes.EthAddress) (address.Address, error) {
return *new(address.Address), ErrNotSupported
}
func (s *GatewayStruct) EthBlockNumber(p0 context.Context) (ethtypes.EthUint64, error) { func (s *GatewayStruct) EthBlockNumber(p0 context.Context) (ethtypes.EthUint64, error) {
if s.Internal.EthBlockNumber == nil { if s.Internal.EthBlockNumber == nil {
return *new(ethtypes.EthUint64), ErrNotSupported return *new(ethtypes.EthUint64), ErrNotSupported
@ -4849,6 +5007,17 @@ func (s *GatewayStub) EthUnsubscribe(p0 context.Context, p1 ethtypes.EthSubscrip
return false, ErrNotSupported return false, ErrNotSupported
} }
func (s *GatewayStruct) FilecoinAddressToEthAddress(p0 context.Context, p1 address.Address) (ethtypes.EthAddress, error) {
if s.Internal.FilecoinAddressToEthAddress == nil {
return *new(ethtypes.EthAddress), ErrNotSupported
}
return s.Internal.FilecoinAddressToEthAddress(p0, p1)
}
func (s *GatewayStub) FilecoinAddressToEthAddress(p0 context.Context, p1 address.Address) (ethtypes.EthAddress, error) {
return *new(ethtypes.EthAddress), ErrNotSupported
}
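
The two address-translation methods added to GatewayMethods above let a client convert between Filecoin and Ethereum address forms through the gateway. A hedged round-trip sketch, assuming the Gateway interface exposes the same two methods as the generated struct; the helper name and error handling are illustrative:

package example

import (
	"context"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

// roundTrip converts a Filecoin address to its Ethereum form and back via the
// gateway; depending on the address class the result may come back as an ID
// form rather than the identical input address.
func roundTrip(ctx context.Context, gw api.Gateway, addr address.Address) (address.Address, error) {
	eaddr, err := gw.FilecoinAddressToEthAddress(ctx, addr)
	if err != nil {
		return address.Undef, err
	}
	return gw.EthAddressToFilecoinAddress(ctx, eaddr)
}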
func (s *GatewayStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) { func (s *GatewayStruct) GasEstimateGasPremium(p0 context.Context, p1 uint64, p2 address.Address, p3 int64, p4 types.TipSetKey) (types.BigInt, error) {
if s.Internal.GasEstimateGasPremium == nil { if s.Internal.GasEstimateGasPremium == nil {
return *new(types.BigInt), ErrNotSupported return *new(types.BigInt), ErrNotSupported
@ -5146,6 +5315,17 @@ func (s *GatewayStub) StateMarketStorageDeal(p0 context.Context, p1 abi.DealID,
return nil, ErrNotSupported return nil, ErrNotSupported
} }
func (s *GatewayStruct) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) {
if s.Internal.StateMinerDeadlines == nil {
return *new([]Deadline), ErrNotSupported
}
return s.Internal.StateMinerDeadlines(p0, p1, p2)
}
func (s *GatewayStub) StateMinerDeadlines(p0 context.Context, p1 address.Address, p2 types.TipSetKey) ([]Deadline, error) {
return *new([]Deadline), ErrNotSupported
}
func (s *GatewayStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerInfo, error) { func (s *GatewayStruct) StateMinerInfo(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MinerInfo, error) {
if s.Internal.StateMinerInfo == nil { if s.Internal.StateMinerInfo == nil {
return *new(MinerInfo), ErrNotSupported return *new(MinerInfo), ErrNotSupported
@ -5333,28 +5513,6 @@ func (s *GatewayStub) Web3ClientVersion(p0 context.Context) (string, error) {
return "", ErrNotSupported return "", ErrNotSupported
} }
func (s *LotusProviderStruct) Shutdown(p0 context.Context) error {
if s.Internal.Shutdown == nil {
return ErrNotSupported
}
return s.Internal.Shutdown(p0)
}
func (s *LotusProviderStub) Shutdown(p0 context.Context) error {
return ErrNotSupported
}
func (s *LotusProviderStruct) Version(p0 context.Context) (Version, error) {
if s.Internal.Version == nil {
return *new(Version), ErrNotSupported
}
return s.Internal.Version(p0)
}
func (s *LotusProviderStub) Version(p0 context.Context) (Version, error) {
return *new(Version), ErrNotSupported
}
func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) { func (s *NetStruct) ID(p0 context.Context) (peer.ID, error) {
if s.Internal.ID == nil { if s.Internal.ID == nil {
return *new(peer.ID), ErrNotSupported return *new(peer.ID), ErrNotSupported
@ -6895,14 +7053,14 @@ func (s *StorageMinerStub) StorageAuthVerify(p0 context.Context, p1 string) ([]a
return *new([]auth.Permission), ErrNotSupported return *new([]auth.Permission), ErrNotSupported
} }
func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) { func (s *StorageMinerStruct) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType, p4 abi.ActorID) ([]storiface.StorageInfo, error) {
if s.Internal.StorageBestAlloc == nil { if s.Internal.StorageBestAlloc == nil {
return *new([]storiface.StorageInfo), ErrNotSupported return *new([]storiface.StorageInfo), ErrNotSupported
} }
return s.Internal.StorageBestAlloc(p0, p1, p2, p3) return s.Internal.StorageBestAlloc(p0, p1, p2, p3, p4)
} }
func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType) ([]storiface.StorageInfo, error) { func (s *StorageMinerStub) StorageBestAlloc(p0 context.Context, p1 storiface.SectorFileType, p2 abi.SectorSize, p3 storiface.PathType, p4 abi.ActorID) ([]storiface.StorageInfo, error) {
return *new([]storiface.StorageInfo), ErrNotSupported return *new([]storiface.StorageInfo), ErrNotSupported
} }
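
StorageBestAlloc now takes an abi.ActorID as a fifth parameter. A small caller sketch updated for the extended signature, assuming the StorageMiner interface mirrors the generated struct above; the 32GiB sector size, the FTSealed/PathSealing choice, and the idea that the ID identifies the owning miner are illustrative assumptions:

package example

import (
	"context"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/storage/sealer/storiface"
)

// bestSealPaths asks the miner API where a sealed 32GiB sector should be
// allocated, passing the extra actor ID required by the new signature.
func bestSealPaths(ctx context.Context, sm api.StorageMiner, miner abi.ActorID) ([]storiface.StorageInfo, error) {
	return sm.StorageBestAlloc(ctx, storiface.FTSealed, abi.SectorSize(32<<30), storiface.PathSealing, miner)
}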
@ -7580,10 +7738,10 @@ func (s *WorkerStub) WaitQuiet(p0 context.Context) error {
var _ ChainIO = new(ChainIOStruct) var _ ChainIO = new(ChainIOStruct)
var _ Common = new(CommonStruct) var _ Common = new(CommonStruct)
var _ CommonNet = new(CommonNetStruct) var _ CommonNet = new(CommonNetStruct)
var _ Curio = new(CurioStruct)
var _ EthSubscriber = new(EthSubscriberStruct) var _ EthSubscriber = new(EthSubscriberStruct)
var _ FullNode = new(FullNodeStruct) var _ FullNode = new(FullNodeStruct)
var _ Gateway = new(GatewayStruct) var _ Gateway = new(GatewayStruct)
var _ LotusProvider = new(LotusProviderStruct)
var _ Net = new(NetStruct) var _ Net = new(NetStruct)
var _ Signable = new(SignableStruct) var _ Signable = new(SignableStruct)
var _ StorageMiner = new(StorageMinerStruct) var _ StorageMiner = new(StorageMinerStruct)

View File

@ -69,11 +69,6 @@ type MessageSendSpec struct {
MaximizeFeeCap bool MaximizeFeeCap bool
} }
type MpoolMessageWhole struct {
Msg *types.Message
Spec *MessageSendSpec
}
// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync // GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync
type GraphSyncDataTransfer struct { type GraphSyncDataTransfer struct {
// GraphSync request id for this transfer // GraphSync request id for this transfer
@ -353,64 +348,6 @@ type ForkUpgradeParams struct {
UpgradePhoenixHeight abi.ChainEpoch UpgradePhoenixHeight abi.ChainEpoch
} }
type NonceMapType map[address.Address]uint64
type MsgUuidMapType map[uuid.UUID]*types.SignedMessage
type RaftStateData struct {
NonceMap NonceMapType
MsgUuids MsgUuidMapType
}
func (n *NonceMapType) MarshalJSON() ([]byte, error) {
marshalled := make(map[string]uint64)
for a, n := range *n {
marshalled[a.String()] = n
}
return json.Marshal(marshalled)
}
func (n *NonceMapType) UnmarshalJSON(b []byte) error {
unmarshalled := make(map[string]uint64)
err := json.Unmarshal(b, &unmarshalled)
if err != nil {
return err
}
*n = make(map[address.Address]uint64)
for saddr, nonce := range unmarshalled {
a, err := address.NewFromString(saddr)
if err != nil {
return err
}
(*n)[a] = nonce
}
return nil
}
func (m *MsgUuidMapType) MarshalJSON() ([]byte, error) {
marshalled := make(map[string]*types.SignedMessage)
for u, msg := range *m {
marshalled[u.String()] = msg
}
return json.Marshal(marshalled)
}
func (m *MsgUuidMapType) UnmarshalJSON(b []byte) error {
unmarshalled := make(map[string]*types.SignedMessage)
err := json.Unmarshal(b, &unmarshalled)
if err != nil {
return err
}
*m = make(map[uuid.UUID]*types.SignedMessage)
for suid, msg := range unmarshalled {
u, err := uuid.Parse(suid)
if err != nil {
return err
}
(*m)[u] = msg
}
return nil
}
// ChainExportConfig holds configuration for chain ranged exports. // ChainExportConfig holds configuration for chain ranged exports.
type ChainExportConfig struct { type ChainExportConfig struct {
WriteBufferSize int WriteBufferSize int

View File

@ -293,7 +293,7 @@ type FullNode interface {
WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
// WalletDefaultAddress returns the address marked as default in the wallet. // WalletDefaultAddress returns the address marked as default in the wallet.
WalletDefaultAddress(context.Context) (address.Address, error) //perm:write WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
// WalletSetDefault marks the given address as as the default one. // WalletSetDefault marks the given address as the default one.
WalletSetDefault(context.Context, address.Address) error //perm:write WalletSetDefault(context.Context, address.Address) error //perm:write
// WalletExport returns the private key of an address in the wallet. // WalletExport returns the private key of an address in the wallet.
WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin

View File

@ -26,8 +26,9 @@ import (
auth "github.com/filecoin-project/go-jsonrpc/auth" auth "github.com/filecoin-project/go-jsonrpc/auth"
abi "github.com/filecoin-project/go-state-types/abi" abi "github.com/filecoin-project/go-state-types/abi"
big "github.com/filecoin-project/go-state-types/big" big "github.com/filecoin-project/go-state-types/big"
miner "github.com/filecoin-project/go-state-types/builtin/v13/miner"
paych "github.com/filecoin-project/go-state-types/builtin/v8/paych" paych "github.com/filecoin-project/go-state-types/builtin/v8/paych"
miner "github.com/filecoin-project/go-state-types/builtin/v9/miner" miner0 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" verifreg "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
crypto "github.com/filecoin-project/go-state-types/crypto" crypto "github.com/filecoin-project/go-state-types/crypto"
dline "github.com/filecoin-project/go-state-types/dline" dline "github.com/filecoin-project/go-state-types/dline"
@ -36,7 +37,7 @@ import (
api "github.com/filecoin-project/lotus/api" api "github.com/filecoin-project/lotus/api"
apitypes "github.com/filecoin-project/lotus/api/types" apitypes "github.com/filecoin-project/lotus/api/types"
v0api "github.com/filecoin-project/lotus/api/v0api" v0api "github.com/filecoin-project/lotus/api/v0api"
miner0 "github.com/filecoin-project/lotus/chain/actors/builtin/miner" miner1 "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
types "github.com/filecoin-project/lotus/chain/types" types "github.com/filecoin-project/lotus/chain/types"
alerting "github.com/filecoin-project/lotus/journal/alerting" alerting "github.com/filecoin-project/lotus/journal/alerting"
marketevents "github.com/filecoin-project/lotus/markets/loggers" marketevents "github.com/filecoin-project/lotus/markets/loggers"
@ -2699,7 +2700,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerInfo(arg0, arg1, arg2 interface{})
} }
// StateMinerInitialPledgeCollateral mocks base method. // StateMinerInitialPledgeCollateral mocks base method.
func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { func (m *MockFullNode) StateMinerInitialPledgeCollateral(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "StateMinerInitialPledgeCollateral", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(big.Int) ret0, _ := ret[0].(big.Int)
@ -2744,7 +2745,7 @@ func (mr *MockFullNodeMockRecorder) StateMinerPower(arg0, arg1, arg2 interface{}
} }
// StateMinerPreCommitDepositForPower mocks base method. // StateMinerPreCommitDepositForPower mocks base method.
func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) { func (m *MockFullNode) StateMinerPreCommitDepositForPower(arg0 context.Context, arg1 address.Address, arg2 miner0.SectorPreCommitInfo, arg3 types.TipSetKey) (big.Int, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "StateMinerPreCommitDepositForPower", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(big.Int) ret0, _ := ret[0].(big.Int)
@ -2924,10 +2925,10 @@ func (mr *MockFullNodeMockRecorder) StateSearchMsgLimited(arg0, arg1, arg2 inter
} }
// StateSectorExpiration mocks base method. // StateSectorExpiration mocks base method.
func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorExpiration, error) { func (m *MockFullNode) StateSectorExpiration(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorExpiration, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "StateSectorExpiration", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*miner0.SectorExpiration) ret0, _ := ret[0].(*miner1.SectorExpiration)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
@ -2954,10 +2955,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorGetInfo(arg0, arg1, arg2, arg3 in
} }
// StateSectorPartition mocks base method. // StateSectorPartition mocks base method.
func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner0.SectorLocation, error) { func (m *MockFullNode) StateSectorPartition(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (*miner1.SectorLocation, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "StateSectorPartition", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(*miner0.SectorLocation) ret0, _ := ret[0].(*miner1.SectorLocation)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }
@ -2969,10 +2970,10 @@ func (mr *MockFullNodeMockRecorder) StateSectorPartition(arg0, arg1, arg2, arg3
} }
// StateSectorPreCommitInfo mocks base method. // StateSectorPreCommitInfo mocks base method.
func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { func (m *MockFullNode) StateSectorPreCommitInfo(arg0 context.Context, arg1 address.Address, arg2 abi.SectorNumber, arg3 types.TipSetKey) (miner0.SectorPreCommitOnChainInfo, error) {
m.ctrl.T.Helper() m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3) ret := m.ctrl.Call(m, "StateSectorPreCommitInfo", arg0, arg1, arg2, arg3)
ret0, _ := ret[0].(miner.SectorPreCommitOnChainInfo) ret0, _ := ret[0].(miner0.SectorPreCommitOnChainInfo)
ret1, _ := ret[1].(error) ret1, _ := ret[1].(error)
return ret0, ret1 return ret0, ret1
} }

View File

@ -13,4 +13,4 @@ func PermissionedFullAPI(a FullNode) FullNode {
return api.PermissionedFullAPI(a) return api.PermissionedFullAPI(a)
} }
type LotusProviderStruct = api.LotusProviderStruct type CurioStruct = api.CurioStruct

View File

@ -60,7 +60,7 @@ var (
MinerAPIVersion0 = newVer(1, 5, 0) MinerAPIVersion0 = newVer(1, 5, 0)
WorkerAPIVersion0 = newVer(1, 7, 0) WorkerAPIVersion0 = newVer(1, 7, 0)
ProviderAPIVersion0 = newVer(1, 0, 0) CurioAPIVersion0 = newVer(1, 0, 0)
) )
//nolint:varcheck,deadcode //nolint:varcheck,deadcode

View File

@ -109,11 +109,9 @@ func (bs *BufferedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) er
func (bs *BufferedBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error { func (bs *BufferedBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error {
// both stores are viewable. // both stores are viewable.
if err := bs.write.View(ctx, c, callback); ipld.IsNotFound(err) { if err := bs.write.View(ctx, c, callback); !ipld.IsNotFound(err) {
// not found in write blockstore; fall through.
} else {
return err // propagate errors, or nil, i.e. found. return err // propagate errors, or nil, i.e. found.
} } // else not found in write blockstore; fall through.
return bs.read.View(ctx, c, callback) return bs.read.View(ctx, c, callback)
} }

113
blockstore/cached.go Normal file
View File

@ -0,0 +1,113 @@
package blockstore
import (
"context"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
)
// BlockstoreCache is a cache for blocks, compatible with lru.Cache; Must be safe for concurrent access
type BlockstoreCache interface {
Remove(mhString MhString) bool
Contains(mhString MhString) bool
Get(mhString MhString) (blocks.Block, bool)
Add(mhString MhString, block blocks.Block) (evicted bool)
}
type ReadCachedBlockstore struct {
top Blockstore
cache BlockstoreCache
}
type MhString string
func NewReadCachedBlockstore(top Blockstore, cache BlockstoreCache) *ReadCachedBlockstore {
return &ReadCachedBlockstore{
top: top,
cache: cache,
}
}
func (c *ReadCachedBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
c.cache.Remove(MhString(cid.Hash()))
return c.top.DeleteBlock(ctx, cid)
}
func (c *ReadCachedBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
if c.cache.Contains(MhString(cid.Hash())) {
return true, nil
}
return c.top.Has(ctx, cid)
}
func (c *ReadCachedBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
if out, ok := c.cache.Get(MhString(cid.Hash())); ok {
return out, nil
}
out, err := c.top.Get(ctx, cid)
if err != nil {
return nil, err
}
c.cache.Add(MhString(cid.Hash()), out)
return out, nil
}
func (c *ReadCachedBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
if b, ok := c.cache.Get(MhString(cid.Hash())); ok {
return len(b.RawData()), nil
}
return c.top.GetSize(ctx, cid)
}
func (c *ReadCachedBlockstore) Put(ctx context.Context, block blocks.Block) error {
c.cache.Add(MhString(block.Cid().Hash()), block)
return c.top.Put(ctx, block)
}
func (c *ReadCachedBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
for _, b := range blocks {
c.cache.Add(MhString(b.Cid().Hash()), b)
}
return c.top.PutMany(ctx, blocks)
}
func (c *ReadCachedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return c.top.AllKeysChan(ctx)
}
func (c *ReadCachedBlockstore) HashOnRead(enabled bool) {
c.top.HashOnRead(enabled)
}
func (c *ReadCachedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error {
return c.top.View(ctx, cid, func(bb []byte) error {
blk, err := blocks.NewBlockWithCid(bb, cid)
if err != nil {
return err
}
c.cache.Add(MhString(cid.Hash()), blk)
return callback(bb)
})
}
func (c *ReadCachedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error {
for _, ci := range cids {
c.cache.Remove(MhString(ci.Hash()))
}
return c.top.DeleteMany(ctx, cids)
}
func (c *ReadCachedBlockstore) Flush(ctx context.Context) error {
return c.top.Flush(ctx)
}
var _ Blockstore = (*ReadCachedBlockstore)(nil)
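
ReadCachedBlockstore accepts any BlockstoreCache; the interface comment requires something lru.Cache-compatible and safe for concurrent use. The toy cache below deliberately ignores both requirements and exists only to show how the pieces plug together; everything except the blockstore types is an illustrative assumption:

package example

import (
	blocks "github.com/ipfs/go-block-format"

	"github.com/filecoin-project/lotus/blockstore"
)

// mapCache is a non-evicting, non-concurrent stand-in for a real LRU cache.
type mapCache map[blockstore.MhString]blocks.Block

func (m mapCache) Remove(k blockstore.MhString) bool              { _, ok := m[k]; delete(m, k); return ok }
func (m mapCache) Contains(k blockstore.MhString) bool            { _, ok := m[k]; return ok }
func (m mapCache) Get(k blockstore.MhString) (blocks.Block, bool) { b, ok := m[k]; return b, ok }
func (m mapCache) Add(k blockstore.MhString, b blocks.Block) bool { m[k] = b; return false }

// newCachedStore wraps an existing blockstore so repeated reads of the same
// block are served from the cache.
func newCachedStore(top blockstore.Blockstore) *blockstore.ReadCachedBlockstore {
	return blockstore.NewReadCachedBlockstore(top, mapCache{})
}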

View File

@ -1,154 +0,0 @@
package blockstore
import (
"bytes"
"context"
"io"
"github.com/ipfs/boxo/path"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multiaddr"
"github.com/multiformats/go-multihash"
"golang.org/x/xerrors"
rpc "github.com/filecoin-project/kubo-api-client"
iface "github.com/filecoin-project/kubo-api-client/coreiface"
"github.com/filecoin-project/kubo-api-client/coreiface/options"
)
type IPFSBlockstore struct {
ctx context.Context
api, offlineAPI iface.CoreAPI
}
var _ BasicBlockstore = (*IPFSBlockstore)(nil)
func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) {
localApi, err := rpc.NewLocalApi()
if err != nil {
return nil, xerrors.Errorf("getting local ipfs api: %w", err)
}
api, err := localApi.WithOptions(options.Api.Offline(!onlineMode))
if err != nil {
return nil, xerrors.Errorf("setting offline mode: %s", err)
}
offlineAPI := api
if onlineMode {
offlineAPI, err = localApi.WithOptions(options.Api.Offline(true))
if err != nil {
return nil, xerrors.Errorf("applying offline mode: %s", err)
}
}
bs := &IPFSBlockstore{
ctx: ctx,
api: api,
offlineAPI: offlineAPI,
}
return Adapt(bs), nil
}
func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) {
httpApi, err := rpc.NewApi(maddr)
if err != nil {
return nil, xerrors.Errorf("setting remote ipfs api: %w", err)
}
api, err := httpApi.WithOptions(options.Api.Offline(!onlineMode))
if err != nil {
return nil, xerrors.Errorf("applying offline mode: %s", err)
}
offlineAPI := api
if onlineMode {
offlineAPI, err = httpApi.WithOptions(options.Api.Offline(true))
if err != nil {
return nil, xerrors.Errorf("applying offline mode: %s", err)
}
}
bs := &IPFSBlockstore{
ctx: ctx,
api: api,
offlineAPI: offlineAPI,
}
return Adapt(bs), nil
}
func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error {
return xerrors.Errorf("not supported")
}
func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) {
_, err := i.offlineAPI.Block().Stat(ctx, path.FromCid(cid))
if err != nil {
// The underlying client is running in Offline mode.
// Stat() will fail with an err if the block isn't in the
// blockstore. If that's the case, return false without
// an error since that's the original intention of this method.
if err.Error() == "blockservice: key not found" {
return false, nil
}
return false, xerrors.Errorf("getting ipfs block: %w", err)
}
return true, nil
}
func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) {
rd, err := i.api.Block().Get(ctx, path.FromCid(cid))
if err != nil {
return nil, xerrors.Errorf("getting ipfs block: %w", err)
}
data, err := io.ReadAll(rd)
if err != nil {
return nil, err
}
return blocks.NewBlockWithCid(data, cid)
}
func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) {
st, err := i.api.Block().Stat(ctx, path.FromCid(cid))
if err != nil {
return 0, xerrors.Errorf("getting ipfs block: %w", err)
}
return st.Size(), nil
}
func (i *IPFSBlockstore) Put(ctx context.Context, block blocks.Block) error {
mhd, err := multihash.Decode(block.Cid().Hash())
if err != nil {
return err
}
_, err = i.api.Block().Put(ctx, bytes.NewReader(block.RawData()),
options.Block.Hash(mhd.Code, mhd.Length),
options.Block.Format(multihash.Codes[block.Cid().Type()]))
return err
}
func (i *IPFSBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error {
// TODO: could be done in parallel
for _, block := range blocks {
if err := i.Put(ctx, block); err != nil {
return err
}
}
return nil
}
func (i *IPFSBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
return nil, xerrors.Errorf("not supported")
}
func (i *IPFSBlockstore) HashOnRead(enabled bool) {
return // TODO: We could technically support this, but..
}

View File

@ -282,14 +282,14 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co
if ss.checkpointExists() { if ss.checkpointExists() {
log.Info("found compaction checkpoint; resuming compaction") log.Info("found compaction checkpoint; resuming compaction")
if err := ss.completeCompaction(); err != nil { if err := ss.completeCompaction(); err != nil {
markSetEnv.Close() //nolint:errcheck _ = markSetEnv.Close()
return nil, xerrors.Errorf("error resuming compaction: %w", err) return nil, xerrors.Errorf("error resuming compaction: %w", err)
} }
} }
if ss.pruneCheckpointExists() { if ss.pruneCheckpointExists() {
log.Info("found prune checkpoint; resuming prune") log.Info("found prune checkpoint; resuming prune")
if err := ss.completePrune(); err != nil { if err := ss.completePrune(); err != nil {
markSetEnv.Close() //nolint:errcheck _ = markSetEnv.Close()
return nil, xerrors.Errorf("error resuming prune: %w", err) return nil, xerrors.Errorf("error resuming prune: %w", err)
} }
} }

View File

@ -109,16 +109,13 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
// TODO: ok to use hysteresis with no transitions between 30s and 1m? // TODO: ok to use hysteresis with no transitions between 30s and 1m?
if time.Since(timestamp) < SyncWaitTime { if time.Since(timestamp) < SyncWaitTime {
/* Chain in sync */ /* Chain in sync */
if atomic.CompareAndSwapInt32(&s.outOfSync, 0, 0) { if !atomic.CompareAndSwapInt32(&s.outOfSync, 0, 0) {
// already in sync, no signaling necessary
} else {
// transition from out of sync to in sync // transition from out of sync to in sync
s.chainSyncMx.Lock() s.chainSyncMx.Lock()
s.chainSyncFinished = true s.chainSyncFinished = true
s.chainSyncCond.Broadcast() s.chainSyncCond.Broadcast()
s.chainSyncMx.Unlock() s.chainSyncMx.Unlock()
} } // else already in sync, no signaling necessary
} }
// 2. protect the new tipset(s) // 2. protect the new tipset(s)
s.protectTipSets(apply) s.protectTipSets(apply)
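
The splitstore change above only inverts the branch: atomic.CompareAndSwapInt32(&x, 0, 0) succeeds exactly when x is already zero and never modifies it, so the negated form reads as "we had fallen out of sync". A standalone sketch of the idiom; the variable and message are illustrative:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var outOfSync int32 = 1

	// CAS(&x, 0, 0) is a pure "is x zero?" check done atomically.
	if !atomic.CompareAndSwapInt32(&outOfSync, 0, 0) {
		fmt.Println("was out of sync; this is where the in-sync condition would be broadcast")
	}
}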

View File

@ -32,7 +32,7 @@ func init() {
CompactionBoundary = 2 CompactionBoundary = 2
WarmupBoundary = 0 WarmupBoundary = 0
SyncWaitTime = time.Millisecond SyncWaitTime = time.Millisecond
logging.SetLogLevel("splitstore", "DEBUG") _ = logging.SetLogLevel("splitstore", "DEBUG")
} }
func testSplitStore(t *testing.T, cfg *Config) { func testSplitStore(t *testing.T, cfg *Config) {

View File

@ -1,6 +1,3 @@
/dns4/lotus-bootstrap.ipfsforce.com/tcp/41778/p2p/12D3KooWGhufNmZHF3sv48aQeS13ng5XVJZ9E6qy2Ms4VzqeUsHk
/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz
/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u
/dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt /dns4/node.glif.io/tcp/1235/p2p/12D3KooWBF8cpp65hp2u9LK5mh19x67ftAam84z9LsfaquTDSBpt
/dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt /dns4/bootstarp-0.1475.io/tcp/61256/p2p/12D3KooWRzCVDwHUkgdK7eRgnoXbjDAELhxPErjHzbRLguSV1aRt
/dns4/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST /dns4/bootstrap-venus.mainnet.filincubator.com/tcp/8888/p2p/QmQu8C6deXwKvJP2D8B6QGyhngc3ZiDnFzEHBDx8yeBXST

View File

@ -145,10 +145,10 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata,
) )
if !strings.HasPrefix(bundle, "v") { if !strings.HasPrefix(bundle, "v") {
return nil, xerrors.Errorf("bundle bundle '%q' doesn't start with a 'v'", bundle) return nil, xerrors.Errorf("bundle '%q' doesn't start with a 'v'", bundle)
} }
if !strings.HasSuffix(bundle, archiveExt) { if !strings.HasSuffix(bundle, archiveExt) {
return nil, xerrors.Errorf("bundle bundle '%q' doesn't end with '%s'", bundle, archiveExt) return nil, xerrors.Errorf("bundle '%q' doesn't end with '%s'", bundle, archiveExt)
} }
version, err := strconv.ParseInt(bundle[1:len(bundle)-len(archiveExt)], 10, 0) version, err := strconv.ParseInt(bundle[1:len(bundle)-len(archiveExt)], 10, 0)
if err != nil { if err != nil {

View File

@ -6,7 +6,7 @@ import (
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
) )
var EmbeddedBuiltinActorsMetadata []*BuiltinActorsMetadata = []*BuiltinActorsMetadata{{ var EmbeddedBuiltinActorsMetadata = []*BuiltinActorsMetadata{{
Network: "butterflynet", Network: "butterflynet",
Version: 8, Version: 8,

View File

@ -0,0 +1,107 @@
//go:build release
// +build release
package build_test
import (
"archive/tar"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
"strings"
"testing"
"github.com/DataDog/zstd"
"github.com/ipfs/go-cid"
"github.com/ipld/go-car/v2"
"github.com/stretchr/testify/require"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/lotus/build"
)
func TestEmbeddedBuiltinActorsMetadata(t *testing.T) {
subjectsByVersionByNetworks := make(map[actorstypes.Version]map[string]*build.BuiltinActorsMetadata)
for _, subject := range build.EmbeddedBuiltinActorsMetadata {
if subject.BundleGitTag == "" {
// BundleGitTag is required to verify the SHA-256 checksum.
// The pack script only includes this for the latest network version, and it is good enough to only
// check the latest network version metadata. Hence the skip.
continue
}
v, ok := subjectsByVersionByNetworks[subject.Version]
if !ok {
v = make(map[string]*build.BuiltinActorsMetadata)
}
v[subject.Network] = subject
subjectsByVersionByNetworks[subject.Version] = v
}
for version, networks := range subjectsByVersionByNetworks {
cachedCar, err := os.Open(fmt.Sprintf("./actors/v%v.tar.zst", version))
require.NoError(t, err)
t.Cleanup(func() { require.NoError(t, cachedCar.Close()) })
tarReader := tar.NewReader(zstd.NewReader(cachedCar))
for {
header, err := tarReader.Next()
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err)
network := strings.TrimSuffix(strings.TrimPrefix(header.Name, "builtin-actors-"), ".car")
subject, found := networks[network]
if !found {
continue
}
shaURL := fmt.Sprintf("https://github.com/filecoin-project/builtin-actors/releases/download/%s/builtin-actors-%s.sha256", subject.BundleGitTag, subject.Network)
resp, err := http.Get(shaURL)
require.NoError(t, err, "failed to retrieve CAR SHA")
require.Equal(t, http.StatusOK, resp.StatusCode, "unexpected response status code while retrieving CAR SHA")
respBody, err := io.ReadAll(resp.Body)
require.NoError(t, resp.Body.Close())
require.NoError(t, err)
fields := strings.Fields(string(respBody))
require.Len(t, fields, 2)
wantShaHex := fields[0]
hasher := sha256.New()
reader, err := car.NewBlockReader(io.TeeReader(tarReader, hasher))
require.NoError(t, err)
require.EqualValues(t, 1, reader.Version)
require.Len(t, reader.Roots, 1, "expected exactly one root CID for builtin actors bundle network %s, version %v", subject.Network, subject.Version)
require.True(t, reader.Roots[0].Equals(subject.ManifestCid), "manifest CID does not match")
subjectActorsByCid := make(map[cid.Cid]string)
for name, c := range subject.Actors {
subjectActorsByCid[c] = name
}
for {
next, err := reader.Next()
if errors.Is(err, io.EOF) {
break
}
require.NoError(t, err)
name, found := subjectActorsByCid[next.Cid()]
if found {
t.Logf("OK: %sv%v/%s -> %s", subject.Network, subject.Version, name, next.Cid())
delete(subjectActorsByCid, next.Cid())
}
}
require.Empty(t, subjectActorsByCid, "ZST CAR bundle did not contain CIDs for all actors; missing: %v", subjectActorsByCid)
gotShaHex := hex.EncodeToString(hasher.Sum(nil))
require.Equal(t, wantShaHex, gotShaHex, "SHA-256 digest of ZST CAR bundle does not match builtin-actors release")
delete(networks, network)
}
require.Empty(t, networks, "CAR bundle did not contain CIDs for network; missing: %v", networks)
}
}

View File

@ -67,12 +67,10 @@ var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
Servers: []string{ Servers: []string{
"https://pl-eu.testnet.drand.sh", "https://pl-eu.testnet.drand.sh",
"https://pl-us.testnet.drand.sh", "https://pl-us.testnet.drand.sh",
"https://pl-sin.testnet.drand.sh",
}, },
Relays: []string{ Relays: []string{
"/dnsaddr/pl-eu.testnet.drand.sh/", "/dnsaddr/pl-eu.testnet.drand.sh/",
"/dnsaddr/pl-us.testnet.drand.sh/", "/dnsaddr/pl-us.testnet.drand.sh/",
"/dnsaddr/pl-sin.testnet.drand.sh/",
}, },
IsChained: true, IsChained: true,
ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`,

View File

@ -2,7 +2,6 @@ package build
import ( import (
"bytes" "bytes"
"compress/gzip"
"embed" "embed"
"encoding/json" "encoding/json"
@ -12,17 +11,9 @@ import (
//go:embed openrpc //go:embed openrpc
var openrpcfs embed.FS var openrpcfs embed.FS
func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument { func mustReadOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
zr, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
log.Fatal(err)
}
m := apitypes.OpenRPCDocument{} m := apitypes.OpenRPCDocument{}
err = json.NewDecoder(zr).Decode(&m) err := json.NewDecoder(bytes.NewBuffer(data)).Decode(&m)
if err != nil {
log.Fatal(err)
}
err = zr.Close()
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -30,33 +21,33 @@ func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
} }
func OpenRPCDiscoverJSON_Full() apitypes.OpenRPCDocument { func OpenRPCDiscoverJSON_Full() apitypes.OpenRPCDocument {
data, err := openrpcfs.ReadFile("openrpc/full.json.gz") data, err := openrpcfs.ReadFile("openrpc/full.json")
if err != nil { if err != nil {
panic(err) panic(err)
} }
return mustReadGzippedOpenRPCDocument(data) return mustReadOpenRPCDocument(data)
} }
func OpenRPCDiscoverJSON_Miner() apitypes.OpenRPCDocument { func OpenRPCDiscoverJSON_Miner() apitypes.OpenRPCDocument {
data, err := openrpcfs.ReadFile("openrpc/miner.json.gz") data, err := openrpcfs.ReadFile("openrpc/miner.json")
if err != nil { if err != nil {
panic(err) panic(err)
} }
return mustReadGzippedOpenRPCDocument(data) return mustReadOpenRPCDocument(data)
} }
func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument { func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument {
data, err := openrpcfs.ReadFile("openrpc/worker.json.gz") data, err := openrpcfs.ReadFile("openrpc/worker.json")
if err != nil { if err != nil {
panic(err) panic(err)
} }
return mustReadGzippedOpenRPCDocument(data) return mustReadOpenRPCDocument(data)
} }
func OpenRPCDiscoverJSON_Gateway() apitypes.OpenRPCDocument { func OpenRPCDiscoverJSON_Gateway() apitypes.OpenRPCDocument {
data, err := openrpcfs.ReadFile("openrpc/gateway.json.gz") data, err := openrpcfs.ReadFile("openrpc/gateway.json")
if err != nil { if err != nil {
panic(err) panic(err)
} }
return mustReadGzippedOpenRPCDocument(data) return mustReadOpenRPCDocument(data)
} }
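
With the gzip layer gone, the embedded OpenRPC documents are decoded as plain JSON straight from the embedded FS. A small sketch that dumps the full-node document; only the build.OpenRPCDiscoverJSON_Full accessor comes from the code above, the pretty-printing is incidental:

package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/filecoin-project/lotus/build"
)

func main() {
	doc := build.OpenRPCDiscoverJSON_Full() // now reads openrpc/full.json, no gzip step
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	if err := enc.Encode(doc); err != nil {
		log.Fatal(err)
	}
}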

26867
build/openrpc/full.json Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

10267
build/openrpc/gateway.json Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

12744
build/openrpc/miner.json Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

5536
build/openrpc/worker.json Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@ -165,5 +165,5 @@ const BootstrapPeerThreshold = 4
// As per https://github.com/ethereum-lists/chains // As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 314 const Eip155ChainId = 314
// we skip checks on message validity in this block to sidestep the zero-bls signature // WhitelistedBlock skips checks on message validity in this block to sidestep the zero-bls signature
var WhitelistedBlock = MustParseCid("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi") var WhitelistedBlock = MustParseCid("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi")

View File

@ -124,6 +124,7 @@ const MinimumBaseFee = 100
const PackingEfficiencyNum = 4 const PackingEfficiencyNum = 4
const PackingEfficiencyDenom = 5 const PackingEfficiencyDenom = 5
// revive:disable-next-line:exported
// Actor consts // Actor consts
// TODO: pieceSize unused from actors // TODO: pieceSize unused from actors
var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0) var MinDealDuration, MaxDealDuration = policy.DealDurationBounds(0)

View File

@ -37,7 +37,7 @@ func BuildTypeString() string {
} }
// BuildVersion is the local build version // BuildVersion is the local build version
const BuildVersion = "1.26.2" const BuildVersion = "1.27.0"
func UserVersion() string { func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {

View File

@ -153,7 +153,7 @@ type Partition interface {
UnprovenSectors() (bitfield.BitField, error) UnprovenSectors() (bitfield.BitField, error)
} }
type SectorOnChainInfo = minertypes.SectorOnChainInfo type SectorOnChainInfo = minertypes13.SectorOnChainInfo
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) { func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old // We added support for the new proofs in network version 7, and removed support for the old
@ -256,6 +256,7 @@ type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params
type SectorActivationManifest = minertypes13.SectorActivationManifest type SectorActivationManifest = minertypes13.SectorActivationManifest
type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params
type SectorUpdateManifest = minertypes13.SectorUpdateManifest type SectorUpdateManifest = minertypes13.SectorUpdateManifest
type SectorOnChainInfoFlags = minertypes13.SectorOnChainInfoFlags
var QAPowerMax = minertypes.QAPowerMax var QAPowerMax = minertypes.QAPowerMax

View File

@ -219,7 +219,7 @@ type Partition interface {
UnprovenSectors() (bitfield.BitField, error) UnprovenSectors() (bitfield.BitField, error)
} }
type SectorOnChainInfo = minertypes.SectorOnChainInfo type SectorOnChainInfo = minertypes13.SectorOnChainInfo
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) { func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof, configWantSynthetic bool) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old // We added support for the new proofs in network version 7, and removed support for the old
@ -322,6 +322,7 @@ type ProveCommitSectors3Params = minertypes13.ProveCommitSectors3Params
type SectorActivationManifest = minertypes13.SectorActivationManifest type SectorActivationManifest = minertypes13.SectorActivationManifest
type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params type ProveReplicaUpdates3Params = minertypes13.ProveReplicaUpdates3Params
type SectorUpdateManifest = minertypes13.SectorUpdateManifest type SectorUpdateManifest = minertypes13.SectorUpdateManifest
type SectorOnChainInfoFlags = minertypes13.SectorOnChainInfoFlags
var QAPowerMax = minertypes.QAPowerMax var QAPowerMax = minertypes.QAPowerMax

View File

@ -584,6 +584,11 @@ func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorO
{{if (ge .v 7)}} {{if (ge .v 7)}}
SectorKeyCID: v{{.v}}.SectorKeyCID, SectorKeyCID: v{{.v}}.SectorKeyCID,
{{end}} {{end}}
{{if (ge .v 12)}}
PowerBaseEpoch: v{{.v}}.PowerBaseEpoch,
ReplacedDayReward: v{{.v}}.ReplacedDayReward,
Flags: SectorOnChainInfoFlags(v{{.v}}.Flags),
{{end}}
} }
return info return info
} }

View File

@ -545,6 +545,10 @@ func fromV12SectorOnChainInfo(v12 miner12.SectorOnChainInfo) SectorOnChainInfo {
ExpectedStoragePledge: v12.ExpectedStoragePledge, ExpectedStoragePledge: v12.ExpectedStoragePledge,
SectorKeyCID: v12.SectorKeyCID, SectorKeyCID: v12.SectorKeyCID,
PowerBaseEpoch: v12.PowerBaseEpoch,
ReplacedDayReward: v12.ReplacedDayReward,
Flags: SectorOnChainInfoFlags(v12.Flags),
} }
return info return info
} }

View File

@@ -545,6 +545,10 @@ func fromV13SectorOnChainInfo(v13 miner13.SectorOnChainInfo) SectorOnChainInfo {
ExpectedStoragePledge: v13.ExpectedStoragePledge,
SectorKeyCID: v13.SectorKeyCID,
+PowerBaseEpoch: v13.PowerBaseEpoch,
+ReplacedDayReward: v13.ReplacedDayReward,
+Flags: SectorOnChainInfoFlags(v13.Flags),
}
return info
}

View File

@@ -203,7 +203,6 @@ func (db *DrandBeacon) VerifyEntry(entry types.BeaconEntry, prevEntrySig []byte)
}
db.cacheValue(entry)
return nil
}

View File

@@ -3,6 +3,7 @@
package drand
import (
+"bytes"
"context"
"os"
"testing"
@@ -18,7 +19,12 @@ import (
func TestPrintGroupInfo(t *testing.T) {
server := build.DrandConfigs[build.DrandTestnet].Servers[0]
-c, err := hclient.New(server, nil, nil)
+chainInfo := build.DrandConfigs[build.DrandTestnet].ChainInfoJSON
+drandChain, err := dchain.InfoFromJSON(bytes.NewReader([]byte(chainInfo)))
+assert.NoError(t, err)
+c, err := hclient.NewWithInfo(server, drandChain, nil)
assert.NoError(t, err)
cg := c.(interface {
FetchChainInfo(ctx context.Context, groupHash []byte) (*dchain.Info, error)

View File

@@ -24,9 +24,16 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e
ts = tss[0]
}
-if err := syncer.switchChain(ctx, ts); err != nil {
-return xerrors.Errorf("failed to switch chain when syncing checkpoint: %w", err)
+hts := syncer.ChainStore().GetHeaviestTipSet()
+if !hts.Equals(ts) {
+if anc, err := syncer.store.IsAncestorOf(ctx, ts, hts); err != nil {
+return xerrors.Errorf("failed to walk the chain when checkpointing: %w", err)
+} else if !anc {
+if err := syncer.collectChain(ctx, ts, hts, true); err != nil {
+return xerrors.Errorf("failed to collect chain for checkpoint: %w", err)
}
+} // else new checkpoint is on the current chain, we definitely have the tipsets.
+} // else current head, no need to switch.
if err := syncer.ChainStore().SetCheckpoint(ctx, ts); err != nil {
return xerrors.Errorf("failed to set the chain checkpoint: %w", err)
@@ -34,24 +41,3 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e
return nil
}
-func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error {
-hts := syncer.ChainStore().GetHeaviestTipSet()
-if hts.Equals(ts) {
-return nil
-}
-if anc, err := syncer.store.IsAncestorOf(ctx, ts, hts); err == nil && anc {
-return nil
-}
-// Otherwise, sync the chain and set the head.
-if err := syncer.collectChain(ctx, ts, hts, true); err != nil {
-return xerrors.Errorf("failed to collect chain for checkpoint: %w", err)
-}
-if err := syncer.ChainStore().SetHead(ctx, ts); err != nil {
-return xerrors.Errorf("failed to set the chain head: %w", err)
-}
-return nil
-}

View File

@@ -220,7 +220,7 @@ func checkBlockMessages(ctx context.Context, sm *stmgr.StateManager, cs *store.C
// the sender exists and is an account actor, and the nonces make sense
var sender address.Address
if nv >= network.Version13 {
-sender, err = st.LookupID(m.From)
+sender, err = st.LookupIDAddress(m.From)
if err != nil {
return xerrors.Errorf("failed to lookup sender %s: %w", m.From, err)
}

View File

@@ -150,7 +150,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, consensus.ErrTemporal)
}
if h.Timestamp > now {
-log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix())
+log.Warnf("Got block from the future, but within threshold (%d > %d)", h.Timestamp, now)
}
minerCheck := async.Err(func() error {
@@ -166,7 +166,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock)
}
if types.BigCmp(pweight, b.Header.ParentWeight) != 0 {
-return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)",
+return xerrors.Errorf("parent weight different: %s (header) != %s (computed)",
b.Header.ParentWeight, pweight)
}

View File

@@ -81,7 +81,7 @@ func init() {
return
}
// use value from environment
-log.Infof("migration worker cound set from %s (%d)", EnvMigrationMaxWorkerCount, mwc)
+log.Infof("migration worker count set from %s (%d)", EnvMigrationMaxWorkerCount, mwc)
MigrationMaxWorkerCount = int(mwc)
return
}
@@ -1712,14 +1712,14 @@ func upgradeActorsV10Common(
if stateRoot.Version != types.StateTreeVersion4 {
return cid.Undef, xerrors.Errorf(
-"expected state root version 4 for actors v9 upgrade, got %d",
+"expected state root version 4 for actors v10 upgrade, got %d",
stateRoot.Version,
)
}
manifest, ok := actors.GetManifest(actorstypes.Version10)
if !ok {
-return cid.Undef, xerrors.Errorf("no manifest CID for v9 upgrade")
+return cid.Undef, xerrors.Errorf("no manifest CID for v10 upgrade")
}
// Perform the migration
@@ -1893,7 +1893,7 @@ func UpgradeActorsV12(ctx context.Context, sm *stmgr.StateManager, cache stmgr.M
}
newRoot, err := upgradeActorsV12Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
-return cid.Undef, xerrors.Errorf("migrating actors v11 state: %w", err)
+return cid.Undef, xerrors.Errorf("migrating actors v12 state: %w", err)
}
return newRoot, nil
}
@@ -2210,7 +2210,7 @@ func UpgradeActorsV13(ctx context.Context, sm *stmgr.StateManager, cache stmgr.M
}
newRoot, err := upgradeActorsV13Common(ctx, sm, cache, root, epoch, ts, config)
if err != nil {
-return cid.Undef, xerrors.Errorf("migrating actors v11 state: %w", err)
+return cid.Undef, xerrors.Errorf("migrating actors v13 state: %w", err)
}
return newRoot, nil
}

View File

@@ -375,6 +375,10 @@ func (m *EventFilterManager) Revert(ctx context.Context, from, to *types.TipSet)
func (m *EventFilterManager) Install(ctx context.Context, minHeight, maxHeight abi.ChainEpoch, tipsetCid cid.Cid, addresses []address.Address,
keysWithCodec map[string][]types.ActorEventBlock, excludeReverted bool) (EventFilter, error) {
m.mu.Lock()
+if m.currentHeight == 0 {
+// sync in progress, we haven't had an Apply
+m.currentHeight = m.ChainStore.GetHeaviestTipSet().Height()
+}
currentHeight := m.currentHeight
m.mu.Unlock()

View File

@@ -26,7 +26,7 @@ var pragmas = []string{
"PRAGMA temp_store = memory",
"PRAGMA mmap_size = 30000000000",
"PRAGMA page_size = 32768",
-"PRAGMA auto_vacuum = NONE",
+"PRAGMA auto_vacuum = NONE", // not useful until we implement GC
"PRAGMA automatic_index = OFF",
"PRAGMA journal_mode = WAL",
"PRAGMA read_uncommitted = ON",
@@ -45,8 +45,10 @@ var ddls = []string{
reverted INTEGER NOT NULL
)`,
-`CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key)`,
-`CREATE INDEX IF NOT EXISTS event_emitter_addr ON event (emitter_addr)`,
+createIndexEventEmitterAddr,
+createIndexEventTipsetKeyCid,
+createIndexEventHeight,
+createIndexEventReverted,
`CREATE TABLE IF NOT EXISTS event_entry (
event_id INTEGER,
@@ -57,7 +59,9 @@ var ddls = []string{
value BLOB NOT NULL
)`,
-`CREATE INDEX IF NOT EXISTS event_entry_key_index ON event_entry (key)`,
+createIndexEventEntryIndexedKey,
+createIndexEventEntryCodecValue,
+createIndexEventEntryEventId,
// metadata containing version of schema
`CREATE TABLE IF NOT EXISTS _meta (
@@ -67,6 +71,7 @@ var ddls = []string{
`INSERT OR IGNORE INTO _meta (version) VALUES (1)`,
`INSERT OR IGNORE INTO _meta (version) VALUES (2)`,
`INSERT OR IGNORE INTO _meta (version) VALUES (3)`,
+`INSERT OR IGNORE INTO _meta (version) VALUES (4)`,
}
var (
@@ -74,13 +79,22 @@ var (
)
const (
-schemaVersion = 3
+schemaVersion = 4
eventExists = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?`
insertEvent = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)`
insertEntry = `INSERT OR IGNORE INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)`
revertEventsInTipset = `UPDATE event SET reverted=true WHERE height=? AND tipset_key=?`
restoreEvent = `UPDATE event SET reverted=false WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?`
+createIndexEventEmitterAddr = `CREATE INDEX IF NOT EXISTS event_emitter_addr ON event (emitter_addr)`
+createIndexEventTipsetKeyCid = `CREATE INDEX IF NOT EXISTS event_tipset_key_cid ON event (tipset_key_cid);`
+createIndexEventHeight = `CREATE INDEX IF NOT EXISTS event_height ON event (height);`
+createIndexEventReverted = `CREATE INDEX IF NOT EXISTS event_reverted ON event (reverted);`
+createIndexEventEntryIndexedKey = `CREATE INDEX IF NOT EXISTS event_entry_indexed_key ON event_entry (indexed, key);`
+createIndexEventEntryCodecValue = `CREATE INDEX IF NOT EXISTS event_entry_codec_value ON event_entry (codec, value);`
+createIndexEventEntryEventId = `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id);`
)
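For orientation, the new named indices mirror the WHERE/JOIN shapes that prefillFilter builds further down in this file. A hypothetical example of such a query (the selected columns and the alias are illustrative, not lifted from the code):

	// Hypothetical query shape served by the v4 indices: a height range and the
	// reverted flag and emitter_addr on event, plus (indexed, key) and
	// (codec, value) on event_entry.
	const exampleFilterQuery = `
		SELECT event.id, event.height, event.emitter_addr
		FROM event
		JOIN event_entry ee ON event.id = ee.event_id
		WHERE event.height >= ? AND event.height <= ?
		  AND event.reverted = false
		  AND event.emitter_addr = ?
		  AND ee.indexed = 1 AND ee.key = ?
		  AND (ee.value = ? AND ee.codec = ?)`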
type EventIndex struct {
@@ -125,43 +125,43 @@ func (ei *EventIndex) initStatements() (err error) {
func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.ChainStore) error {
now := time.Now()
-tx, err := ei.db.Begin()
+tx, err := ei.db.BeginTx(ctx, nil)
if err != nil {
return xerrors.Errorf("begin transaction: %w", err)
}
// rollback the transaction (a no-op if the transaction was already committed)
-defer tx.Rollback() //nolint:errcheck
+defer func() { _ = tx.Rollback() }()
// create some temporary indices to help speed up the migration
-_, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)")
+_, err = tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)")
if err != nil {
return xerrors.Errorf("create index tmp_height_tipset_key_cid: %w", err)
}
-_, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)")
+_, err = tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)")
if err != nil {
return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err)
}
-stmtDeleteOffChainEvent, err := tx.Prepare("DELETE FROM event WHERE tipset_key_cid!=? and height=?")
+stmtDeleteOffChainEvent, err := tx.PrepareContext(ctx, "DELETE FROM event WHERE tipset_key_cid!=? and height=?")
if err != nil {
return xerrors.Errorf("prepare stmtDeleteOffChainEvent: %w", err)
}
-stmtSelectEvent, err := tx.Prepare("SELECT id FROM event WHERE tipset_key_cid=? ORDER BY message_index ASC, event_index ASC, id DESC LIMIT 1")
+stmtSelectEvent, err := tx.PrepareContext(ctx, "SELECT id FROM event WHERE tipset_key_cid=? ORDER BY message_index ASC, event_index ASC, id DESC LIMIT 1")
if err != nil {
return xerrors.Errorf("prepare stmtSelectEvent: %w", err)
}
-stmtDeleteEvent, err := tx.Prepare("DELETE FROM event WHERE tipset_key_cid=? AND id<?")
+stmtDeleteEvent, err := tx.PrepareContext(ctx, "DELETE FROM event WHERE tipset_key_cid=? AND id<?")
if err != nil {
return xerrors.Errorf("prepare stmtDeleteEvent: %w", err)
}
// get the lowest height tipset
var minHeight sql.NullInt64
-err = ei.db.QueryRow("SELECT MIN(height) FROM event").Scan(&minHeight)
+err = ei.db.QueryRowContext(ctx, "SELECT MIN(height) FROM event").Scan(&minHeight)
if err != nil {
-if err == sql.ErrNoRows {
+if errors.Is(err, sql.ErrNoRows) {
return nil
}
@@ -198,7 +212,7 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
var eventId sql.NullInt64
err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventId)
if err != nil {
-if err == sql.ErrNoRows {
+if errors.Is(err, sql.ErrNoRows) {
continue
}
return xerrors.Errorf("select event: %w", err)
@@ -224,7 +238,7 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
// delete all entries that have an event_id that doesn't exist (since we don't have a foreign
// key constraint that gives us cascading deletes)
-res, err := tx.Exec("DELETE FROM event_entry WHERE event_id NOT IN (SELECT id FROM event)")
+res, err := tx.ExecContext(ctx, "DELETE FROM event_entry WHERE event_id NOT IN (SELECT id FROM event)")
if err != nil {
return xerrors.Errorf("delete event_entry: %w", err)
}
@@ -233,39 +247,143 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
if err != nil {
return xerrors.Errorf("rows affected: %w", err)
}
-log.Infof("cleaned up %d entries that had deleted events\n", nrRowsAffected)
+log.Infof("Cleaned up %d entries that had deleted events\n", nrRowsAffected)
// drop the temporary indices after the migration
-_, err = tx.Exec("DROP INDEX IF EXISTS tmp_tipset_key_cid")
+_, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_tipset_key_cid")
if err != nil {
-return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err)
+return xerrors.Errorf("drop index tmp_tipset_key_cid: %w", err)
}
-_, err = tx.Exec("DROP INDEX IF EXISTS tmp_height_tipset_key_cid")
+_, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_height_tipset_key_cid")
if err != nil {
return xerrors.Errorf("drop index tmp_height_tipset_key_cid: %w", err)
}
+// original v2 migration introduced an index:
+// CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key)
+// which has subsequently been removed in v4, so it's omitted here
+// increment the schema version to 2 in _meta table.
+_, err = tx.ExecContext(ctx, "INSERT OR IGNORE INTO _meta (version) VALUES (2)")
+if err != nil {
+return xerrors.Errorf("increment _meta version: %w", err)
+}
err = tx.Commit()
if err != nil {
return xerrors.Errorf("commit transaction: %w", err)
}
-// during the migration, we have likely increased the WAL size a lot, so lets do some
+log.Infof("Successfully migrated event index from version 1 to version 2 in %s", time.Since(now))
+return nil
+}
// migrateToVersion3 migrates the schema from version 2 to version 3 by creating two indices:
// 1) an index on the event.emitter_addr column, and 2) an index on the event_entry.key column.
func (ei *EventIndex) migrateToVersion3(ctx context.Context) error {
now := time.Now()
tx, err := ei.db.BeginTx(ctx, nil)
if err != nil {
return xerrors.Errorf("begin transaction: %w", err)
}
defer func() { _ = tx.Rollback() }()
// create index on event.emitter_addr.
_, err = tx.ExecContext(ctx, createIndexEventEmitterAddr)
if err != nil {
return xerrors.Errorf("create index event_emitter_addr: %w", err)
}
// original v3 migration introduced an index:
// CREATE INDEX IF NOT EXISTS event_entry_key_index ON event_entry (key)
// which has subsequently been removed in v4, so it's omitted here
// increment the schema version to 3 in _meta table.
_, err = tx.ExecContext(ctx, "INSERT OR IGNORE INTO _meta (version) VALUES (3)")
if err != nil {
return xerrors.Errorf("increment _meta version: %w", err)
}
err = tx.Commit()
if err != nil {
return xerrors.Errorf("commit transaction: %w", err)
}
log.Infof("Successfully migrated event index from version 2 to version 3 in %s", time.Since(now))
return nil
}
// migrateToVersion4 migrates the schema from version 3 to version 4 by adjusting indexes to match
// the query patterns of the event filter.
//
// First it drops indexes introduced in previous migrations:
// 1. the index on the event.height and event.tipset_key columns
// 2. the index on the event_entry.key column
//
// And then creating the following indices:
// 1. an index on the event.tipset_key_cid column
// 2. an index on the event.height column
// 3. an index on the event.reverted column
// 4. an index on the event_entry.indexed and event_entry.key columns
// 5. an index on the event_entry.codec and event_entry.value columns
// 6. an index on the event_entry.event_id column
func (ei *EventIndex) migrateToVersion4(ctx context.Context) error {
now := time.Now()
tx, err := ei.db.BeginTx(ctx, nil)
if err != nil {
return xerrors.Errorf("begin transaction: %w", err)
}
defer func() { _ = tx.Rollback() }()
for _, create := range []struct {
desc string
query string
}{
{"drop index height_tipset_key", "DROP INDEX IF EXISTS height_tipset_key;"},
{"drop index event_entry_key_index", "DROP INDEX IF EXISTS event_entry_key_index;"},
{"create index event_tipset_key_cid", createIndexEventTipsetKeyCid},
{"create index event_height", createIndexEventHeight},
{"create index event_reverted", createIndexEventReverted},
{"create index event_entry_indexed_key", createIndexEventEntryIndexedKey},
{"create index event_entry_codec_value", createIndexEventEntryCodecValue},
{"create index event_entry_event_id", createIndexEventEntryEventId},
} {
_, err = tx.ExecContext(ctx, create.query)
if err != nil {
return xerrors.Errorf("%s: %w", create.desc, err)
}
}
if _, err = tx.Exec("INSERT OR IGNORE INTO _meta (version) VALUES (4)"); err != nil {
return xerrors.Errorf("increment _meta version: %w", err)
}
err = tx.Commit()
if err != nil {
return xerrors.Errorf("commit transaction: %w", err)
}
ei.vacuumDBAndCheckpointWAL(ctx)
log.Infof("Successfully migrated event index from version 3 to version 4 in %s", time.Since(now))
return nil
}
+func (ei *EventIndex) vacuumDBAndCheckpointWAL(ctx context.Context) {
+// During the large migrations, we have likely increased the WAL size a lot, so lets do some
// simple DB administration to free up space (VACUUM followed by truncating the WAL file)
-// as this would be a good time to do it when no other writes are happening
+// as this would be a good time to do it when no other writes are happening.
log.Infof("Performing DB vacuum and wal checkpointing to free up space after the migration")
-_, err = ei.db.Exec("VACUUM")
+_, err := ei.db.ExecContext(ctx, "VACUUM")
if err != nil {
log.Warnf("error vacuuming database: %s", err)
}
-_, err = ei.db.Exec("PRAGMA wal_checkpoint(TRUNCATE)")
+_, err = ei.db.ExecContext(ctx, "PRAGMA wal_checkpoint(TRUNCATE)")
if err != nil {
log.Warnf("error checkpointing wal: %s", err)
}
-log.Infof("Successfully migrated events to version 2 in %s", time.Since(now))
-return nil
}
func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStore) (*EventIndex, error) {
@@ -283,8 +401,8 @@ func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStor
eventIndex := EventIndex{db: db}
-q, err := db.Query("SELECT name FROM sqlite_master WHERE type='table' AND name='_meta';")
-if err == sql.ErrNoRows || !q.Next() {
+q, err := db.QueryContext(ctx, "SELECT name FROM sqlite_master WHERE type='table' AND name='_meta';")
+if errors.Is(err, sql.ErrNoRows) || !q.Next() {
// empty database, create the schema
for _, ddl := range ddls {
if _, err := db.Exec(ddl); err != nil {
@@ -305,40 +423,33 @@ func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStor
}
if version == 1 {
-log.Infof("upgrading event index from version 1 to version 2")
+log.Infof("Upgrading event index from version 1 to version 2")
err = eventIndex.migrateToVersion2(ctx, chainStore)
if err != nil {
_ = db.Close()
-return nil, xerrors.Errorf("could not migrate sql data to version 2: %w", err)
+return nil, xerrors.Errorf("could not migrate event index schema from version 1 to version 2: %w", err)
}
-// to upgrade to version version 2 we only need to create an index on the event table
-// which means we can just recreate the schema (it will not have any effect on existing data)
-for _, ddl := range ddls {
-if _, err := db.Exec(ddl); err != nil {
-_ = db.Close()
-return nil, xerrors.Errorf("could not upgrade index to version 2, exec ddl %q: %w", ddl, err)
-}
-}
version = 2
}
if version == 2 {
-log.Infof("upgrading event index from version 2 to version 3")
-// to upgrade to version 3 we only need to create an index on the event_entry.key column
-// and on the event.emitter_addr column
-// which means we can just reapply the schema (it will not have any effect on existing data)
-for _, ddl := range ddls {
-if _, err := db.Exec(ddl); err != nil {
-_ = db.Close()
-return nil, xerrors.Errorf("could not upgrade index to version 3, exec ddl %q: %w", ddl, err)
-}
-}
-version = 3
+log.Infof("Upgrading event index from version 2 to version 3")
+err = eventIndex.migrateToVersion3(ctx)
+if err != nil {
+_ = db.Close()
+return nil, xerrors.Errorf("could not migrate event index schema from version 2 to version 3: %w", err)
+}
+version = 3
}
+if version == 3 {
+log.Infof("Upgrading event index from version 3 to version 4")
+err = eventIndex.migrateToVersion4(ctx)
+if err != nil {
+_ = db.Close()
+return nil, xerrors.Errorf("could not migrate event index schema from version 3 to version 4: %w", err)
+}
+version = 4
}
if version != schemaVersion {
@@ -369,9 +480,9 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
return xerrors.Errorf("begin transaction: %w", err)
}
// rollback the transaction (a no-op if the transaction was already committed)
-defer tx.Rollback() //nolint:errcheck
+defer func() { _ = tx.Rollback() }()
-// lets handle the revert case first, since its simpler and we can simply mark all events events in this tipset as reverted and return
+// lets handle the revert case first, since its simpler and we can simply mark all events in this tipset as reverted and return
if revert {
_, err = tx.Stmt(ei.stmtRevertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes())
if err != nil {
@@ -526,7 +637,7 @@ func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, exclude
}
if len(f.addresses) > 0 {
-subclauses := []string{}
+subclauses := make([]string, 0, len(f.addresses))
for _, addr := range f.addresses {
subclauses = append(subclauses, "emitter_addr=?")
values = append(values, addr.Bytes())
@@ -543,7 +654,7 @@ func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, exclude
joins = append(joins, fmt.Sprintf("event_entry %s on event.id=%[1]s.event_id", joinAlias))
clauses = append(clauses, fmt.Sprintf("%s.indexed=1 AND %[1]s.key=?", joinAlias))
values = append(values, key)
-subclauses := []string{}
+subclauses := make([]string, 0, len(vals))
for _, val := range vals {
subclauses = append(subclauses, fmt.Sprintf("(%s.value=? AND %[1]s.codec=?)", joinAlias))
values = append(values, val.Value, val.Codec)

View File

@@ -369,7 +369,7 @@ func (sp *StatePredicates) OnMinerPreCommitChange() DiffMinerActorStateFunc {
// DiffPaymentChannelStateFunc is function that compares two states for the payment channel
type DiffPaymentChannelStateFunc func(ctx context.Context, oldState paych.State, newState paych.State) (changed bool, user UserData, err error)
-// OnPaymentChannelActorChanged calls diffPaymentChannelState when the state changes for the the payment channel actor
+// OnPaymentChannelActorChanged calls diffPaymentChannelState when the state changes for the payment channel actor
func (sp *StatePredicates) OnPaymentChannelActorChanged(paychAddr address.Address, diffPaymentChannelState DiffPaymentChannelStateFunc) DiffTipSetKeyFunc {
return sp.OnActorStateChanged(paychAddr, func(ctx context.Context, oldActorState, newActorState *types.Actor) (changed bool, user UserData, err error) {
oldState, err := paych.Load(adt.WrapStore(ctx, sp.cst), oldActorState)

View File

@@ -38,6 +38,7 @@ const (
ReadResMinSpeed = 50 << 10
ShufflePeersPrefix = 16
WriteResDeadline = 60 * time.Second
+streamReadDeadline = 10 * time.Second
)
// FIXME: Rename. Make private.

View File

@@ -40,11 +40,15 @@ func (s *server) HandleStream(stream inet.Stream) {
defer stream.Close() //nolint:errcheck
+_ = stream.SetReadDeadline(time.Now().Add(streamReadDeadline))
var req Request
if err := cborutil.ReadCborRPC(bufio.NewReader(stream), &req); err != nil {
+_ = stream.SetReadDeadline(time.Time{})
log.Warnf("failed to read block sync request: %s", err)
return
}
+_ = stream.SetReadDeadline(time.Time{})
log.Debugw("block sync request",
"start", req.Head, "len", req.Length)
@@ -137,7 +141,7 @@ func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Re
chain, err := collectChainSegment(ctx, s.cs, req)
if err != nil {
-log.Warn("block sync request: collectChainSegment failed: ", err)
+log.Info("block sync request: collectChainSegment failed: ", err)
return &Response{
Status: InternalError,
ErrorMessage: err.Error(),
@ -171,17 +175,11 @@ func collectChainSegment(ctx context.Context, cs *store.ChainStore, req *validat
} }
if req.options.IncludeMessages { if req.options.IncludeMessages {
bmsgs, bmincl, smsgs, smincl, err := gatherMessages(ctx, cs, ts) bst.Messages, err = gatherMessages(ctx, cs, ts)
if err != nil { if err != nil {
return nil, xerrors.Errorf("gather messages failed: %w", err) return nil, xerrors.Errorf("gather messages failed: %w", err)
} }
// FIXME: Pass the response to `gatherMessages()` and set all this there.
bst.Messages = &CompactedMessages{}
bst.Messages.Bls = bmsgs
bst.Messages.BlsIncludes = bmincl
bst.Messages.Secpk = smsgs
bst.Messages.SecpkIncludes = smincl
} }
bstips = append(bstips, &bst) bstips = append(bstips, &bst)
@ -196,16 +194,16 @@ func collectChainSegment(ctx context.Context, cs *store.ChainStore, req *validat
} }
} }
func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) { func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) (*CompactedMessages, error) {
msgs := new(CompactedMessages)
blsmsgmap := make(map[cid.Cid]uint64) blsmsgmap := make(map[cid.Cid]uint64)
secpkmsgmap := make(map[cid.Cid]uint64) secpkmsgmap := make(map[cid.Cid]uint64)
var secpkincl, blsincl [][]uint64
var blscids, secpkcids []cid.Cid var blscids, secpkcids []cid.Cid
for _, block := range ts.Blocks() { for _, block := range ts.Blocks() {
bc, sc, err := cs.ReadMsgMetaCids(ctx, block.Messages) bc, sc, err := cs.ReadMsgMetaCids(ctx, block.Messages)
if err != nil { if err != nil {
return nil, nil, nil, nil, err return nil, err
} }
// FIXME: DRY. Use `chain.Message` interface. // FIXME: DRY. Use `chain.Message` interface.
@ -220,7 +218,7 @@ func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet)
bmi = append(bmi, i) bmi = append(bmi, i)
} }
blsincl = append(blsincl, bmi) msgs.BlsIncludes = append(msgs.BlsIncludes, bmi)
smi := make([]uint64, 0, len(sc)) smi := make([]uint64, 0, len(sc))
for _, m := range sc { for _, m := range sc {
@ -233,18 +231,19 @@ func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet)
smi = append(smi, i) smi = append(smi, i)
} }
secpkincl = append(secpkincl, smi) msgs.SecpkIncludes = append(msgs.SecpkIncludes, smi)
} }
blsmsgs, err := cs.LoadMessagesFromCids(ctx, blscids) var err error
msgs.Bls, err = cs.LoadMessagesFromCids(ctx, blscids)
if err != nil { if err != nil {
return nil, nil, nil, nil, err return nil, err
} }
secpkmsgs, err := cs.LoadSignedMessagesFromCids(ctx, secpkcids) msgs.Secpk, err = cs.LoadSignedMessagesFromCids(ctx, secpkcids)
if err != nil { if err != nil {
return nil, nil, nil, nil, err return nil, err
} }
return blsmsgs, blsincl, secpkmsgs, secpkincl, nil return msgs, nil
} }

View File

@@ -778,9 +778,7 @@ func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
_, _ = mp.getStateNonce(ctx, m.Message.From, tmpCurTs)
mp.curTsLk.Lock()
-if tmpCurTs == mp.curTs {
-//with the lock enabled, mp.curTs is the same Ts as we just had, so we know that our computations are cached
-} else {
+if tmpCurTs != mp.curTs {
//curTs has been updated so we want to cache the new one:
tmpCurTs = mp.curTs
//we want to release the lock, cache the computations then grab it again
@@ -789,7 +787,7 @@ func (mp *MessagePool) Add(ctx context.Context, m *types.SignedMessage) error {
_, _ = mp.getStateNonce(ctx, m.Message.From, tmpCurTs)
mp.curTsLk.Lock()
//now that we have the lock, we continue, we could do this as a loop forever, but that's bad to loop forever, and this was added as an optimization and it seems once is enough because the computation < block time
-}
+} // else with the lock enabled, mp.curTs is the same Ts as we just had, so we know that our computations are cached
defer mp.curTsLk.Unlock()
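The hunk above keeps an existing "warm the caches outside the lock, then re-check after relocking" optimization while inverting the condition. A minimal stand-alone sketch of that pattern, with hypothetical names rather than the Lotus types:

	// Hypothetical sketch of the pattern used in MessagePool.Add above.
	type pool struct {
		mu    sync.Mutex
		curTs int // stands in for the current tipset
	}

	func (p *pool) add(warm func(ts int)) {
		p.mu.Lock()
		snapshot := p.curTs
		p.mu.Unlock()

		warm(snapshot) // expensive cache warm-up without holding the lock

		p.mu.Lock()
		if snapshot != p.curTs {
			// curTs moved while we were warming; warm once more against the new
			// value, dropping the lock so the expensive work stays outside the
			// critical section (done once, not in a loop, as the comment notes)
			snapshot = p.curTs
			p.mu.Unlock()
			warm(snapshot)
			p.mu.Lock()
		}
		defer p.mu.Unlock()
		// ... proceed with the add; the common case now hits warm caches ...
	}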

View File

@ -5,6 +5,7 @@ import (
"errors" "errors"
"time" "time"
lru "github.com/hashicorp/golang-lru/v2"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
pubsub "github.com/libp2p/go-libp2p-pubsub" pubsub "github.com/libp2p/go-libp2p-pubsub"
"golang.org/x/xerrors" "golang.org/x/xerrors"
@ -16,6 +17,8 @@ import (
"github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/must"
"github.com/filecoin-project/lotus/lib/result"
) )
var ( var (
@ -39,10 +42,19 @@ type Provider interface {
IsLite() bool IsLite() bool
} }
type actorCacheKey struct {
types.TipSetKey
address.Address
}
var nonceCacheSize = 128
type mpoolProvider struct { type mpoolProvider struct {
sm *stmgr.StateManager sm *stmgr.StateManager
ps *pubsub.PubSub ps *pubsub.PubSub
liteActorCache *lru.Cache[actorCacheKey, result.Result[*types.Actor]]
lite MpoolNonceAPI lite MpoolNonceAPI
} }
@ -53,18 +65,31 @@ func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider {
} }
func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer MpoolNonceAPI) Provider { func NewProviderLite(sm *stmgr.StateManager, ps *pubsub.PubSub, noncer MpoolNonceAPI) Provider {
return &mpoolProvider{sm: sm, ps: ps, lite: noncer} return &mpoolProvider{
sm: sm,
ps: ps,
lite: noncer,
liteActorCache: must.One(lru.New[actorCacheKey, result.Result[*types.Actor]](nonceCacheSize)),
}
} }
func (mpp *mpoolProvider) IsLite() bool { func (mpp *mpoolProvider) IsLite() bool {
return mpp.lite != nil return mpp.lite != nil
} }
func (mpp *mpoolProvider) getActorLite(addr address.Address, ts *types.TipSet) (*types.Actor, error) { func (mpp *mpoolProvider) getActorLite(addr address.Address, ts *types.TipSet) (act *types.Actor, err error) {
if !mpp.IsLite() { if !mpp.IsLite() {
return nil, errors.New("should not use getActorLite on non lite Provider") return nil, errors.New("should not use getActorLite on non lite Provider")
} }
if c, ok := mpp.liteActorCache.Get(actorCacheKey{ts.Key(), addr}); ok {
return c.Unwrap()
}
defer func() {
mpp.liteActorCache.Add(actorCacheKey{ts.Key(), addr}, result.Wrap(act, err))
}()
n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key()) n, err := mpp.lite.GetNonce(context.TODO(), addr, ts.Key())
if err != nil { if err != nil {
return nil, xerrors.Errorf("getting nonce over lite: %w", err) return nil, xerrors.Errorf("getting nonce over lite: %w", err)

View File

@@ -400,7 +400,7 @@ tailLoop:
continue tailLoop
}
-// the merge loop ended after processing all the chains and we we probably have still
+// the merge loop ended after processing all the chains and we probably have still
// gas to spare; end the loop.
break
}

View File

@@ -1191,7 +1191,7 @@ func TestOptimalMessageSelection2(t *testing.T) {
func TestOptimalMessageSelection3(t *testing.T) {
//stm: @TOKEN_WALLET_NEW_001, @CHAIN_MEMPOOL_SELECT_001
-// this test uses 10 actors sending a block of messages to each other, with the the first
+// this test uses 10 actors sending a block of messages to each other, with the first
// actors paying higher gas premium than the subsequent actors.
// We select with a low ticket quality; the chain dependent merging algorithm should pick
// messages from the median actor from the start
@@ -1321,7 +1321,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
mustAdd(t, mp, m)
}
-logging.SetLogLevel("messagepool", "error")
+_ = logging.SetLogLevel("messagepool", "error")
// 1. greedy selection
gm, err := mp.selectMessagesGreedy(context.Background(), ts, ts)
@@ -1414,7 +1414,7 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu
t.Logf("Average reward boost: %f", rewardBoost)
t.Logf("Average best tq reward: %f", totalBestTQReward/runs/1e12)
-logging.SetLogLevel("messagepool", "info")
+_ = logging.SetLogLevel("messagepool", "info")
return capacityBoost, rewardBoost, totalBestTQReward / runs / 1e12
}
} }

View File

@ -1,98 +0,0 @@
package messagesigner
import (
"context"
"github.com/google/uuid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
"github.com/libp2p/go-libp2p/core/peer"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/types"
consensus "github.com/filecoin-project/lotus/lib/consensus/raft"
"github.com/filecoin-project/lotus/node/modules/dtypes"
)
type MessageSignerConsensus struct {
MsgSigner
Consensus *consensus.Consensus
}
func NewMessageSignerConsensus(
wallet api.Wallet,
mpool messagepool.MpoolNonceAPI,
ds dtypes.MetadataDS,
consensus *consensus.Consensus) *MessageSignerConsensus {
ds = namespace.Wrap(ds, datastore.NewKey("/message-signer-consensus/"))
return &MessageSignerConsensus{
MsgSigner: &MessageSigner{
wallet: wallet,
mpool: mpool,
ds: ds,
},
Consensus: consensus,
}
}
func (ms *MessageSignerConsensus) IsLeader(ctx context.Context) bool {
return ms.Consensus.IsLeader(ctx)
}
func (ms *MessageSignerConsensus) RedirectToLeader(ctx context.Context, method string, arg interface{}, ret interface{}) (bool, error) {
ok, err := ms.Consensus.RedirectToLeader(method, arg, ret.(*types.SignedMessage))
if err != nil {
return ok, err
}
return ok, nil
}
func (ms *MessageSignerConsensus) SignMessage(
ctx context.Context,
msg *types.Message,
spec *api.MessageSendSpec,
cb func(*types.SignedMessage) error) (*types.SignedMessage, error) {
signedMsg, err := ms.MsgSigner.SignMessage(ctx, msg, spec, cb)
if err != nil {
return nil, err
}
op := &consensus.ConsensusOp{
Nonce: signedMsg.Message.Nonce,
Uuid: spec.MsgUuid,
Addr: signedMsg.Message.From,
SignedMsg: signedMsg,
}
err = ms.Consensus.Commit(ctx, op)
if err != nil {
return nil, err
}
return signedMsg, nil
}
func (ms *MessageSignerConsensus) GetSignedMessage(ctx context.Context, uuid uuid.UUID) (*types.SignedMessage, error) {
cstate, err := ms.Consensus.State(ctx)
if err != nil {
return nil, err
}
//cstate := state.(Consensus.RaftState)
msg, ok := cstate.MsgUuids[uuid]
if !ok {
return nil, xerrors.Errorf("Msg with Uuid %s not available", uuid)
}
return msg, nil
}
func (ms *MessageSignerConsensus) GetRaftState(ctx context.Context) (*consensus.RaftState, error) {
return ms.Consensus.State(ctx)
}
func (ms *MessageSignerConsensus) Leader(ctx context.Context) (peer.ID, error) {
return ms.Consensus.Leader(ctx)
}

View File

@@ -184,9 +184,8 @@ func (sr *stateRand) GetBeaconRandomness(ctx context.Context, filecoinEpoch abi.
return sr.getBeaconRandomnessV3(ctx, filecoinEpoch)
} else if nv == network.Version13 {
return sr.getBeaconRandomnessV2(ctx, filecoinEpoch)
-} else {
-return sr.getBeaconRandomnessV1(ctx, filecoinEpoch)
}
+return sr.getBeaconRandomnessV1(ctx, filecoinEpoch)
}
func (sr *stateRand) DrawChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { func (sr *stateRand) DrawChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) {

View File

@@ -230,7 +230,7 @@ func NewStateTree(cst cbor.IpldStore, ver types.StateTreeVersion) (*StateTree, e
Store: cst,
snaps: newStateSnaps(),
}
-s.lookupIDFun = s.lookupIDinternal
+s.lookupIDFun = s.lookupInternalIDAddress
return s, nil
}
@@ -302,13 +302,13 @@ func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) {
Store: cst,
snaps: newStateSnaps(),
}
-s.lookupIDFun = s.lookupIDinternal
+s.lookupIDFun = s.lookupInternalIDAddress
return s, nil
}
func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error {
-iaddr, err := st.LookupID(addr)
+iaddr, err := st.LookupIDAddress(addr)
if err != nil {
return xerrors.Errorf("ID lookup failed: %w", err)
}
@@ -318,7 +318,7 @@ func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error {
return nil
}
-func (st *StateTree) lookupIDinternal(addr address.Address) (address.Address, error) {
+func (st *StateTree) lookupInternalIDAddress(addr address.Address) (address.Address, error) {
act, err := st.GetActor(init_.Address)
if err != nil {
return address.Undef, xerrors.Errorf("getting init actor: %w", err)
@@ -339,8 +339,8 @@ func (st *StateTree) lookupIDinternal(addr address.Address) (address.Address, er
return a, err
}
-// LookupID gets the ID address of this actor's `addr` stored in the `InitActor`.
-func (st *StateTree) LookupID(addr address.Address) (address.Address, error) {
+// LookupIDAddress gets the ID address of this actor's `addr` stored in the `InitActor`.
+func (st *StateTree) LookupIDAddress(addr address.Address) (address.Address, error) {
if addr.Protocol() == address.ID {
return addr, nil
}
@@ -366,7 +366,7 @@ func (st *StateTree) GetActor(addr address.Address) (*types.Actor, error) {
}
// Transform `addr` to its ID format.
-iaddr, err := st.LookupID(addr)
+iaddr, err := st.LookupIDAddress(addr)
if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) {
return nil, xerrors.Errorf("resolution lookup failed (%s): %w", addr, err)
@@ -411,7 +411,7 @@ func (st *StateTree) DeleteActor(addr address.Address) error {
return xerrors.Errorf("DeleteActor called on undefined address")
}
-iaddr, err := st.LookupID(addr)
+iaddr, err := st.LookupIDAddress(addr)
if err != nil {
if xerrors.Is(err, types.ErrActorNotFound) {
return xerrors.Errorf("resolution lookup failed (%s): %w", addr, err)

View File

@@ -542,7 +542,7 @@ func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address,
return api.MarketBalance{}, err
}
-addr, err = sm.LookupID(ctx, addr, ts)
+addr, err = sm.LookupIDAddress(ctx, addr, ts)
if err != nil {
return api.MarketBalance{}, err
}

View File

@@ -13,6 +13,16 @@ import (
)
func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) {
+return sm.tipSetState(ctx, ts, false)
+}
+// Recompute the tipset state without trying to lookup a pre-computed result in the chainstore.
+// Useful if we know that our local chain-state isn't complete (e.g., we've discarded the events).
+func (sm *StateManager) RecomputeTipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) {
+return sm.tipSetState(ctx, ts, true)
+}
+func (sm *StateManager) tipSetState(ctx context.Context, ts *types.TipSet, recompute bool) (st cid.Cid, rec cid.Cid, err error) {
ctx, span := trace.StartSpan(ctx, "tipSetState")
defer span.End()
if span.IsRecordingEvents() {
@@ -65,9 +75,11 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c
// First, try to find the tipset in the current chain. If found, we can avoid re-executing
// it.
+if !recompute {
if st, rec, found := tryLookupTipsetState(ctx, sm.cs, ts); found {
return st, rec, nil
}
+}
st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor, false)
if err != nil {
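As a usage sketch of the new entry point (an illustrative call site, not code from this PR), a node whose local chain state is known to be incomplete can force re-execution of a tipset instead of reusing the stored result:

	// Hypothetical caller: re-derive state and receipts for ts even if a
	// pre-computed result exists in the chainstore (e.g. after discarding events).
	stRoot, rcptRoot, err := sm.RecomputeTipSetState(ctx, ts)
	if err != nil {
		return xerrors.Errorf("recomputing tipset state: %w", err)
	}
	_ = stRoot
	_ = rcptRoot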

View File

@ -178,26 +178,31 @@ func (sm *StateManager) HandleStateForks(ctx context.Context, root cid.Cid, heig
retCid := root retCid := root
u := sm.stateMigrations[height] u := sm.stateMigrations[height]
if u != nil && u.upgrade != nil { if u != nil && u.upgrade != nil {
if height != build.UpgradeWatermelonFixHeight {
migCid, ok, err := u.migrationResultCache.Get(ctx, root) migCid, ok, err := u.migrationResultCache.Get(ctx, root)
if err == nil { if err == nil {
if ok { if ok {
log.Infow("CACHED migration", "height", height, "from", root, "to", migCid) log.Infow("CACHED migration", "height", height, "from", root, "to", migCid)
foundMigratedRoot, err := sm.ChainStore().StateBlockstore().Has(ctx, migCid)
if err != nil {
log.Errorw("failed to check whether previous migration result is present", "err", err)
} else if !foundMigratedRoot {
log.Errorw("cached migration result not found in blockstore, running migration again")
u.migrationResultCache.Delete(ctx, root)
} else {
return migCid, nil return migCid, nil
} }
}
} else if !errors.Is(err, datastore.ErrNotFound) { } else if !errors.Is(err, datastore.ErrNotFound) {
log.Errorw("failed to lookup previous migration result", "err", err) log.Errorw("failed to lookup previous migration result", "err", err)
} else { } else {
log.Debug("no cached migration found, migrating from scratch") log.Debug("no cached migration found, migrating from scratch")
} }
}
startTime := time.Now() startTime := time.Now()
log.Warnw("STARTING migration", "height", height, "from", root) log.Warnw("STARTING migration", "height", height, "from", root)
// Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may // Yes, we clone the cache, even for the final upgrade epoch. Why? Reverts. We may
// have to migrate multiple times. // have to migrate multiple times.
tmpCache := u.cache.Clone() tmpCache := u.cache.Clone()
var err error
retCid, err = u.upgrade(ctx, sm, tmpCache, cb, root, height, ts) retCid, err = u.upgrade(ctx, sm, tmpCache, cb, root, height, ts)
if err != nil { if err != nil {
log.Errorw("FAILED migration", "height", height, "from", root, "error", err) log.Errorw("FAILED migration", "height", height, "from", root, "error", err)

View File

@ -375,6 +375,20 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) {
} }
func TestForkPreMigration(t *testing.T) { func TestForkPreMigration(t *testing.T) {
// Backup the original value of the DISABLE_PRE_MIGRATIONS environment variable
originalValue, _ := os.LookupEnv("LOTUS_DISABLE_PRE_MIGRATIONS")
// Unset the DISABLE_PRE_MIGRATIONS environment variable for the test
if err := os.Unsetenv("LOTUS_DISABLE_PRE_MIGRATIONS"); err != nil {
t.Fatalf("failed to unset LOTUS_DISABLE_PRE_MIGRATIONS: %v", err)
}
// Restore the original DISABLE_PRE_MIGRATIONS environment variable at the end of the test
defer func() {
if err := os.Setenv("LOTUS_DISABLE_PRE_MIGRATIONS", originalValue); err != nil {
t.Fatalf("failed to restore LOTUS_DISABLE_PRE_MIGRATIONS: %v", err)
}
}()
//stm: @CHAIN_GEN_NEXT_TIPSET_001, //stm: @CHAIN_GEN_NEXT_TIPSET_001,
//stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001 //stm: @CHAIN_STATE_RESOLVE_TO_KEY_ADDR_001, @CHAIN_STATE_SET_VM_CONSTRUCTOR_001
logging.SetAllLoggers(logging.LevelInfo) logging.SetAllLoggers(logging.LevelInfo)

View File

@@ -44,7 +44,7 @@ func (s *RPCStateManager) LoadActorTsk(ctx context.Context, addr address.Address
return s.gapi.StateGetActor(ctx, addr, tsk)
}
-func (s *RPCStateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
+func (s *RPCStateManager) LookupIDAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
return s.gapi.StateLookupID(ctx, addr, ts.Key())
}

View File

@@ -243,7 +243,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet
return nil, nil, cid.Undef, xerrors.Errorf("failed to load initital tipset")
}
-mFromId, err := sm.LookupID(ctx, m.VMMessage().From, from)
+mFromId, err := sm.LookupIDAddress(ctx, m.VMMessage().From, from)
if err != nil {
return nil, nil, cid.Undef, xerrors.Errorf("looking up From id address: %w", err)
}

View File

@@ -49,7 +49,7 @@ type StateManagerAPI interface {
Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error)
GetPaychState(ctx context.Context, addr address.Address, ts *types.TipSet) (*types.Actor, paych.State, error)
LoadActorTsk(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error)
-LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error)
+LookupIDAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error)
ResolveToDeterministicAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error)
}
@@ -113,6 +113,10 @@ func (m *migrationResultCache) Store(ctx context.Context, root cid.Cid, resultCi
return nil
}
+func (m *migrationResultCache) Delete(ctx context.Context, root cid.Cid) {
+_ = m.ds.Delete(ctx, m.keyForMigration(root))
+}
type Executor interface {
NewActorRegistry() *vm.ActorRegistry
ExecuteTipSet(ctx context.Context, sm *StateManager, ts *types.TipSet, em ExecMonitor, vmTracing bool) (stateroot cid.Cid, rectsroot cid.Cid, err error)
@@ -396,13 +400,30 @@ func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Addres
return kaddr.Payload(), nil
}
-func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
+func (sm *StateManager) LookupIDAddress(_ context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) {
+// Check for the fast route first to avoid unnecessary CBOR store instantiation and state tree load.
+if addr.Protocol() == address.ID {
+return addr, nil
+}
cst := cbor.NewCborStore(sm.cs.StateBlockstore())
state, err := state.LoadStateTree(cst, sm.parentState(ts))
if err != nil {
return address.Undef, xerrors.Errorf("load state tree: %w", err)
}
-return state.LookupID(addr)
+return state.LookupIDAddress(addr)
+}
+func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (abi.ActorID, error) {
+idAddr, err := sm.LookupIDAddress(ctx, addr, ts)
+if err != nil {
+return 0, xerrors.Errorf("state manager lookup id: %w", err)
+}
+id, err := address.IDFromAddress(idAddr)
+if err != nil {
+return 0, xerrors.Errorf("resolve actor id: id from addr: %w", err)
+}
+return abi.ActorID(id), nil
}
func (sm *StateManager) LookupRobustAddress(ctx context.Context, idAddr address.Address, ts *types.TipSet) (address.Address, error) {
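After this change the two lookups resolve the same actor in different forms: LookupIDAddress still returns an ID address, while LookupID goes one step further and returns an abi.ActorID. A small illustrative sketch of a caller (hypothetical call site, not code from this PR):

	// Hypothetical caller of the renamed/new methods shown above.
	idAddr, err := sm.LookupIDAddress(ctx, addr, ts) // ID-form address of the actor
	if err != nil {
		return err
	}
	actorID, err := sm.LookupID(ctx, addr, ts) // the numeric abi.ActorID
	if err != nil {
		return err
	}
	_ = idAddr
	_ = actorID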

View File

@ -193,7 +193,7 @@ func (sm *StateManager) setupPostCalicoVesting(ctx context.Context) error {
	return nil
}

-// GetVestedFunds returns all funds that have "left" actors that are in the genesis state:
+// GetFilVested returns all funds that have "left" actors that are in the genesis state:
// - For Multisigs, it counts the actual amounts that have vested at the given epoch
// - For Accounts, it counts max(currentBalance - genesisBalance, 0).
func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch) (abi.TokenAmount, error) {
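To make the account rule in the comment above concrete, here is a small standalone sketch of max(currentBalance - genesisBalance, 0) using go-state-types big integers. The function name and inputs are illustrative assumptions; the real GetFilVested walks genesis actors and multisig vesting schedules, which this sketch does not attempt.

package sketch

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/big"
)

// accountVested mirrors the documented rule for genesis accounts:
// count max(currentBalance - genesisBalance, 0).
func accountVested(currentBalance, genesisBalance abi.TokenAmount) abi.TokenAmount {
	diff := big.Sub(currentBalance, genesisBalance)
	if diff.LessThan(big.Zero()) {
		return big.Zero()
	}
	return diff
}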

View File

@ -44,23 +44,15 @@ func TestChainCheckpoint(t *testing.T) {
	head := cs.GetHeaviestTipSet()
	require.True(t, head.Equals(checkpointParents))

-	// Try to set the checkpoint in the future, it should fail.
+	// Checkpoint into the future.
	err = cs.SetCheckpoint(ctx, checkpoint)
-	require.Error(t, err)
-
-	// Then move the head back.
-	err = cs.SetHead(ctx, checkpoint)
	require.NoError(t, err)

-	// Verify it worked.
+	// And verify that it worked.
	head = cs.GetHeaviestTipSet()
	require.True(t, head.Equals(checkpoint))

-	// And checkpoint it.
-	err = cs.SetCheckpoint(ctx, checkpoint)
-	require.NoError(t, err)
-
-	// Let the second miner miner mine a fork
+	// Let the second miner mine a fork
	last = checkpointParents
	for i := 0; i < 4; i++ {
		ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:], 0)
@ -85,11 +77,10 @@ func TestChainCheckpoint(t *testing.T) {
	head = cs.GetHeaviestTipSet()
	require.True(t, head.Equals(last))

-	// Setting a checkpoint on the other fork should fail.
+	// We should switch back if we checkpoint again.
	err = cs.SetCheckpoint(ctx, checkpoint)
-	require.Error(t, err)
-
-	// Setting a checkpoint on this fork should succeed.
-	err = cs.SetCheckpoint(ctx, checkpointParents)
	require.NoError(t, err)
+
+	head = cs.GetHeaviestTipSet()
+	require.True(t, head.Equals(checkpoint))
}

View File

@ -119,7 +119,7 @@ func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet)
	var sender address.Address
	if ts.Height() >= build.UpgradeHyperdriveHeight {
		if useIds {
-			sender, err = st.LookupID(m.From)
+			sender, err = st.LookupIDAddress(m.From)
			if err != nil {
				return false, xerrors.Errorf("failed to resolve sender: %w", err)
			}
@ -131,14 +131,14 @@ func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet)
			// uh-oh, we actually have an ID-sender!
			useIds = true
			for robust, nonce := range applied {
-				resolved, err := st.LookupID(robust)
+				resolved, err := st.LookupIDAddress(robust)
				if err != nil {
					return false, xerrors.Errorf("failed to resolve sender: %w", err)
				}
				applied[resolved] = nonce
			}
-			sender, err = st.LookupID(m.From)
+			sender, err = st.LookupIDAddress(m.From)
			if err != nil {
				return false, xerrors.Errorf("failed to resolve sender: %w", err)
			}
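Reviewer note: a toy sketch of why the applied map is re-keyed once an ID-addressed sender appears — the robust and ID forms of one account must share a single nonce counter. The resolve callback stands in for st.LookupIDAddress, and everything here is an illustrative assumption rather than code from this diff.

package sketch

import (
	"github.com/filecoin-project/go-address"
)

// rekeyByID resolves every sender key in applied to its ID form so that
// duplicate-nonce checks see one entry per actor, regardless of address form.
func rekeyByID(applied map[address.Address]uint64, resolve func(address.Address) (address.Address, error)) (map[address.Address]uint64, error) {
	byID := make(map[address.Address]uint64, len(applied))
	for sender, nonce := range applied {
		id, err := resolve(sender)
		if err != nil {
			return nil, err
		}
		byID[id] = nonce
	}
	return byID, nil
}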

View File

@ -392,7 +392,7 @@ func (s *walkScheduler) Wait() error {
		log.Errorw("error writing to CAR file", "error", err)
		return errWrite
	}
-	s.workerTasks.Close() //nolint:errcheck
+	_ = s.workerTasks.Close()
	return err
}

View File

@ -305,6 +305,7 @@ func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange
	// Unsubscribe.
	cs.bestTips.Unsub(subch)
+	// revive:disable-next-line:empty-block
	// Drain the channel.
	for range subch {
	}
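The empty loop above (now carrying a revive suppression) is the usual unsubscribe-then-drain pattern. A generic standalone sketch of the same idea follows, under the assumption that the publisher closes the channel after Unsub so the drain loop terminates; it is an illustration, not the pubsub library's API.

package sketch

// drainAfterUnsub unsubscribes and then consumes any buffered events until the
// channel is closed, so in-flight publishes neither block the publisher nor leak.
func drainAfterUnsub[T any](unsub func(), ch <-chan T) {
	unsub()
	// revive:disable-next-line:empty-block
	for range ch {
	}
}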
@ -752,7 +753,7 @@ func FlushValidationCache(ctx context.Context, ds dstore.Batching) error {
	for _, k := range allKeys {
		if strings.HasPrefix(k.Key, blockValidationCacheKeyPrefix.String()) {
			delCnt++
-			batch.Delete(ctx, dstore.RawKey(k.Key)) // nolint:errcheck
+			_ = batch.Delete(ctx, dstore.RawKey(k.Key))
		}
	}
@ -793,9 +794,12 @@ func (cs *ChainStore) removeCheckpoint(ctx context.Context) error {
	return nil
}

-// SetCheckpoint will set a checkpoint past which the chainstore will not allow forks.
+// SetCheckpoint will set a checkpoint past which the chainstore will not allow forks. If the new
+// checkpoint is not an ancestor of the current head, head will be set to the new checkpoint.
//
-// NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past.
+// NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past, but can be set
+// arbitrarily far into the future.
+// NOTE: The new checkpoint must already be synced.
func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error {
	tskBytes, err := json.Marshal(ts.Key())
	if err != nil {
@ -805,10 +809,6 @@ func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error
	cs.heaviestLk.Lock()
	defer cs.heaviestLk.Unlock()

-	if ts.Height() > cs.heaviest.Height() {
-		return xerrors.Errorf("cannot set a checkpoint in the future")
-	}
-
	// Otherwise, this operation could get _very_ expensive.
	if cs.heaviest.Height()-ts.Height() > build.ForkLengthThreshold {
		return xerrors.Errorf("cannot set a checkpoint before the fork threshold")
@ -821,7 +821,9 @@ func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error
		}
		if !anc {
-			return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err)
+			if err := cs.takeHeaviestTipSet(ctx, ts); err != nil {
+				return xerrors.Errorf("failed to switch chains when setting checkpoint: %w", err)
+			}
		}
	}

	err = cs.metadataDs.Put(ctx, checkpointKey, tskBytes)
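Reviewer note: a short sketch of the behaviour the updated comment and test describe — checkpointing a synced tipset that is not an ancestor of the current head should also switch head to it. The wrapper name checkpointAndVerify is an assumption for illustration; SetCheckpoint and GetHeaviestTipSet are the ChainStore methods touched in this diff.

package sketch

import (
	"context"
	"fmt"

	"github.com/filecoin-project/lotus/chain/store"
	"github.com/filecoin-project/lotus/chain/types"
)

// checkpointAndVerify sets ts as the checkpoint and reports whether the head
// moved to it, which the new semantics say it should when ts is already synced.
func checkpointAndVerify(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) error {
	if err := cs.SetCheckpoint(ctx, ts); err != nil {
		return err
	}
	if head := cs.GetHeaviestTipSet(); !head.Equals(ts) {
		return fmt.Errorf("expected head %s to equal checkpoint %s", head.Key(), ts.Key())
	}
	return nil
}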

View File

@ -24,7 +24,7 @@ func NewWindow(capacity int, size time.Duration) *Window {
}

// Add attempts to append a new timestamp into the current window. Previously
-// added values that are not not within `size` difference from the value being
+// added values that are not within `size` difference from the value being
// added are first removed. Add fails if adding the value would cause the
// window to exceed capacity.
func (w *Window) Add() error {
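To make the documented semantics concrete, here is a standalone toy version of the same sliding-window idea — not the implementation in this file, just an illustration of "drop entries older than size, then refuse to grow past capacity". All names here are assumptions of the sketch.

package sketch

import (
	"errors"
	"time"
)

// toyWindow keeps at most capacity timestamps, all within size of the newest entry.
type toyWindow struct {
	capacity int
	size     time.Duration
	stamps   []time.Time
}

var errWindowFull = errors.New("window full")

// add evicts entries older than size relative to now, then appends now unless
// that would exceed capacity, mirroring the Add behaviour documented above.
func (w *toyWindow) add(now time.Time) error {
	kept := w.stamps[:0]
	for _, t := range w.stamps {
		if now.Sub(t) <= w.size {
			kept = append(kept, t)
		}
	}
	w.stamps = kept
	if len(w.stamps) >= w.capacity {
		return errWindowFull
	}
	w.stamps = append(w.stamps, now)
	return nil
}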

Some files were not shown because too many files have changed in this diff.