Merge branch 'master' into fix/prove-commit-aggregate

Łukasz Magiera 2021-06-29 11:36:28 +02:00 committed by Łukasz Magiera
commit 7b1b082381
440 changed files with 28374 additions and 8721 deletions

.circleci/config.yml

@@ -7,7 +7,7 @@ orbs:
executors:
golang:
docker:
-- image: circleci/golang:1.15.5
+- image: circleci/golang:1.16.4
resource_class: 2xlarge
ubuntu:
docker:
@@ -112,7 +112,7 @@ jobs:
- run:
command: make debug
-test: &test
+test:
description: |
Run tests with gotestsum.
parameters: &test-params
@@ -123,23 +123,20 @@ jobs:
type: string
default: "-timeout 30m"
description: Flags passed to go test.
-packages:
+target:
type: string
default: "./..."
description: Import paths of packages to be tested.
-winpost-test:
+proofs-log-test:
type: string
default: "0"
-deadline-test:
-type: string
-default: "0"
-test-suite-name:
+suite:
type: string
default: unit
description: Test suite name to report to CircleCI.
gotestsum-format:
type: string
-default: pkgname-and-test-fails
+default: standard-verbose
description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
coverage:
type: string
@@ -147,7 +144,7 @@ jobs:
description: Coverage flag. Set to the empty string to disable.
codecov-upload:
type: boolean
-default: false
+default: true
description: |
Upload coverage report to https://codecov.io/. Requires the codecov API token to be
set as an environment variable for private projects.
@@ -165,25 +162,24 @@ jobs:
- run:
name: go test
environment:
-LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >>
-LOTUS_TEST_DEADLINE_TOGGLING: << parameters.deadline-test >>
+TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >>
SKIP_CONFORMANCE: "1"
command: |
-mkdir -p /tmp/test-reports/<< parameters.test-suite-name >>
+mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts
gotestsum \
--format << parameters.gotestsum-format >> \
---junitfile /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml \
---jsonfile /tmp/test-artifacts/<< parameters.test-suite-name >>.json \
+--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
+--jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
-- \
<< parameters.coverage >> \
<< parameters.go-test-flags >> \
-<< parameters.packages >>
+<< parameters.target >>
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
-path: /tmp/test-artifacts/<< parameters.test-suite-name >>.json
+path: /tmp/test-artifacts/<< parameters.suite >>.json
- when:
condition: << parameters.codecov-upload >>
steps:
@@ -194,24 +190,6 @@ jobs:
command: |
bash <(curl -s https://codecov.io/bash)
-test-chain:
-<<: *test
-test-node:
-<<: *test
-test-storage:
-<<: *test
-test-cli:
-<<: *test
-test-short:
-<<: *test
-test-window-post:
-<<: *test
-test-window-post-dispute:
-<<: *test
-test-deadline-toggling:
-<<: *test
-test-terminate:
-<<: *test
test-conformance:
description: |
Run tests using a corpus of interoperable test vectors for Filecoin
@@ -331,7 +309,7 @@ jobs:
- run: cd extern/filecoin-ffi && make
- run:
name: "go get lotus@master"
-command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../..
+command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy
- run:
name: "build lotus-soup testplan"
command: pushd testplans/lotus-soup && go build -tags=testground .
@@ -379,8 +357,8 @@ jobs:
- run:
name: Install go
command: |
-curl -O https://dl.google.com/go/go1.15.5.darwin-amd64.pkg && \
-sudo installer -pkg go1.15.5.darwin-amd64.pkg -target /
+curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \
+sudo installer -pkg go1.16.4.darwin-amd64.pkg -target /
- run:
name: Install pkg-config
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
@@ -456,12 +434,13 @@ jobs:
name: prepare workspace
command: |
mkdir appimage
-mv Lotus-latest-x86_64.AppImage appimage
+mv Lotus-*.AppImage appimage
- persist_to_workspace:
root: "."
paths:
- appimage
gofmt:
executor: golang
steps:
@@ -470,7 +449,7 @@ jobs:
- run:
command: "! go fmt ./... 2>&1 | read"
-cbor-gen-check:
+gen-check:
executor: golang
steps:
- install-deps
@@ -478,7 +457,10 @@ jobs:
- run: make deps
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
-- run: make type-gen
+- run: make gen
+- run: git --no-pager diff
+- run: git --no-pager diff --quiet
+- run: make docsgen-cli
- run: git --no-pager diff
- run: git --no-pager diff --quiet
@@ -561,6 +543,33 @@ jobs:
name: Publish release
command: ./scripts/publish-release.sh
+publish-snapcraft:
+description: build and push snapcraft
+machine:
+image: ubuntu-2004:202104-01
+resource_class: 2xlarge
+parameters:
+channel:
+type: string
+default: "edge"
+description: snapcraft channel
+steps:
+- checkout
+- run:
+name: install snapcraft
+command: sudo snap install snapcraft --classic
+- run:
+name: create snapcraft config file
+command: |
+mkdir -p ~/.config/snapcraft
+echo "$SNAPCRAFT_LOGIN_FILE" | base64 -d > ~/.config/snapcraft/snapcraft.cfg
+- run:
+name: build snap
+command: snapcraft --use-lxd
+- run:
+name: publish snap
+command: snapcraft push *.snap --release << parameters.channel >>
build-and-push-image:
description: build and push docker images to public AWS ECR registry
executor: aws-cli/default
@@ -735,61 +744,148 @@ workflows:
concurrency: "16" # expend all docker 2xlarge CPUs.
- mod-tidy-check
- gofmt
-- cbor-gen-check
+- gen-check
- docs-check
-- test:
-codecov-upload: true
-test-suite-name: full
-- test-chain:
-codecov-upload: true
-test-suite-name: chain
-packages: "./chain/..."
-- test-node:
-codecov-upload: true
-test-suite-name: node
-packages: "./node/..."
-- test-storage:
-codecov-upload: true
-test-suite-name: storage
-packages: "./storage/... ./extern/..."
-- test-cli:
-codecov-upload: true
-test-suite-name: cli
-packages: "./cli/... ./cmd/... ./api/..."
-- test-window-post:
-codecov-upload: true
-go-test-flags: "-run=TestWindowedPost"
-winpost-test: "1"
-test-suite-name: window-post
-- test-window-post-dispute:
-codecov-upload: true
-go-test-flags: "-run=TestWindowPostDispute"
-winpost-test: "1"
-test-suite-name: window-post-dispute
-- test-terminate:
-codecov-upload: true
-go-test-flags: "-run=TestTerminate"
-winpost-test: "1"
-test-suite-name: terminate
-- test-deadline-toggling:
-codecov-upload: true
-go-test-flags: "-run=TestDeadlineToggling"
-deadline-test: "1"
-test-suite-name: deadline-toggling
-- test-short:
-go-test-flags: "--timeout 10m --short"
-test-suite-name: short
-filters:
-tags:
-only:
-- /^v\d+\.\d+\.\d+(-rc\d+)?$/
+- test:
+name: test-itest-api
+suite: itest-api
+target: "./itests/api_test.go"
+- test:
+name: test-itest-batch_deal
+suite: itest-batch_deal
+target: "./itests/batch_deal_test.go"
+- test:
+name: test-itest-ccupgrade
+suite: itest-ccupgrade
+target: "./itests/ccupgrade_test.go"
+- test:
+name: test-itest-cli
+suite: itest-cli
+target: "./itests/cli_test.go"
+- test:
+name: test-itest-deadlines
+suite: itest-deadlines
+target: "./itests/deadlines_test.go"
+- test:
+name: test-itest-deals_concurrent
+suite: itest-deals_concurrent
+target: "./itests/deals_concurrent_test.go"
+- test:
+name: test-itest-deals_offline
+suite: itest-deals_offline
+target: "./itests/deals_offline_test.go"
+- test:
+name: test-itest-deals_power
+suite: itest-deals_power
+target: "./itests/deals_power_test.go"
+- test:
+name: test-itest-deals_pricing
+suite: itest-deals_pricing
+target: "./itests/deals_pricing_test.go"
+- test:
+name: test-itest-deals_publish
+suite: itest-deals_publish
+target: "./itests/deals_publish_test.go"
+- test:
+name: test-itest-deals
+suite: itest-deals
+target: "./itests/deals_test.go"
+- test:
+name: test-itest-gateway
+suite: itest-gateway
+target: "./itests/gateway_test.go"
+- test:
+name: test-itest-multisig
+suite: itest-multisig
+target: "./itests/multisig_test.go"
+- test:
+name: test-itest-paych_api
+suite: itest-paych_api
+target: "./itests/paych_api_test.go"
+- test:
+name: test-itest-paych_cli
+suite: itest-paych_cli
+target: "./itests/paych_cli_test.go"
+- test:
+name: test-itest-sdr_upgrade
+suite: itest-sdr_upgrade
+target: "./itests/sdr_upgrade_test.go"
+- test:
+name: test-itest-sector_pledge
+suite: itest-sector_pledge
+target: "./itests/sector_pledge_test.go"
+- test:
+name: test-itest-sector_terminate
+suite: itest-sector_terminate
+target: "./itests/sector_terminate_test.go"
+- test:
+name: test-itest-tape
+suite: itest-tape
+target: "./itests/tape_test.go"
+- test:
+name: test-itest-verifreg
+suite: itest-verifreg
+target: "./itests/verifreg_test.go"
+- test:
+name: test-itest-wdpost_dispute
+suite: itest-wdpost_dispute
+target: "./itests/wdpost_dispute_test.go"
+- test:
+name: test-itest-wdpost
+suite: itest-wdpost
+target: "./itests/wdpost_test.go"
+- test:
+name: test-unit-cli
+suite: utest-unit-cli
+target: "./cli/... ./cmd/... ./api/..."
+- test:
+name: test-unit-node
+suite: utest-unit-node
+target: "./node/..."
+- test:
+name: test-unit-rest
+suite: utest-unit-rest
+target: "./api/... ./blockstore/... ./build/... ./chain/... ./cli/... ./cmd/... ./conformance/... ./extern/... ./gateway/... ./journal/... ./lib/... ./markets/... ./node/... ./paychmgr/... ./storage/... ./tools/..."
+- test:
+name: test-unit-storage
+suite: utest-unit-storage
+target: "./storage/... ./extern/..."
+- test:
+go-test-flags: "-run=TestMulticoreSDR"
+suite: multicore-sdr-check
+target: "./extern/sector-storage/ffiwrapper"
+proofs-log-test: "1"
- test-conformance:
-test-suite-name: conformance
-packages: "./conformance"
+suite: conformance
+codecov-upload: false
+target: "./conformance"
- test-conformance:
name: test-conformance-bleeding-edge
-test-suite-name: conformance-bleeding-edge
-packages: "./conformance"
+codecov-upload: false
+suite: conformance-bleeding-edge
+target: "./conformance"
vectors-branch: master
- trigger-testplans:
filters:
@@ -798,37 +894,27 @@ workflows:
- master
- build-debug
- build-all:
-requires:
-- test-short
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-ntwk-calibration:
-requires:
-- test-short
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-ntwk-butterfly:
-requires:
-- test-short
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-ntwk-nerpa:
-requires:
-- test-short
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-lotus-soup
- build-macos:
-requires:
-- test-short
filters:
branches:
ignore:
@@ -837,8 +923,6 @@ workflows:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-appimage:
-requires:
-- test-short
filters:
branches:
ignore:
@@ -858,7 +942,6 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-and-push-image:
dockerfile: Dockerfile.lotus
path: .
@@ -904,3 +987,26 @@ workflows:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
+- publish-snapcraft:
+name: publish-snapcraft-stable
+channel: stable
+filters:
+branches:
+ignore:
+- /.*/
+tags:
+only:
+- /^v\d+\.\d+\.\d+(-rc\d+)?$/
+nightly:
+triggers:
+- schedule:
+cron: "0 0 * * *"
+filters:
+branches:
+only:
+- master
+jobs:
+- publish-snapcraft:
+name: publish-snapcraft-nightly
+channel: edge

.circleci/gen.go Normal file (136 lines)

@@ -0,0 +1,136 @@
package main
import (
"embed"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"text/template"
)
//go:generate go run ./gen.go ..
//go:embed template.yml
var templateFile embed.FS
type (
dirs = []string
suite = string
)
// groupedUnitTests maps suite names to top-level directories that should be
// included in that suite. The program adds an implicit group "rest" that
// includes all other top-level directories.
var groupedUnitTests = map[suite]dirs{
"unit-node": {"node"},
"unit-storage": {"storage", "extern"},
"unit-cli": {"cli", "cmd", "api"},
}
func main() {
if len(os.Args) != 2 {
panic("expected path to repo as argument")
}
repo := os.Args[1]
tmpl := template.New("template.yml")
tmpl.Delims("[[", "]]")
tmpl.Funcs(template.FuncMap{
"stripSuffix": func(in string) string {
return strings.TrimSuffix(in, "_test.go")
},
})
tmpl = template.Must(tmpl.ParseFS(templateFile, "*"))
// list all itests.
itests, err := filepath.Glob(filepath.Join(repo, "./itests/*_test.go"))
if err != nil {
panic(err)
}
// strip the dir from all entries.
for i, f := range itests {
itests[i] = filepath.Base(f)
}
// calculate the exclusion set of unit test directories to exclude because
// they are already included in a grouped suite.
var excluded = map[string]struct{}{}
for _, ss := range groupedUnitTests {
for _, s := range ss {
e, err := filepath.Abs(filepath.Join(repo, s))
if err != nil {
panic(err)
}
excluded[e] = struct{}{}
}
}
// all unit tests top-level dirs that are not itests, nor included in other suites.
var rest = map[string]struct{}{}
err = filepath.Walk(repo, func(path string, f os.FileInfo, err error) error {
// include all tests that aren't in the itests directory.
if strings.Contains(path, "itests") {
return filepath.SkipDir
}
// exclude all tests included in other suites
if f.IsDir() {
if _, ok := excluded[path]; ok {
return filepath.SkipDir
}
}
if strings.HasSuffix(path, "_test.go") {
rel, err := filepath.Rel(repo, path)
if err != nil {
panic(err)
}
// take the first directory
rest[strings.Split(rel, string(os.PathSeparator))[0]] = struct{}{}
}
return err
})
if err != nil {
panic(err)
}
// add other directories to a 'rest' suite.
for k := range rest {
groupedUnitTests["unit-rest"] = append(groupedUnitTests["unit-rest"], k)
}
// map iteration guarantees no order, so sort the array in-place.
sort.Strings(groupedUnitTests["unit-rest"])
// form the input data.
type data struct {
ItestFiles []string
UnitSuites map[string]string
}
in := data{
ItestFiles: itests,
UnitSuites: func() map[string]string {
ret := make(map[string]string)
for name, dirs := range groupedUnitTests {
for i, d := range dirs {
dirs[i] = fmt.Sprintf("./%s/...", d) // turn into package
}
ret[name] = strings.Join(dirs, " ")
}
return ret
}(),
}
out, err := os.Create("./config.yml")
if err != nil {
panic(err)
}
defer out.Close()
// execute the template.
if err := tmpl.Execute(out, in); err != nil {
panic(err)
}
}
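
For reference, a minimal sketch (not part of this commit) of how gen.go's "[["/"]]" template delimiters and the stripSuffix helper expand one itest file into a workflow job entry; the inline template string below is a reduced, hypothetical stand-in for .circleci/template.yml:

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Same setup as gen.go: custom delimiters so CircleCI's own << >> syntax
	// is left untouched, plus the stripSuffix helper.
	tmpl := template.New("demo")
	tmpl.Delims("[[", "]]")
	tmpl.Funcs(template.FuncMap{
		"stripSuffix": func(in string) string { return strings.TrimSuffix(in, "_test.go") },
	})

	// One per-itest stanza, reduced from the workflows section of template.yml.
	tmpl = template.Must(tmpl.Parse(
		"[[- range $file := .ItestFiles ]]\n" +
			"  - test:\n" +
			"      name: test-itest-[[ $file | stripSuffix ]]\n" +
			"      suite: itest-[[ $file | stripSuffix ]]\n" +
			"      target: \"./itests/[[ $file ]]\"\n" +
			"[[- end ]]\n"))

	// Feeding a single file name prints the test-itest-api entry seen in config.yml.
	data := struct{ ItestFiles []string }{ItestFiles: []string{"api_test.go"}}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}

Per the //go:generate directive above, the real generator is run as "go run ./gen.go .." from the .circleci directory (or via go generate); the new gen-check CI job runs "make gen" and then requires a clean git diff, which suggests that target wraps this regeneration step.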

.circleci/template.yml Normal file (902 lines)

@@ -0,0 +1,902 @@
version: 2.1
orbs:
go: gotest/tools@0.0.13
aws-cli: circleci/aws-cli@1.3.2
packer: salaxander/packer@0.0.3
executors:
golang:
docker:
- image: circleci/golang:1.16.4
resource_class: 2xlarge
ubuntu:
docker:
- image: ubuntu:20.04
commands:
install-deps:
steps:
- go/install-ssh
- go/install: {package: git}
prepare:
parameters:
linux:
default: true
description: is a linux build environment?
type: boolean
darwin:
default: false
description: is a darwin build environment?
type: boolean
steps:
- checkout
- git_fetch_all_tags
- checkout
- when:
condition: << parameters.linux >>
steps:
- run: sudo apt-get update
- run: sudo apt-get install ocl-icd-opencl-dev libhwloc-dev
- run: git submodule sync
- run: git submodule update --init
download-params:
steps:
- restore_cache:
name: Restore parameters cache
keys:
- 'v25-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
- run: ./lotus fetch-params 2048
- save_cache:
name: Save parameters cache
key: 'v25-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
install_ipfs:
steps:
- run: |
apt update
apt install -y wget
wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz
wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512
if [ "$(sha512sum go-ipfs_v0.4.22_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512)" ]
then
echo "ipfs failed checksum check"
exit 1
fi
tar -xf go-ipfs_v0.4.22_linux-amd64.tar.gz
mv go-ipfs/ipfs /usr/local/bin/ipfs
chmod +x /usr/local/bin/ipfs
git_fetch_all_tags:
steps:
- run:
name: fetch all tags
command: |
git fetch --all
jobs:
mod-tidy-check:
executor: golang
steps:
- install-deps
- prepare
- go/mod-tidy-check
build-all:
executor: golang
steps:
- install-deps
- prepare
- run: sudo apt-get update
- run: sudo apt-get install npm
- run:
command: make buildall
- store_artifacts:
path: lotus
- store_artifacts:
path: lotus-miner
- store_artifacts:
path: lotus-worker
- run: mkdir linux && mv lotus lotus-miner lotus-worker linux/
- persist_to_workspace:
root: "."
paths:
- linux
build-debug:
executor: golang
steps:
- install-deps
- prepare
- run:
command: make debug
test:
description: |
Run tests with gotestsum.
parameters: &test-params
executor:
type: executor
default: golang
go-test-flags:
type: string
default: "-timeout 30m"
description: Flags passed to go test.
target:
type: string
default: "./..."
description: Import paths of packages to be tested.
proofs-log-test:
type: string
default: "0"
suite:
type: string
default: unit
description: Test suite name to report to CircleCI.
gotestsum-format:
type: string
default: standard-verbose
description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
coverage:
type: string
default: -coverprofile=coverage.txt -coverpkg=github.com/filecoin-project/lotus/...
description: Coverage flag. Set to the empty string to disable.
codecov-upload:
type: boolean
default: true
description: |
Upload coverage report to https://codecov.io/. Requires the codecov API token to be
set as an environment variable for private projects.
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run:
command: make deps lotus
no_output_timeout: 30m
- download-params
- go/install-gotestsum:
gobin: $HOME/.local/bin
version: 0.5.2
- run:
name: go test
environment:
TEST_RUSTPROOFS_LOGS: << parameters.proofs-log-test >>
SKIP_CONFORMANCE: "1"
command: |
mkdir -p /tmp/test-reports/<< parameters.suite >>
mkdir -p /tmp/test-artifacts
gotestsum \
--format << parameters.gotestsum-format >> \
--junitfile /tmp/test-reports/<< parameters.suite >>/junit.xml \
--jsonfile /tmp/test-artifacts/<< parameters.suite >>.json \
-- \
<< parameters.coverage >> \
<< parameters.go-test-flags >> \
<< parameters.target >>
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/<< parameters.suite >>.json
- when:
condition: << parameters.codecov-upload >>
steps:
- go/install: {package: bash}
- go/install: {package: curl}
- run:
shell: /bin/bash -eo pipefail
command: |
bash <(curl -s https://codecov.io/bash)
test-conformance:
description: |
Run tests using a corpus of interoperable test vectors for Filecoin
implementations to test their correctness and compliance with the Filecoin
specifications.
parameters:
<<: *test-params
vectors-branch:
type: string
default: ""
description: |
Branch on github.com/filecoin-project/test-vectors to checkout and
test with. If empty (the default) the commit defined by the git
submodule is used.
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run:
command: make deps lotus
no_output_timeout: 30m
- download-params
- when:
condition:
not:
equal: [ "", << parameters.vectors-branch >> ]
steps:
- run:
name: checkout vectors branch
command: |
cd extern/test-vectors
git fetch
git checkout origin/<< parameters.vectors-branch >>
- go/install-gotestsum:
gobin: $HOME/.local/bin
version: 0.5.2
- run:
name: install statediff globally
command: |
## statediff is optional; we succeed even if compilation fails.
mkdir -p /tmp/statediff
git clone https://github.com/filecoin-project/statediff.git /tmp/statediff
cd /tmp/statediff
go install ./cmd/statediff || exit 0
- run:
name: go test
environment:
SKIP_CONFORMANCE: "0"
command: |
mkdir -p /tmp/test-reports
mkdir -p /tmp/test-artifacts
gotestsum \
--format pkgname-and-test-fails \
--junitfile /tmp/test-reports/junit.xml \
-- \
-v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/
go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/conformance-coverage.html
build-ntwk-calibration:
description: |
Compile lotus binaries for the calibration network
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run: make calibnet
- run: mkdir linux-calibrationnet && mv lotus lotus-miner lotus-worker linux-calibrationnet
- persist_to_workspace:
root: "."
paths:
- linux-calibrationnet
build-ntwk-butterfly:
description: |
Compile lotus binaries for the butterfly network
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run: make butterflynet
- run: mkdir linux-butterflynet && mv lotus lotus-miner lotus-worker linux-butterflynet
- persist_to_workspace:
root: "."
paths:
- linux-butterflynet
build-ntwk-nerpa:
description: |
Compile lotus binaries for the nerpa network
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run: make nerpanet
- run: mkdir linux-nerpanet && mv lotus lotus-miner lotus-worker linux-nerpanet
- persist_to_workspace:
root: "."
paths:
- linux-nerpanet
build-lotus-soup:
description: |
Compile `lotus-soup` Testground test plan
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run: cd extern/filecoin-ffi && make
- run:
name: "go get lotus@master"
command: cd testplans/lotus-soup && go mod edit -replace=github.com/filecoin-project/lotus=../.. && go mod tidy
- run:
name: "build lotus-soup testplan"
command: pushd testplans/lotus-soup && go build -tags=testground .
trigger-testplans:
description: |
Trigger `lotus-soup` test cases on TaaS
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run:
name: "download testground"
command: wget https://gist.github.com/nonsense/5fbf3167cac79945f658771aed32fc44/raw/2e17eb0debf7ec6bdf027c1bdafc2c92dd97273b/testground-d3e9603 -O ~/testground-cli && chmod +x ~/testground-cli
- run:
name: "prepare .env.toml"
command: pushd testplans/lotus-soup && mkdir -p $HOME/testground && cp env-ci.toml $HOME/testground/.env.toml && echo 'endpoint="https://ci.testground.ipfs.team"' >> $HOME/testground/.env.toml && echo 'user="circleci"' >> $HOME/testground/.env.toml
- run:
name: "prepare testground home dir and link test plans"
command: mkdir -p $HOME/testground/plans && ln -s $(pwd)/testplans/lotus-soup $HOME/testground/plans/lotus-soup && ln -s $(pwd)/testplans/graphsync $HOME/testground/plans/graphsync
- run:
name: "go get lotus@master"
command: cd testplans/lotus-soup && go get github.com/filecoin-project/lotus@master
- run:
name: "trigger deals baseline testplan on taas"
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/baseline-k8s-3-1.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
- run:
name: "trigger payment channel stress testplan on taas"
command: ~/testground-cli run composition -f $HOME/testground/plans/lotus-soup/_compositions/paych-stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
- run:
name: "trigger graphsync testplan on taas"
command: ~/testground-cli run composition -f $HOME/testground/plans/graphsync/_compositions/stress-k8s.toml --metadata-commit=$CIRCLE_SHA1 --metadata-repo=filecoin-project/lotus --metadata-branch=$CIRCLE_BRANCH
build-macos:
description: build darwin lotus binary
macos:
xcode: "10.0.0"
working_directory: ~/go/src/github.com/filecoin-project/lotus
steps:
- prepare:
linux: false
darwin: true
- run:
name: Install go
command: |
curl -O https://dl.google.com/go/go1.16.4.darwin-amd64.pkg && \
sudo installer -pkg go1.16.4.darwin-amd64.pkg -target /
- run:
name: Install pkg-config
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
- run: go version
- run:
name: Install Rust
command: |
curl https://sh.rustup.rs -sSf | sh -s -- -y
- run:
name: Install jq
command: |
curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
chmod +x /usr/local/bin/jq
- run:
name: Install hwloc
command: |
mkdir ~/hwloc
curl --location https://download.open-mpi.org/release/hwloc/v2.4/hwloc-2.4.1.tar.gz --output ~/hwloc/hwloc-2.4.1.tar.gz
cd ~/hwloc
tar -xvzpf hwloc-2.4.1.tar.gz
cd hwloc-2.4.1
./configure && make && sudo make install
- restore_cache:
name: restore cargo cache
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
- install-deps
- run:
command: make build
no_output_timeout: 30m
- store_artifacts:
path: lotus
- store_artifacts:
path: lotus-miner
- store_artifacts:
path: lotus-worker
- run: mkdir darwin && mv lotus lotus-miner lotus-worker darwin/
- persist_to_workspace:
root: "."
paths:
- darwin
- save_cache:
name: save cargo cache
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
paths:
- "~/.rustup"
- "~/.cargo"
build-appimage:
machine:
image: ubuntu-2004:202104-01
steps:
- checkout
- attach_workspace:
at: "."
- run:
name: install appimage-builder
command: |
# docs: https://appimage-builder.readthedocs.io/en/latest/intro/install.html
sudo apt update
sudo apt install -y python3-pip python3-setuptools patchelf desktop-file-utils libgdk-pixbuf2.0-dev fakeroot strace
sudo curl -Lo /usr/local/bin/appimagetool https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage
sudo chmod +x /usr/local/bin/appimagetool
sudo pip3 install appimage-builder
- run:
name: install lotus dependencies
command: sudo apt install ocl-icd-opencl-dev libhwloc-dev
- run:
name: build appimage
command: |
sed -i "s/version: latest/version: ${CIRCLE_TAG:-latest}/" AppImageBuilder.yml
make appimage
- run:
name: prepare workspace
command: |
mkdir appimage
mv Lotus-*.AppImage appimage
- persist_to_workspace:
root: "."
paths:
- appimage
gofmt:
executor: golang
steps:
- install-deps
- prepare
- run:
command: "! go fmt ./... 2>&1 | read"
gen-check:
executor: golang
steps:
- install-deps
- prepare
- run: make deps
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
- run: make gen
- run: git --no-pager diff
- run: git --no-pager diff --quiet
- run: make docsgen-cli
- run: git --no-pager diff
- run: git --no-pager diff --quiet
docs-check:
executor: golang
steps:
- install-deps
- prepare
- run: go install golang.org/x/tools/cmd/goimports
- run: zcat build/openrpc/full.json.gz | jq > ../pre-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../pre-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../pre-openrpc-worker
- run: make deps
- run: make docsgen
- run: zcat build/openrpc/full.json.gz | jq > ../post-openrpc-full
- run: zcat build/openrpc/miner.json.gz | jq > ../post-openrpc-miner
- run: zcat build/openrpc/worker.json.gz | jq > ../post-openrpc-worker
- run: git --no-pager diff
- run: diff ../pre-openrpc-full ../post-openrpc-full
- run: diff ../pre-openrpc-miner ../post-openrpc-miner
- run: diff ../pre-openrpc-worker ../post-openrpc-worker
- run: git --no-pager diff --quiet
lint: &lint
description: |
Run golangci-lint.
parameters:
executor:
type: executor
default: golang
golangci-lint-version:
type: string
default: 1.27.0
concurrency:
type: string
default: '2'
description: |
Concurrency used to run linters. Defaults to 2 because NumCPU is not
aware of container CPU limits.
args:
type: string
default: ''
description: |
Arguments to pass to golangci-lint
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run:
command: make deps
no_output_timeout: 30m
- go/install-golangci-lint:
gobin: $HOME/.local/bin
version: << parameters.golangci-lint-version >>
- run:
name: Lint
command: |
$HOME/.local/bin/golangci-lint run -v --timeout 2m \
--concurrency << parameters.concurrency >> << parameters.args >>
lint-all:
<<: *lint
publish:
description: publish binary artifacts
executor: ubuntu
steps:
- run:
name: Install git jq curl
command: apt update && apt install -y git jq curl
- checkout
- git_fetch_all_tags
- checkout
- install_ipfs
- attach_workspace:
at: "."
- run:
name: Create bundles
command: ./scripts/build-bundle.sh
- run:
name: Publish release
command: ./scripts/publish-release.sh
publish-snapcraft:
description: build and push snapcraft
machine:
image: ubuntu-2004:202104-01
resource_class: 2xlarge
parameters:
channel:
type: string
default: "edge"
description: snapcraft channel
steps:
- checkout
- run:
name: install snapcraft
command: sudo snap install snapcraft --classic
- run:
name: create snapcraft config file
command: |
mkdir -p ~/.config/snapcraft
echo "$SNAPCRAFT_LOGIN_FILE" | base64 -d > ~/.config/snapcraft/snapcraft.cfg
- run:
name: build snap
command: snapcraft --use-lxd
- run:
name: publish snap
command: snapcraft push *.snap --release << parameters.channel >>
build-and-push-image:
description: build and push docker images to public AWS ECR registry
executor: aws-cli/default
parameters:
profile-name:
type: string
default: "default"
description: AWS profile name to be configured.
aws-access-key-id:
type: env_var_name
default: AWS_ACCESS_KEY_ID
description: >
AWS access key id for IAM role. Set this to the name of
the environment variable you will set to hold this
value, i.e. AWS_ACCESS_KEY.
aws-secret-access-key:
type: env_var_name
default: AWS_SECRET_ACCESS_KEY
description: >
AWS secret key for IAM role. Set this to the name of
the environment variable you will set to hold this
value, i.e. AWS_SECRET_ACCESS_KEY.
region:
type: env_var_name
default: AWS_REGION
description: >
Name of env var storing your AWS region information,
defaults to AWS_REGION
account-url:
type: env_var_name
default: AWS_ECR_ACCOUNT_URL
description: >
Env var storing Amazon ECR account URL that maps to an AWS account,
e.g. {awsAccountNum}.dkr.ecr.us-west-2.amazonaws.com
defaults to AWS_ECR_ACCOUNT_URL
dockerfile:
type: string
default: Dockerfile
description: Name of dockerfile to use. Defaults to Dockerfile.
path:
type: string
default: .
description: Path to the directory containing your Dockerfile and build context. Defaults to . (working directory).
extra-build-args:
type: string
default: ""
description: >
Extra flags to pass to docker build. For examples, see
https://docs.docker.com/engine/reference/commandline/build
repo:
type: string
description: Name of an Amazon ECR repository
tag:
type: string
default: "latest"
description: A comma-separated string containing docker image tags to build and push (default = latest)
steps:
- run:
name: Confirm that environment variables are set
command: |
if [ -z "$AWS_ACCESS_KEY_ID" ]; then
echo "No AWS_ACCESS_KEY_ID is set. Skipping build-and-push job ..."
circleci-agent step halt
fi
- aws-cli/setup:
profile-name: <<parameters.profile-name>>
aws-access-key-id: <<parameters.aws-access-key-id>>
aws-secret-access-key: <<parameters.aws-secret-access-key>>
aws-region: <<parameters.region>>
- run:
name: Log into Amazon ECR
command: |
aws ecr-public get-login-password --region $<<parameters.region>> --profile <<parameters.profile-name>> | docker login --username AWS --password-stdin $<<parameters.account-url>>
- checkout
- setup_remote_docker:
version: 19.03.13
docker_layer_caching: false
- run:
name: Build docker image
command: |
registry_id=$(echo $<<parameters.account-url>> | sed "s;\..*;;g")
docker_tag_args=""
IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
for tag in "${DOCKER_TAGS[@]}"; do
docker_tag_args="$docker_tag_args -t $<<parameters.account-url>>/<<parameters.repo>>:$tag"
done
docker build \
<<#parameters.extra-build-args>><<parameters.extra-build-args>><</parameters.extra-build-args>> \
-f <<parameters.path>>/<<parameters.dockerfile>> \
$docker_tag_args \
<<parameters.path>>
- run:
name: Push image to Amazon ECR
command: |
IFS="," read -ra DOCKER_TAGS \<<< "<< parameters.tag >>"
for tag in "${DOCKER_TAGS[@]}"; do
docker push $<<parameters.account-url>>/<<parameters.repo>>:${tag}
done
publish-packer-mainnet:
description: build and push AWS IAM and DigitalOcean droplet.
executor:
name: packer/default
packer-version: 1.6.6
steps:
- checkout
- attach_workspace:
at: "."
- packer/build:
template: tools/packer/lotus.pkr.hcl
args: "-var ci_workspace_bins=./linux -var lotus_network=mainnet -var git_tag=$CIRCLE_TAG"
publish-packer-calibrationnet:
description: build and push AWS IAM and DigitalOcean droplet.
executor:
name: packer/default
packer-version: 1.6.6
steps:
- checkout
- attach_workspace:
at: "."
- packer/build:
template: tools/packer/lotus.pkr.hcl
args: "-var ci_workspace_bins=./linux-calibrationnet -var lotus_network=calibrationnet -var git_tag=$CIRCLE_TAG"
publish-packer-butterflynet:
description: build and push AWS IAM and DigitalOcean droplet.
executor:
name: packer/default
packer-version: 1.6.6
steps:
- checkout
- attach_workspace:
at: "."
- packer/build:
template: tools/packer/lotus.pkr.hcl
args: "-var ci_workspace_bins=./linux-butterflynet -var lotus_network=butterflynet -var git_tag=$CIRCLE_TAG"
publish-packer-nerpanet:
description: build and push AWS IAM and DigitalOcean droplet.
executor:
name: packer/default
packer-version: 1.6.6
steps:
- checkout
- attach_workspace:
at: "."
- packer/build:
template: tools/packer/lotus.pkr.hcl
args: "-var ci_workspace_bins=./linux-nerpanet -var lotus_network=nerpanet -var git_tag=$CIRCLE_TAG"
workflows:
version: 2.1
ci:
jobs:
- lint-all:
concurrency: "16" # expend all docker 2xlarge CPUs.
- mod-tidy-check
- gofmt
- gen-check
- docs-check
[[- range $file := .ItestFiles -]]
[[ with $name := $file | stripSuffix ]]
- test:
name: test-itest-[[ $name ]]
suite: itest-[[ $name ]]
target: "./itests/[[ $file ]]"
[[ end ]]
[[- end -]]
[[range $suite, $pkgs := .UnitSuites]]
- test:
name: test-[[ $suite ]]
suite: utest-[[ $suite ]]
target: "[[ $pkgs ]]"
[[- end]]
- test:
go-test-flags: "-run=TestMulticoreSDR"
suite: multicore-sdr-check
target: "./extern/sector-storage/ffiwrapper"
proofs-log-test: "1"
- test-conformance:
suite: conformance
codecov-upload: false
target: "./conformance"
- test-conformance:
name: test-conformance-bleeding-edge
codecov-upload: false
suite: conformance-bleeding-edge
target: "./conformance"
vectors-branch: master
- trigger-testplans:
filters:
branches:
only:
- master
- build-debug
- build-all:
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-ntwk-calibration:
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-ntwk-butterfly:
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-ntwk-nerpa:
filters:
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-lotus-soup
- build-macos:
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-appimage:
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish:
requires:
- build-all
- build-macos
- build-appimage
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- build-and-push-image:
dockerfile: Dockerfile.lotus
path: .
repo: lotus-dev
tag: '${CIRCLE_SHA1:0:8}'
- publish-packer-mainnet:
requires:
- build-all
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish-packer-calibrationnet:
requires:
- build-ntwk-calibration
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish-packer-butterflynet:
requires:
- build-ntwk-butterfly
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish-packer-nerpanet:
requires:
- build-ntwk-nerpa
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
- publish-snapcraft:
name: publish-snapcraft-stable
channel: stable
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+(-rc\d+)?$/
nightly:
triggers:
- schedule:
cron: "0 0 * * *"
filters:
branches:
only:
- master
jobs:
- publish-snapcraft:
name: publish-snapcraft-nightly
channel: edge

.github/CODEOWNERS vendored (16 lines deleted)

@ -1,16 +0,0 @@
## filecoin-project/lotus CODEOWNERS
## Refer to https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners.
##
## These users or groups will be automatically assigned as reviewers every time
## a PR is submitted that modifies code in the specified locations.
##
## The Lotus repo configuration requires that at least ONE codeowner approves
## the PR before merging.
### Global owners.
* @magik6k @arajasek
### Conformance testing.
conformance/ @ZenGround0
extern/test-vectors @ZenGround0
cmd/tvx @ZenGround0


@@ -35,6 +35,10 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v2
+- uses: actions/setup-go@v1
+with:
+go-version: '1.16.4'
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1


@@ -0,0 +1,29 @@
---
name: Testground PR Checker
on: [push]
jobs:
testground:
runs-on: ubuntu-latest
name: ${{ matrix.composition_file }}
strategy:
matrix:
include:
- backend_addr: ci.testground.ipfs.team
backend_proto: https
plan_directory: testplans/lotus-soup
composition_file: testplans/lotus-soup/_compositions/baseline-k8s-3-1.toml
- backend_addr: ci.testground.ipfs.team
backend_proto: https
plan_directory: testplans/lotus-soup
composition_file: testplans/lotus-soup/_compositions/paych-stress-k8s.toml
steps:
- uses: actions/checkout@v2
- name: testground run
uses: coryschwartz/testground-github-action@v1.1
with:
backend_addr: ${{ matrix.backend_addr }}
backend_proto: ${{ matrix.backend_proto }}
plan_directory: ${{ matrix.plan_directory }}
composition_file: ${{ matrix.composition_file }}

.gitignore vendored (4 lines added)

@@ -1,3 +1,6 @@
+/AppDir
+/appimage-builder-cache
+*.AppImage
/lotus
/lotus-miner
/lotus-worker
@@ -5,6 +8,7 @@
/lotus-health
/lotus-chainwatch
/lotus-shed
+/lotus-sim
/lotus-pond
/lotus-townhall
/lotus-fountain

CHANGELOG.md

@@ -2,8 +2,8 @@
# 1.10.0 / 2021-06-23
This is a mandatory release of Lotus that introduces Filecoin network v13, codenamed the HyperDrive upgrade. The
Filecoin mainnet will upgrade, which is epoch 892800, on 2021-06-30T22:00:00Z. The network upgrade introduces the
following FIPs:
- [FIP-0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.md): Add miner batched sector pre-commit method
@@ -20,10 +20,10 @@ FIPs [0008](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0008.m
**Check out the documentation [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#precommitsectorsbatch) for details on the new Lotus miner sealing config options, [here](https://docs.filecoin.io/mine/lotus/miner-configuration/#fees-section) for fee config options, and explanations of the new features.**
Note:
- We recommend to keep `PreCommitSectorsBatch` as 1.
- We recommend miners to set `PreCommitBatchWait` lower than 30 hours.
- We recommend miners to set a longer `CommitBatchSlack` and `PreCommitBatchSlack` to prevent message failures
due to expirations.
### Projected state tree growth
@@ -63,35 +63,35 @@ Included in the HyperDrive upgrade is [FIP-0015](https://github.com/filecoin-pro
- Always flush when timer goes off ([filecoin-project/lotus#6563](https://github.com/filecoin-project/lotus/pull/6563))
- Update default fees for aggregates ([filecoin-project/lotus#6548](https://github.com/filecoin-project/lotus/pull/6548))
- sealing: Early finalization option ([filecoin-project/lotus#6452](https://github.com/filecoin-project/lotus/pull/6452))
- `./lotus-miner/config.toml/[Sealing.FinalizeEarly]`: default to false. Enable if you want to FinalizeSector before commiting
- Add filplus utils to CLI ([filecoin-project/lotus#6351](https://github.com/filecoin-project/lotus/pull/6351))
- cli doc can be found [here](https://github.com/filecoin-project/lotus/blob/master/documentation/en/cli-lotus.md#lotus-filplus)
- Add miner-side MaxDealStartDelay config ([filecoin-project/lotus#6576](https://github.com/filecoin-project/lotus/pull/6576))
### Bug Fixes
- chainstore: Don't take heaviestLk with backlogged reorgCh ([filecoin-project/lotus#6526](https://github.com/filecoin-project/lotus/pull/6526))
- Backport #6041 - storagefsm: Fix batch deal packing behavior ([filecoin-project/lotus#6519](https://github.com/filecoin-project/lotus/pull/6519))
- backport: pick the correct partitions-per-post limit ([filecoin-project/lotus#6503](https://github.com/filecoin-project/lotus/pull/6503))
- failed sectors should be added into res correctly ([filecoin-project/lotus#6472](https://github.com/filecoin-project/lotus/pull/6472))
- sealing: Fix restartSectors race ([filecoin-project/lotus#6491](https://github.com/filecoin-project/lotus/pull/6491))
- Fund miners with the aggregate fee when ProveCommitting ([filecoin-project/lotus#6428](https://github.com/filecoin-project/lotus/pull/6428))
- Commit and Precommit batcher cannot share a getSectorDeadline method ([filecoin-project/lotus#6416](https://github.com/filecoin-project/lotus/pull/6416))
- Fix supported proof type manipulations for v5 actors ([filecoin-project/lotus#6366](https://github.com/filecoin-project/lotus/pull/6366))
- events: Fix handling of multiple matched events per epoch ([filecoin-project/lotus#6362](https://github.com/filecoin-project/lotus/pull/6362))
- Fix randomness fetching around null blocks ([filecoin-project/lotus#6240](https://github.com/filecoin-project/lotus/pull/6240))
### Improvements
- Appimage v1.10.0 rc3 ([filecoin-project/lotus#6492](https://github.com/filecoin-project/lotus/pull/6492))
- Expand on Drand change testing ([filecoin-project/lotus#6500](https://github.com/filecoin-project/lotus/pull/6500))
- Backport Fix logging around mineOne ([filecoin-project/lotus#6499](https://github.com/filecoin-project/lotus/pull/6499))
- mpool: Add more metrics ([filecoin-project/lotus#6453](https://github.com/filecoin-project/lotus/pull/6453))
- Merge backported PRs into v1.10 release branch ([filecoin-project/lotus#6436](https://github.com/filecoin-project/lotus/pull/6436))
- Fix tests ([filecoin-project/lotus#6371](https://github.com/filecoin-project/lotus/pull/6371))
- Extend the default deal start epoch delay ([filecoin-project/lotus#6350](https://github.com/filecoin-project/lotus/pull/6350))
- sealing: Wire up context to batchers ([filecoin-project/lotus#6497](https://github.com/filecoin-project/lotus/pull/6497))
- Improve address resolution for messages ([filecoin-project/lotus#6364](https://github.com/filecoin-project/lotus/pull/6364))
### Dependency Updates
- Proofs v8.0.2 ([filecoin-project/lotus#6524](https://github.com/filecoin-project/lotus/pull/6524))
- Update to fixed Bellperson ([filecoin-project/lotus#6480](https://github.com/filecoin-project/lotus/pull/6480))
@@ -102,7 +102,7 @@ Included in the HyperDrive upgrade is [FIP-0015](https://github.com/filecoin-pro
- github.com/filecoin-project/go-hamt-ipld/v3 (v3.0.1 -> v3.1.0)
- github.com/ipfs/go-log/v2 (v2.1.2-0.20200626104915-0016c0b4b3e4 -> v2.1.3)
- github.com/filecoin-project/go-amt-ipld/v3 (v3.0.0 -> v3.1.0)
### Network Version v13 HyperDrive Upgrade
- Set HyperDrive upgrade epoch ([filecoin-project/lotus#6565](https://github.com/filecoin-project/lotus/pull/6565))
- version bump to lotus v1.10.0-rc6 ([filecoin-project/lotus#6529](https://github.com/filecoin-project/lotus/pull/6529))
@@ -208,143 +208,6 @@ This is an optional Lotus release that introduces various improvements to the se
- fix health report (https://github.com/filecoin-project/lotus/pull/6011)
- fix(ci): Use recent ubuntu LTS release; Update release params ((https://github.com/filecoin-project/lotus/pull/6011))
# 1.9.0-rc4 / 2021-05-13
This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes.
## Highlights
- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843)
- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876)
- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892)
- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871)
- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833)
- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917)
- go-fil-markets v1.1.9 -> v1.2.5
- For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md
- rust-fil-proofs v5.4.1 -> v7.0.1
- For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md
## Changes
- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962)
- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743)
- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778)
- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799)
- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851)
- perf: add cache for gas permium estimation (https://github.com/filecoin-project/lotus/pull/5709)
- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875)
- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891)
- State CLI improvements (State CLI improvements)
- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854)
- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791)
- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693)
- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864)
- mock sealer: Simulate randomness sideeffects (https://github.com/filecoin-project/lotus/pull/5805)
- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976)
- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800)
- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867)
- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092)
- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819)
- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900)
- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974)
- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850)
- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814)
- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838)
- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840)
- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184)
- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999)
- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881)
- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270)
- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884)
- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930)
- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971)
- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972)
- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992)
- Add a mining-heartbeat INFO line at every epoch (https://github.com/filecoin-project/lotus/pull/6183)
- chore(ci): Enable build on RC tags (https://github.com/filecoin-project/lotus/pull/6245)
- Upgrade nerpa to actor v4 and bump the version to rc4 (https://github.com/filecoin-project/lotus/pull/6249)
## Fixes
- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796)
- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792)
- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801)
- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653)
- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804)
- make sure atomic 64bit fields are 64bit aligned (https://github.com/filecoin-project/lotus/pull/5794)
- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879)
- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934)
- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807)
- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003)
- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943)
- fix health report (https://github.com/filecoin-project/lotus/pull/6011)
# 1.9.0-rc2 / 2021-04-30
This is an optional Lotus release that introduces various improvements to the sealing, mining, and deal-making processes.
## Highlights
- OpenRPC Support (https://github.com/filecoin-project/lotus/pull/5843)
- Take latency into account when making interactive deals (https://github.com/filecoin-project/lotus/pull/5876)
- Update go-commp-utils for >10x faster client commp calculation (https://github.com/filecoin-project/lotus/pull/5892)
- add `lotus client cancel-retrieval` cmd to lotus CLI (https://github.com/filecoin-project/lotus/pull/5871)
- add `inspect-deal` command to `lotus client` (https://github.com/filecoin-project/lotus/pull/5833)
- Local retrieval support (https://github.com/filecoin-project/lotus/pull/5917)
- go-fil-markets v1.1.9 -> v1.2.5
- For a detailed changelog see https://github.com/filecoin-project/go-fil-markets/blob/master/CHANGELOG.md
- rust-fil-proofs v5.4.1 -> v7
- For a detailed changelog see https://github.com/filecoin-project/rust-fil-proofs/blob/master/CHANGELOG.md
## Changes
- storagefsm: Apply global events even in broken states (https://github.com/filecoin-project/lotus/pull/5962)
- Default the AlwaysKeepUnsealedCopy flag to true (https://github.com/filecoin-project/lotus/pull/5743)
- splitstore: compact hotstore prior to garbage collection (https://github.com/filecoin-project/lotus/pull/5778)
- ipfs-force bootstrapper update (https://github.com/filecoin-project/lotus/pull/5799)
- better logging when unsealing fails (https://github.com/filecoin-project/lotus/pull/5851)
- perf: add cache for gas premium estimation (https://github.com/filecoin-project/lotus/pull/5709)
- backupds: Compact log on restart (https://github.com/filecoin-project/lotus/pull/5875)
- backupds: Improve truncated log handling (https://github.com/filecoin-project/lotus/pull/5891)
- State CLI improvements
- API proxy struct codegen (https://github.com/filecoin-project/lotus/pull/5854)
- move DI stuff for paychmgr into modules (https://github.com/filecoin-project/lotus/pull/5791)
- Implement Event observer and Settings for 3rd party dep injection (https://github.com/filecoin-project/lotus/pull/5693)
- Export developer and network commands for consumption by derivatives of Lotus (https://github.com/filecoin-project/lotus/pull/5864)
- mock sealer: Simulate randomness side effects (https://github.com/filecoin-project/lotus/pull/5805)
- localstorage: Demote reservation stat error to debug (https://github.com/filecoin-project/lotus/pull/5976)
- shed command to unpack miner info dumps (https://github.com/filecoin-project/lotus/pull/5800)
- Add two utils to Lotus-shed (https://github.com/filecoin-project/lotus/pull/5867)
- add shed election estimate command (https://github.com/filecoin-project/lotus/pull/5092)
- Add --actor flag in lotus-shed sectors terminate (https://github.com/filecoin-project/lotus/pull/5819)
- Move lotus mpool clear to lotus-shed (https://github.com/filecoin-project/lotus/pull/5900)
- Centralize everything on ipfs/go-log/v2 (https://github.com/filecoin-project/lotus/pull/5974)
- expose NextID from nice market actor interface (https://github.com/filecoin-project/lotus/pull/5850)
- add available options for perm on error (https://github.com/filecoin-project/lotus/pull/5814)
- API docs clarification: Document StateSearchMsg replaced message behavior (https://github.com/filecoin-project/lotus/pull/5838)
- api: Document StateReplay replaced message behavior (https://github.com/filecoin-project/lotus/pull/5840)
- add godocs to miner objects (https://github.com/filecoin-project/lotus/pull/2184)
- Add description to the client deal CLI command (https://github.com/filecoin-project/lotus/pull/5999)
- lint: don't skip builtin (https://github.com/filecoin-project/lotus/pull/5881)
- use deal duration from actors (https://github.com/filecoin-project/lotus/pull/5270)
- remote calc winningpost proof (https://github.com/filecoin-project/lotus/pull/5884)
- packer: other network images (https://github.com/filecoin-project/lotus/pull/5930)
- Convert the chainstore lock to RW (https://github.com/filecoin-project/lotus/pull/5971)
- Remove CachedBlockstore (https://github.com/filecoin-project/lotus/pull/5972)
- remove messagepool CapGasFee duplicate code (https://github.com/filecoin-project/lotus/pull/5992)
## Fixes
- return buffers after canceling badger operation (https://github.com/filecoin-project/lotus/pull/5796)
- avoid holding a lock while calling the View callback (https://github.com/filecoin-project/lotus/pull/5792)
- storagefsm: Trigger input processing when below limits (https://github.com/filecoin-project/lotus/pull/5801)
- After importing a previously deleted key, be able to delete it again (https://github.com/filecoin-project/lotus/pull/4653)
- fix StateManager.Replay on reward actor (https://github.com/filecoin-project/lotus/pull/5804)
- make sure atomic 64bit fields are 64bit aligned (https://github.com/filecoin-project/lotus/pull/5794)
- Import secp sigs in paych tests (https://github.com/filecoin-project/lotus/pull/5879)
- fix ci build-macos (https://github.com/filecoin-project/lotus/pull/5934)
- Fix creation of remainder account when it's not a multisig (https://github.com/filecoin-project/lotus/pull/5807)
- Fix fallback chainstore (https://github.com/filecoin-project/lotus/pull/6003)
- fix 4857: show help for set-addrs (https://github.com/filecoin-project/lotus/pull/5943)
- fix health report (https://github.com/filecoin-project/lotus/pull/6011)
# 1.8.0 / 2021-04-05 # 1.8.0 / 2021-04-05
This is a mandatory release of Lotus that upgrades the network to version 12, which introduces various performance improvements to the cron processing of the power actor. The network will upgrade at height 712320, which is 2021-04-29T06:00:00Z. This is a mandatory release of Lotus that upgrades the network to version 12, which introduces various performance improvements to the cron processing of the power actor. The network will upgrade at height 712320, which is 2021-04-29T06:00:00Z.

View File

@ -1,4 +1,4 @@
FROM golang:1.15.6 AS builder-deps FROM golang:1.16.4 AS builder-deps
MAINTAINER Lotus Development Team MAINTAINER Lotus Development Team
RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev

View File

@ -6,9 +6,9 @@ all: build
unexport GOFLAGS unexport GOFLAGS
GOVERSION:=$(shell go version | cut -d' ' -f 3 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}') GOVERSION:=$(shell go version | cut -d' ' -f 3 | sed 's/^go//' | awk -F. '{printf "%d%03d%03d", $$1, $$2, $$3}')
ifeq ($(shell expr $(GOVERSION) \< 1015005), 1) ifeq ($(shell expr $(GOVERSION) \< 1016000), 1)
$(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000)) $(warning Your Golang version is go$(shell expr $(GOVERSION) / 1000000).$(shell expr $(GOVERSION) % 1000000 / 1000).$(shell expr $(GOVERSION) % 1000))
$(error Update Golang to version to at least 1.15.5) $(error Update Golang to version to at least 1.16.0)
endif endif
# git modules that need to be loaded # git modules that need to be loaded
@ -47,7 +47,6 @@ BUILD_DEPS+=ffi-version-check
.PHONY: ffi-version-check .PHONY: ffi-version-check
$(MODULES): build/.update-modules ; $(MODULES): build/.update-modules ;
# dummy file that marks the last time modules were updated # dummy file that marks the last time modules were updated
build/.update-modules: build/.update-modules:
@ -81,10 +80,12 @@ nerpanet: build-devnets
butterflynet: GOFLAGS+=-tags=butterflynet butterflynet: GOFLAGS+=-tags=butterflynet
butterflynet: build-devnets butterflynet: build-devnets
interopnet: GOFLAGS+=-tags=interopnet
interopnet: build-devnets
lotus: $(BUILD_DEPS) lotus: $(BUILD_DEPS)
rm -f lotus rm -f lotus
go build $(GOFLAGS) -o lotus ./cmd/lotus go build $(GOFLAGS) -o lotus ./cmd/lotus
go run github.com/GeertJohan/go.rice/rice append --exec lotus -i ./build
.PHONY: lotus .PHONY: lotus
BINS+=lotus BINS+=lotus
@ -92,21 +93,18 @@ BINS+=lotus
lotus-miner: $(BUILD_DEPS) lotus-miner: $(BUILD_DEPS)
rm -f lotus-miner rm -f lotus-miner
go build $(GOFLAGS) -o lotus-miner ./cmd/lotus-storage-miner go build $(GOFLAGS) -o lotus-miner ./cmd/lotus-storage-miner
go run github.com/GeertJohan/go.rice/rice append --exec lotus-miner -i ./build
.PHONY: lotus-miner .PHONY: lotus-miner
BINS+=lotus-miner BINS+=lotus-miner
lotus-worker: $(BUILD_DEPS) lotus-worker: $(BUILD_DEPS)
rm -f lotus-worker rm -f lotus-worker
go build $(GOFLAGS) -o lotus-worker ./cmd/lotus-seal-worker go build $(GOFLAGS) -o lotus-worker ./cmd/lotus-seal-worker
go run github.com/GeertJohan/go.rice/rice append --exec lotus-worker -i ./build
.PHONY: lotus-worker .PHONY: lotus-worker
BINS+=lotus-worker BINS+=lotus-worker
lotus-shed: $(BUILD_DEPS) lotus-shed: $(BUILD_DEPS)
rm -f lotus-shed rm -f lotus-shed
go build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed go build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed
go run github.com/GeertJohan/go.rice/rice append --exec lotus-shed -i ./build
.PHONY: lotus-shed .PHONY: lotus-shed
BINS+=lotus-shed BINS+=lotus-shed
@ -138,7 +136,6 @@ install-worker:
lotus-seed: $(BUILD_DEPS) lotus-seed: $(BUILD_DEPS)
rm -f lotus-seed rm -f lotus-seed
go build $(GOFLAGS) -o lotus-seed ./cmd/lotus-seed go build $(GOFLAGS) -o lotus-seed ./cmd/lotus-seed
go run github.com/GeertJohan/go.rice/rice append --exec lotus-seed -i ./build
.PHONY: lotus-seed .PHONY: lotus-seed
BINS+=lotus-seed BINS+=lotus-seed
@ -172,13 +169,11 @@ lotus-townhall-front:
.PHONY: lotus-townhall-front .PHONY: lotus-townhall-front
lotus-townhall-app: lotus-touch lotus-townhall-front lotus-townhall-app: lotus-touch lotus-townhall-front
go run github.com/GeertJohan/go.rice/rice append --exec lotus-townhall -i ./cmd/lotus-townhall -i ./build
.PHONY: lotus-townhall-app .PHONY: lotus-townhall-app
lotus-fountain: lotus-fountain:
rm -f lotus-fountain rm -f lotus-fountain
go build -o lotus-fountain ./cmd/lotus-fountain go build -o lotus-fountain ./cmd/lotus-fountain
go run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build
.PHONY: lotus-fountain .PHONY: lotus-fountain
BINS+=lotus-fountain BINS+=lotus-fountain
@ -191,28 +186,24 @@ BINS+=lotus-chainwatch
lotus-bench: lotus-bench:
rm -f lotus-bench rm -f lotus-bench
go build -o lotus-bench ./cmd/lotus-bench go build -o lotus-bench ./cmd/lotus-bench
go run github.com/GeertJohan/go.rice/rice append --exec lotus-bench -i ./build
.PHONY: lotus-bench .PHONY: lotus-bench
BINS+=lotus-bench BINS+=lotus-bench
lotus-stats: lotus-stats:
rm -f lotus-stats rm -f lotus-stats
go build $(GOFLAGS) -o lotus-stats ./cmd/lotus-stats go build $(GOFLAGS) -o lotus-stats ./cmd/lotus-stats
go run github.com/GeertJohan/go.rice/rice append --exec lotus-stats -i ./build
.PHONY: lotus-stats .PHONY: lotus-stats
BINS+=lotus-stats BINS+=lotus-stats
lotus-pcr: lotus-pcr:
rm -f lotus-pcr rm -f lotus-pcr
go build $(GOFLAGS) -o lotus-pcr ./cmd/lotus-pcr go build $(GOFLAGS) -o lotus-pcr ./cmd/lotus-pcr
go run github.com/GeertJohan/go.rice/rice append --exec lotus-pcr -i ./build
.PHONY: lotus-pcr .PHONY: lotus-pcr
BINS+=lotus-pcr BINS+=lotus-pcr
lotus-health: lotus-health:
rm -f lotus-health rm -f lotus-health
go build -o lotus-health ./cmd/lotus-health go build -o lotus-health ./cmd/lotus-health
go run github.com/GeertJohan/go.rice/rice append --exec lotus-health -i ./build
.PHONY: lotus-health .PHONY: lotus-health
BINS+=lotus-health BINS+=lotus-health
@ -243,6 +234,12 @@ BINS+=tvx
install-chainwatch: lotus-chainwatch install-chainwatch: lotus-chainwatch
install -C ./lotus-chainwatch /usr/local/bin/lotus-chainwatch install -C ./lotus-chainwatch /usr/local/bin/lotus-chainwatch
lotus-sim: $(BUILD_DEPS)
rm -f lotus-sim
go build $(GOFLAGS) -o lotus-sim ./cmd/lotus-sim
.PHONY: lotus-sim
BINS+=lotus-sim
# SYSTEMD # SYSTEMD
install-daemon-service: install-daemon install-daemon-service: install-daemon
@ -303,17 +300,10 @@ clean-services: clean-all-services
buildall: $(BINS) buildall: $(BINS)
completions:
./scripts/make-completions.sh lotus
./scripts/make-completions.sh lotus-miner
.PHONY: completions
install-completions: install-completions:
mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/ mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/
install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus
install -C ./scripts/bash-completion/lotus-miner /usr/share/bash-completion/completions/lotus-miner
install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus
install -C ./scripts/zsh-completion/lotus-miner /usr/local/share/zsh/site-functions/_lotus-miner
clean: clean:
rm -rf $(CLEAN) $(BINS) rm -rf $(CLEAN) $(BINS)
@ -343,17 +333,15 @@ api-gen:
goimports -w api goimports -w api
.PHONY: api-gen .PHONY: api-gen
appimage: $(BUILD_DEPS) appimage: lotus
rm -rf appimage-builder-cache || true rm -rf appimage-builder-cache || true
rm AppDir/io.filecoin.lotus.desktop || true rm AppDir/io.filecoin.lotus.desktop || true
rm AppDir/icon.svg || true rm AppDir/icon.svg || true
rm Appdir/AppRun || true rm Appdir/AppRun || true
mkdir -p AppDir/usr/bin mkdir -p AppDir/usr/bin
rm -rf lotus
go run github.com/GeertJohan/go.rice/rice embed-go -i ./build
go build $(GOFLAGS) -o lotus ./cmd/lotus
cp ./lotus AppDir/usr/bin/ cp ./lotus AppDir/usr/bin/
appimage-builder appimage-builder
docsgen: docsgen-md docsgen-openrpc docsgen: docsgen-md docsgen-openrpc
docsgen-md-bin: api-gen actors-gen docsgen-md-bin: api-gen actors-gen
@ -382,8 +370,21 @@ docsgen-openrpc-worker: docsgen-openrpc-bin
.PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin .PHONY: docsgen docsgen-md-bin docsgen-openrpc-bin
gen: actors-gen type-gen method-gen docsgen api-gen gen: actors-gen type-gen method-gen docsgen api-gen circleci
@echo ">>> IF YOU'VE MODIFIED THE CLI, REMEMBER TO ALSO MAKE docsgen-cli"
.PHONY: gen .PHONY: gen
snap: lotus lotus-miner lotus-worker
snapcraft
# snapcraft upload ./lotus_*.snap
# separate from gen because it needs binaries
docsgen-cli: lotus lotus-miner lotus-worker
python ./scripts/generate-lotus-cli.py
.PHONY: docsgen-cli
print-%: print-%:
@echo $*=$($*) @echo $*=$($*)
circleci:
go generate -x ./.circleci
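The Makefile hunks above drop the go.rice embedding steps and add a few new targets (`lotus-sim`, `docsgen-cli`, `circleci`). A minimal usage sketch, assuming a checked-out lotus tree with the usual build dependencies installed:
```shell
# Regenerate the CircleCI config from its Go template (new `circleci` target, also run by `make gen`)
make circleci

# Build the chain simulation tool added in this change set
make lotus-sim

# Regenerate CLI docs; kept separate from `gen` because it needs the built binaries
make docsgen-cli
```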

View File

@ -10,7 +10,7 @@
<a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a> <a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
<a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a> <a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
<a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a> <a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.15.5-blue.svg" /></a> <a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.16-blue.svg" /></a>
<br> <br>
</p> </p>
@ -18,7 +18,7 @@ Lotus is an implementation of the Filecoin Distributed Storage Network. For more
## Building & Documentation ## Building & Documentation
For instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). For complete instructions on how to build, install and setup lotus, please visit [https://docs.filecoin.io/get-started/lotus](https://docs.filecoin.io/get-started/lotus/). Basic build instructions can be found further down in this readme.
## Reporting a Vulnerability ## Reporting a Vulnerability
@ -50,6 +50,88 @@ When implementing a change:
7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description. 7. Title the PR in a meaningful way and describe the rationale and the thought process in the PR description.
8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message. 8. Write clean, thoughtful, and detailed [commit messages](https://chris.beams.io/posts/git-commit/). This is even more important than the PR description, because commit messages are stored _inside_ the Git history. One good rule is: if you are happy posting the commit message as the PR description, then it's a good commit message.
## Basic Build Instructions
**System-specific Software Dependencies**:
Building Lotus requires some system dependencies, usually provided by your distribution.
Ubuntu/Debian:
```
sudo apt install mesa-opencl-icd ocl-icd-opencl-dev gcc git bzr jq pkg-config curl clang build-essential hwloc libhwloc-dev wget -y && sudo apt upgrade -y
```
Fedora:
```
sudo dnf -y install gcc make git bzr jq pkgconfig mesa-libOpenCL mesa-libOpenCL-devel opencl-headers ocl-icd ocl-icd-devel clang llvm wget hwloc libhwloc-dev
```
For other distributions you can find the required dependencies [here.](https://docs.filecoin.io/get-started/lotus/installation/#system-specific) For instructions specific to macOS, you can find them [here.](https://docs.filecoin.io/get-started/lotus/installation/#macos)
#### Go
To build Lotus, you need a working installation of [Go 1.16.4 or higher](https://golang.org/dl/):
```bash
wget -c https://golang.org/dl/go1.16.4.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local
```
**TIP:**
You'll need to add `/usr/local/go/bin` to your path. For most Linux distributions you can run something like:
```shell
echo "export PATH=$PATH:/usr/local/go/bin" >> ~/.bashrc && source ~/.bashrc
```
See the [official Golang installation instructions](https://golang.org/doc/install) if you get stuck.
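A quick way to confirm the toolchain is picked up from `PATH` and meets the 1.16 minimum (plain Go tooling, nothing Lotus-specific):
```shell
go version
# expect output similar to: go version go1.16.4 linux/amd64
```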
### Build and install Lotus
Once all the dependencies are installed, you can build and install the Lotus suite (`lotus`, `lotus-miner`, and `lotus-worker`).
1. Clone the repository:
```sh
git clone https://github.com/filecoin-project/lotus.git
cd lotus/
```
Note: The default branch `master` is the dev branch, where the latest new features, bug fixes and improvements land. However, if you want to run lotus on the Filecoin mainnet in a production setting, get the latest release [here](https://github.com/filecoin-project/lotus/releases).
2. To join mainnet, checkout the [latest release](https://github.com/filecoin-project/lotus/releases).
If you are changing networks from a previous Lotus installation or there has been a network reset, read the [Switch networks guide](https://docs.filecoin.io/get-started/lotus/switch-networks/) before proceeding.
For networks other than mainnet, look up the current branch or tag/commit for the network you want to join in the [Filecoin networks dashboard](https://network.filecoin.io), then build Lotus for your specific network below.
```sh
git checkout <tag_or_branch>
# For example:
git checkout <vX.X.X> # tag for a release
```
Currently, the latest code on the _master_ branch corresponds to mainnet.
3. If you are in China, see "[Lotus: tips when running in China](https://docs.filecoin.io/get-started/lotus/tips-running-in-china/)".
4. This build instruction uses the prebuilt proofs binaries. If you want to build the proof binaries from source check the [complete instructions](https://docs.filecoin.io/get-started/lotus/installation/#build-and-install-lotus). Note, if you are building the proof binaries from source, [installing rustup](https://docs.filecoin.io/get-started/lotus/installation/#rustup) is also needed.
5. Build and install Lotus:
```sh
make clean all #mainnet
# Or to join a testnet or devnet:
make clean calibnet # Calibration with min 32GiB sectors
make clean nerpanet # Nerpa with min 512MiB sectors
sudo make install
```
This will put `lotus`, `lotus-miner` and `lotus-worker` in `/usr/local/bin`.
`lotus` will use the `$HOME/.lotus` folder by default for storage (configuration, chain data, wallets, etc). See [advanced options](https://docs.filecoin.io/get-started/lotus/configuration-and-advanced-usage/) for information on how to customize the Lotus folder.
6. You should now have Lotus installed. You can now [start the Lotus daemon and sync the chain](https://docs.filecoin.io/get-started/lotus/installation/#start-the-lotus-daemon-and-sync-the-chain).
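A minimal first-run sketch once the binaries are installed (standard lotus subcommands; systemd users can instead rely on the `install-daemon-service` Makefile target shown above):
```shell
# Start the daemon in the background
lotus daemon &

# Block until the node has fully synced the chain before issuing other commands
lotus sync wait
```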
## License ## License
Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE) Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE)

View File

@ -252,6 +252,13 @@ type FullNode interface {
// MpoolBatchPushMessage batch pushes a unsigned message to mempool. // MpoolBatchPushMessage batch pushes a unsigned message to mempool.
MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign MpoolBatchPushMessage(context.Context, []*types.Message, *MessageSendSpec) ([]*types.SignedMessage, error) //perm:sign
// MpoolCheckMessages performs logical checks on a batch of messages
MpoolCheckMessages(context.Context, []*MessagePrototype) ([][]MessageCheckStatus, error) //perm:read
// MpoolCheckPendingMessages performs logical checks for all pending messages from a given address
MpoolCheckPendingMessages(context.Context, address.Address) ([][]MessageCheckStatus, error) //perm:read
// MpoolCheckReplaceMessages performs logical checks on pending messages with replacement
MpoolCheckReplaceMessages(context.Context, []*types.Message) ([][]MessageCheckStatus, error) //perm:read
// MpoolGetNonce gets next nonce for the specified sender. // MpoolGetNonce gets next nonce for the specified sender.
// Note that this method may not be atomic. Use MpoolPushMessage instead. // Note that this method may not be atomic. Use MpoolPushMessage instead.
MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read MpoolGetNonce(context.Context, address.Address) (uint64, error) //perm:read
@ -316,6 +323,8 @@ type FullNode interface {
ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin
// ClientStartDeal proposes a deal with a miner. // ClientStartDeal proposes a deal with a miner.
ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:admin
// ClientStatelessDeal fire-and-forget-proposes an offline deal to a miner without subsequent tracking.
ClientStatelessDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) //perm:write
// ClientGetDealInfo returns the latest information about a given deal. // ClientGetDealInfo returns the latest information about a given deal.
ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) //perm:read
// ClientListDeals returns information about the deals made by the local client. // ClientListDeals returns information about the deals made by the local client.
@ -335,6 +344,10 @@ type FullNode interface {
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
// of status updates. // of status updates.
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
// ClientListRetrievals returns information about retrievals made by the local client
ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write
// ClientGetRetrievalUpdates returns status of updated retrieval deals
ClientGetRetrievalUpdates(ctx context.Context) (<-chan RetrievalInfo, error) //perm:write
// ClientQueryAsk returns a signed StorageAsk from the specified miner. // ClientQueryAsk returns a signed StorageAsk from the specified miner.
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
// ClientCalcCommP calculates the CommP and data size of the specified CID // ClientCalcCommP calculates the CommP and data size of the specified CID
@ -579,15 +592,16 @@ type FullNode interface {
// MsigCreate creates a multisig wallet // MsigCreate creates a multisig wallet
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration> // It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
//<initial balance>, <sender address of the create msg>, <gas price> //<initial balance>, <sender address of the create msg>, <gas price>
MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) //perm:sign MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (*MessagePrototype, error) //perm:sign
// MsigPropose proposes a multisig message // MsigPropose proposes a multisig message
// It takes the following params: <multisig address>, <recipient address>, <value to transfer>, // It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
// <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message> // <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
// MsigApprove approves a previously-proposed multisig message by transaction ID // MsigApprove approves a previously-proposed multisig message by transaction ID
// It takes the following params: <multisig address>, <proposed transaction ID> <signer address> // It takes the following params: <multisig address>, <proposed transaction ID> <signer address>
MsigApprove(context.Context, address.Address, uint64, address.Address) (cid.Cid, error) //perm:sign MsigApprove(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign
// MsigApproveTxnHash approves a previously-proposed multisig message, specified // MsigApproveTxnHash approves a previously-proposed multisig message, specified
// using both transaction ID and a hash of the parameters used in the // using both transaction ID and a hash of the parameters used in the
@ -595,43 +609,49 @@ type FullNode interface {
// exactly the transaction you think you are. // exactly the transaction you think you are.
// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>, // It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
// <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message> // <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
// MsigCancel cancels a previously-proposed multisig message // MsigCancel cancels a previously-proposed multisig message
// It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>, // It takes the following params: <multisig address>, <proposed transaction ID>, <recipient address>, <value to transfer>,
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message> // <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign
// MsigAddPropose proposes adding a signer in the multisig // MsigAddPropose proposes adding a signer in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>, // It takes the following params: <multisig address>, <sender address of the propose msg>,
// <new signer>, <whether the number of required signers should be increased> // <new signer>, <whether the number of required signers should be increased>
MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign
// MsigAddApprove approves a previously proposed AddSigner message // MsigAddApprove approves a previously proposed AddSigner message
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>, // It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <new signer>, <whether the number of required signers should be increased> // <proposer address>, <new signer>, <whether the number of required signers should be increased>
MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error) //perm:sign MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (*MessagePrototype, error) //perm:sign
// MsigAddCancel cancels a previously proposed AddSigner message // MsigAddCancel cancels a previously proposed AddSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>, // It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <new signer>, <whether the number of required signers should be increased> // <new signer>, <whether the number of required signers should be increased>
MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error) //perm:sign MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (*MessagePrototype, error) //perm:sign
// MsigSwapPropose proposes swapping 2 signers in the multisig // MsigSwapPropose proposes swapping 2 signers in the multisig
// It takes the following params: <multisig address>, <sender address of the propose msg>, // It takes the following params: <multisig address>, <sender address of the propose msg>,
// <old signer>, <new signer> // <old signer>, <new signer>
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
// MsigSwapApprove approves a previously proposed SwapSigner // MsigSwapApprove approves a previously proposed SwapSigner
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>, // It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
// <proposer address>, <old signer>, <new signer> // <proposer address>, <old signer>, <new signer>
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) //perm:sign MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
// MsigSwapCancel cancels a previously proposed SwapSigner message // MsigSwapCancel cancels a previously proposed SwapSigner message
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>, // It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
// <old signer>, <new signer> // <old signer>, <new signer>
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) //perm:sign MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (*MessagePrototype, error) //perm:sign
// MsigRemoveSigner proposes the removal of a signer from the multisig. // MsigRemoveSigner proposes the removal of a signer from the multisig.
// It accepts the multisig to make the change on, the proposer address to // It accepts the multisig to make the change on, the proposer address to
// send the message from, the address to be removed, and a boolean // send the message from, the address to be removed, and a boolean
// indicating whether or not the signing threshold should be lowered by one // indicating whether or not the signing threshold should be lowered by one
// along with the address removal. // along with the address removal.
MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) //perm:sign MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (*MessagePrototype, error) //perm:sign
// MarketAddBalance adds funds to the market actor // MarketAddBalance adds funds to the market actor
MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign MarketAddBalance(ctx context.Context, wallet, addr address.Address, amt types.BigInt) (cid.Cid, error) //perm:sign
@ -664,6 +684,11 @@ type FullNode interface {
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) //perm:write
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error) //perm:sign
// MethodGroup: Node
// These methods are general node management and status commands
NodeStatus(ctx context.Context, inclChainStatus bool) (NodeStatus, error) //perm:read
// CreateBackup creates node backup under the specified file name. The // method requires that the lotus daemon is running with the
// method requires that the lotus daemon is running with the // method requires that the lotus daemon is running with the
// LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that // LOTUS_BACKUP_BASE_PATH environment variable set to some path, and that
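Among the interface additions above are the new MpoolCheck* methods. Below is a minimal sketch (not part of this diff) of how a caller that already holds an api.FullNode handle might run the pending-message checks; the results are printed generically so no field names on MessageCheckStatus are assumed.
```go
package mpoolcheck

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
)

// CheckPending runs the logical checks on all pending messages from `from`
// (MpoolCheckPendingMessages, added above) and dumps the per-message results.
// How the api.FullNode handle is obtained is deliberately left out.
func CheckPending(ctx context.Context, node api.FullNode, from address.Address) error {
	checks, err := node.MpoolCheckPendingMessages(ctx, from)
	if err != nil {
		return err
	}
	for i, msgChecks := range checks {
		fmt.Printf("pending message %d: %+v\n", i, msgChecks)
	}
	return nil
}
```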

View File

@ -58,4 +58,5 @@ type Gateway interface {
StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) StateSearchMsg(ctx context.Context, from types.TipSetKey, msg cid.Cid, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error) StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64, limit abi.ChainEpoch, allowReplaced bool) (*MsgLookup, error)
WalletBalance(context.Context, address.Address) (types.BigInt, error) WalletBalance(context.Context, address.Address) (types.BigInt, error)
Version(context.Context) (APIVersion, error)
} }

View File

@ -35,13 +35,13 @@ type MsgMeta struct {
} }
type Wallet interface { type Wallet interface {
WalletNew(context.Context, types.KeyType) (address.Address, error) WalletNew(context.Context, types.KeyType) (address.Address, error) //perm:admin
WalletHas(context.Context, address.Address) (bool, error) WalletHas(context.Context, address.Address) (bool, error) //perm:admin
WalletList(context.Context) ([]address.Address, error) WalletList(context.Context) ([]address.Address, error) //perm:admin
WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error) WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta MsgMeta) (*crypto.Signature, error) //perm:admin
WalletExport(context.Context, address.Address) (*types.KeyInfo, error) WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
WalletImport(context.Context, *types.KeyInfo) (address.Address, error) WalletImport(context.Context, *types.KeyInfo) (address.Address, error) //perm:admin
WalletDelete(context.Context, address.Address) error WalletDelete(context.Context, address.Address) error //perm:admin
} }

View File

@ -2,7 +2,6 @@ package api
import ( import (
"context" "context"
"io"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -43,7 +42,6 @@ type Worker interface {
ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin
MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin
UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin
ReadPiece(context.Context, io.Writer, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (storiface.CallID, error) //perm:admin
Fetch(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin Fetch(context.Context, storage.SectorRef, storiface.SectorFileType, storiface.PathType, storiface.AcquireMode) (storiface.CallID, error) //perm:admin
TaskDisable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin TaskDisable(ctx context.Context, tt sealtasks.TaskType) error //perm:admin

View File

@ -0,0 +1,35 @@
// Code generated by "stringer -type=CheckStatusCode -trimprefix=CheckStatus"; DO NOT EDIT.
package api
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[CheckStatusMessageSerialize-1]
_ = x[CheckStatusMessageSize-2]
_ = x[CheckStatusMessageValidity-3]
_ = x[CheckStatusMessageMinGas-4]
_ = x[CheckStatusMessageMinBaseFee-5]
_ = x[CheckStatusMessageBaseFee-6]
_ = x[CheckStatusMessageBaseFeeLowerBound-7]
_ = x[CheckStatusMessageBaseFeeUpperBound-8]
_ = x[CheckStatusMessageGetStateNonce-9]
_ = x[CheckStatusMessageNonce-10]
_ = x[CheckStatusMessageGetStateBalance-11]
_ = x[CheckStatusMessageBalance-12]
}
const _CheckStatusCode_name = "MessageSerializeMessageSizeMessageValidityMessageMinGasMessageMinBaseFeeMessageBaseFeeMessageBaseFeeLowerBoundMessageBaseFeeUpperBoundMessageGetStateNonceMessageNonceMessageGetStateBalanceMessageBalance"
var _CheckStatusCode_index = [...]uint8{0, 16, 27, 42, 55, 72, 86, 110, 134, 154, 166, 188, 202}
func (i CheckStatusCode) String() string {
i -= 1
if i < 0 || i >= CheckStatusCode(len(_CheckStatusCode_index)-1) {
return "CheckStatusCode(" + strconv.FormatInt(int64(i+1), 10) + ")"
}
return _CheckStatusCode_name[_CheckStatusCode_index[i]:_CheckStatusCode_index[i+1]]
}
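The generated stringer above gives the check status codes readable names, with the CheckStatus prefix trimmed; a tiny illustration using a constant referenced in the generated file:
```go
package main

import (
	"fmt"

	"github.com/filecoin-project/lotus/api"
)

func main() {
	// String() trims the "CheckStatus" prefix, so this prints "MessageSize".
	fmt.Println(api.CheckStatusMessageSize)
}
```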

View File

@ -261,6 +261,9 @@ func init() {
}, },
"methods": []interface{}{}}, "methods": []interface{}{}},
) )
addExample(api.CheckStatusCode(0))
addExample(map[string]interface{}{"abc": 123})
} }
func GetAPIType(name, pkg string) (i interface{}, t, permStruct, commonPermStruct reflect.Type) { func GetAPIType(name, pkg string) (i interface{}, t, permStruct, commonPermStruct reflect.Type) {

File diff suppressed because it is too large Load Diff

View File

@ -4,7 +4,6 @@ package api
import ( import (
"context" "context"
"io"
"time" "time"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -180,6 +179,8 @@ type FullNodeStruct struct {
ClientGetDealUpdates func(p0 context.Context) (<-chan DealInfo, error) `perm:"write"` ClientGetDealUpdates func(p0 context.Context) (<-chan DealInfo, error) `perm:"write"`
ClientGetRetrievalUpdates func(p0 context.Context) (<-chan RetrievalInfo, error) `perm:"write"`
ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"` ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"`
ClientImport func(p0 context.Context, p1 FileRef) (*ImportRes, error) `perm:"admin"` ClientImport func(p0 context.Context, p1 FileRef) (*ImportRes, error) `perm:"admin"`
@ -190,6 +191,8 @@ type FullNodeStruct struct {
ClientListImports func(p0 context.Context) ([]Import, error) `perm:"write"` ClientListImports func(p0 context.Context) ([]Import, error) `perm:"write"`
ClientListRetrievals func(p0 context.Context) ([]RetrievalInfo, error) `perm:"write"`
ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) `perm:"read"` ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) `perm:"read"`
ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"` ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
@ -206,6 +209,8 @@ type FullNodeStruct struct {
ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"` ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"`
ClientStatelessDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"write"`
CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
@ -236,6 +241,12 @@ type FullNodeStruct struct {
MpoolBatchPushUntrusted func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"` MpoolBatchPushUntrusted func(p0 context.Context, p1 []*types.SignedMessage) ([]cid.Cid, error) `perm:"write"`
MpoolCheckMessages func(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) `perm:"read"`
MpoolCheckPendingMessages func(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) `perm:"read"`
MpoolCheckReplaceMessages func(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) `perm:"read"`
MpoolClear func(p0 context.Context, p1 bool) error `perm:"write"` MpoolClear func(p0 context.Context, p1 bool) error `perm:"write"`
MpoolGetConfig func(p0 context.Context) (*types.MpoolConfig, error) `perm:"read"` MpoolGetConfig func(p0 context.Context) (*types.MpoolConfig, error) `perm:"read"`
@ -256,19 +267,19 @@ type FullNodeStruct struct {
MpoolSub func(p0 context.Context) (<-chan MpoolUpdate, error) `perm:"read"` MpoolSub func(p0 context.Context) (<-chan MpoolUpdate, error) `perm:"read"`
MsigAddApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) `perm:"sign"` MsigAddApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) `perm:"sign"`
MsigAddCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) `perm:"sign"` MsigAddCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) `perm:"sign"`
MsigAddPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"` MsigAddPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) `perm:"sign"`
MsigApprove func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) `perm:"sign"` MsigApprove func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) `perm:"sign"`
MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) `perm:"sign"` MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) `perm:"sign"`
MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) `perm:"sign"` MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"`
MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) `perm:"sign"` MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) `perm:"sign"`
MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"` MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `perm:"read"`
@ -278,15 +289,17 @@ type FullNodeStruct struct {
MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) `perm:"read"` MsigGetVestingSchedule func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (MsigVesting, error) `perm:"read"`
MsigPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) `perm:"sign"` MsigPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) `perm:"sign"`
MsigRemoveSigner func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) `perm:"sign"` MsigRemoveSigner func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) `perm:"sign"`
MsigSwapApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) `perm:"sign"` MsigSwapApprove func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) `perm:"sign"`
MsigSwapCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) `perm:"sign"` MsigSwapCancel func(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) `perm:"sign"`
MsigSwapPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) `perm:"sign"` MsigSwapPropose func(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) `perm:"sign"`
NodeStatus func(p0 context.Context, p1 bool) (NodeStatus, error) `perm:"read"`
PaychAllocateLane func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"sign"` PaychAllocateLane func(p0 context.Context, p1 address.Address) (uint64, error) `perm:"sign"`
@ -518,6 +531,8 @@ type GatewayStruct struct {
StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) `` StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64, p3 abi.ChainEpoch, p4 bool) (*MsgLookup, error) ``
Version func(p0 context.Context) (APIVersion, error) ``
WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `` WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) ``
} }
} }
@ -730,19 +745,19 @@ type StorageMinerStub struct {
type WalletStruct struct { type WalletStruct struct {
Internal struct { Internal struct {
WalletDelete func(p0 context.Context, p1 address.Address) error `` WalletDelete func(p0 context.Context, p1 address.Address) error `perm:"admin"`
WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `` WalletExport func(p0 context.Context, p1 address.Address) (*types.KeyInfo, error) `perm:"admin"`
WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `` WalletHas func(p0 context.Context, p1 address.Address) (bool, error) `perm:"admin"`
WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `` WalletImport func(p0 context.Context, p1 *types.KeyInfo) (address.Address, error) `perm:"admin"`
WalletList func(p0 context.Context) ([]address.Address, error) `` WalletList func(p0 context.Context) ([]address.Address, error) `perm:"admin"`
WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `` WalletNew func(p0 context.Context, p1 types.KeyType) (address.Address, error) `perm:"admin"`
WalletSign func(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) `` WalletSign func(p0 context.Context, p1 address.Address, p2 []byte, p3 MsgMeta) (*crypto.Signature, error) `perm:"admin"`
} }
} }
@ -767,8 +782,6 @@ type WorkerStruct struct {
ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"` ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"`
ReadPiece func(p0 context.Context, p1 io.Writer, p2 storage.SectorRef, p3 storiface.UnpaddedByteIndex, p4 abi.UnpaddedPieceSize) (storiface.CallID, error) `perm:"admin"`
ReleaseUnsealed func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` ReleaseUnsealed func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"`
Remove func(p0 context.Context, p1 abi.SectorID) error `perm:"admin"` Remove func(p0 context.Context, p1 abi.SectorID) error `perm:"admin"`
@ -1292,6 +1305,14 @@ func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan DealInfo
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) {
return s.Internal.ClientGetRetrievalUpdates(p0)
}
func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan RetrievalInfo, error) {
return nil, xerrors.New("method not supported")
}
func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
return s.Internal.ClientHasLocal(p0, p1) return s.Internal.ClientHasLocal(p0, p1)
} }
@ -1332,6 +1353,14 @@ func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]Import, error) {
return *new([]Import), xerrors.New("method not supported") return *new([]Import), xerrors.New("method not supported")
} }
func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) {
return s.Internal.ClientListRetrievals(p0)
}
func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]RetrievalInfo, error) {
return *new([]RetrievalInfo), xerrors.New("method not supported")
}
func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) { func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (QueryOffer, error) {
return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3)
} }
@ -1396,6 +1425,14 @@ func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *StartDealParams)
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
return s.Internal.ClientStatelessDeal(p0, p1)
}
func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) {
return nil, xerrors.New("method not supported")
}
func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error {
return s.Internal.CreateBackup(p0, p1) return s.Internal.CreateBackup(p0, p1)
} }
@ -1516,6 +1553,30 @@ func (s *FullNodeStub) MpoolBatchPushUntrusted(p0 context.Context, p1 []*types.S
return *new([]cid.Cid), xerrors.New("method not supported") return *new([]cid.Cid), xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MpoolCheckMessages(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) {
return s.Internal.MpoolCheckMessages(p0, p1)
}
func (s *FullNodeStub) MpoolCheckMessages(p0 context.Context, p1 []*MessagePrototype) ([][]MessageCheckStatus, error) {
return *new([][]MessageCheckStatus), xerrors.New("method not supported")
}
func (s *FullNodeStruct) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) {
return s.Internal.MpoolCheckPendingMessages(p0, p1)
}
func (s *FullNodeStub) MpoolCheckPendingMessages(p0 context.Context, p1 address.Address) ([][]MessageCheckStatus, error) {
return *new([][]MessageCheckStatus), xerrors.New("method not supported")
}
func (s *FullNodeStruct) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) {
return s.Internal.MpoolCheckReplaceMessages(p0, p1)
}
func (s *FullNodeStub) MpoolCheckReplaceMessages(p0 context.Context, p1 []*types.Message) ([][]MessageCheckStatus, error) {
return *new([][]MessageCheckStatus), xerrors.New("method not supported")
}
func (s *FullNodeStruct) MpoolClear(p0 context.Context, p1 bool) error { func (s *FullNodeStruct) MpoolClear(p0 context.Context, p1 bool) error {
return s.Internal.MpoolClear(p0, p1) return s.Internal.MpoolClear(p0, p1)
} }
@ -1596,60 +1657,60 @@ func (s *FullNodeStub) MpoolSub(p0 context.Context) (<-chan MpoolUpdate, error)
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) { func (s *FullNodeStruct) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) {
return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6) return s.Internal.MsigAddApprove(p0, p1, p2, p3, p4, p5, p6)
} }
func (s *FullNodeStub) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (cid.Cid, error) { func (s *FullNodeStub) MsigAddApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 bool) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) { func (s *FullNodeStruct) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) {
return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5) return s.Internal.MsigAddCancel(p0, p1, p2, p3, p4, p5)
} }
func (s *FullNodeStub) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (cid.Cid, error) { func (s *FullNodeStub) MsigAddCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 bool) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { func (s *FullNodeStruct) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) {
return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4) return s.Internal.MsigAddPropose(p0, p1, p2, p3, p4)
} }
func (s *FullNodeStub) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { func (s *FullNodeStub) MsigAddPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) { func (s *FullNodeStruct) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) {
return s.Internal.MsigApprove(p0, p1, p2, p3) return s.Internal.MsigApprove(p0, p1, p2, p3)
} }
func (s *FullNodeStub) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (cid.Cid, error) { func (s *FullNodeStub) MsigApprove(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) { func (s *FullNodeStruct) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) {
return s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8) return s.Internal.MsigApproveTxnHash(p0, p1, p2, p3, p4, p5, p6, p7, p8)
} }
func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (cid.Cid, error) { func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) { func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7) return s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7)
} }
func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (cid.Cid, error) { func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) { func (s *FullNodeStruct) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) {
return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6) return s.Internal.MsigCreate(p0, p1, p2, p3, p4, p5, p6)
} }
func (s *FullNodeStub) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (cid.Cid, error) { func (s *FullNodeStub) MsigCreate(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) { func (s *FullNodeStruct) MsigGetAvailableBalance(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) {
@ -1684,44 +1745,52 @@ func (s *FullNodeStub) MsigGetVestingSchedule(p0 context.Context, p1 address.Add
return *new(MsigVesting), xerrors.New("method not supported") return *new(MsigVesting), xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) { func (s *FullNodeStruct) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) {
return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6) return s.Internal.MsigPropose(p0, p1, p2, p3, p4, p5, p6)
} }
func (s *FullNodeStub) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (cid.Cid, error) { func (s *FullNodeStub) MsigPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 types.BigInt, p4 address.Address, p5 uint64, p6 []byte) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { func (s *FullNodeStruct) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) {
return s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4) return s.Internal.MsigRemoveSigner(p0, p1, p2, p3, p4)
} }
func (s *FullNodeStub) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (cid.Cid, error) { func (s *FullNodeStub) MsigRemoveSigner(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 bool) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) { func (s *FullNodeStruct) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) {
return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6) return s.Internal.MsigSwapApprove(p0, p1, p2, p3, p4, p5, p6)
} }
func (s *FullNodeStub) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (cid.Cid, error) { func (s *FullNodeStub) MsigSwapApprove(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address, p6 address.Address) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) { func (s *FullNodeStruct) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) {
return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5) return s.Internal.MsigSwapCancel(p0, p1, p2, p3, p4, p5)
} }
func (s *FullNodeStub) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (cid.Cid, error) { func (s *FullNodeStub) MsigSwapCancel(p0 context.Context, p1 address.Address, p2 address.Address, p3 uint64, p4 address.Address, p5 address.Address) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) { func (s *FullNodeStruct) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) {
return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4) return s.Internal.MsigSwapPropose(p0, p1, p2, p3, p4)
} }
func (s *FullNodeStub) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (cid.Cid, error) { func (s *FullNodeStub) MsigSwapPropose(p0 context.Context, p1 address.Address, p2 address.Address, p3 address.Address, p4 address.Address) (*MessagePrototype, error) {
return *new(cid.Cid), xerrors.New("method not supported") return nil, xerrors.New("method not supported")
}
func (s *FullNodeStruct) NodeStatus(p0 context.Context, p1 bool) (NodeStatus, error) {
return s.Internal.NodeStatus(p0, p1)
}
func (s *FullNodeStub) NodeStatus(p0 context.Context, p1 bool) (NodeStatus, error) {
return *new(NodeStatus), xerrors.New("method not supported")
} }
func (s *FullNodeStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) { func (s *FullNodeStruct) PaychAllocateLane(p0 context.Context, p1 address.Address) (uint64, error) {
@ -2612,6 +2681,14 @@ func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64, p3
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *GatewayStruct) Version(p0 context.Context) (APIVersion, error) {
return s.Internal.Version(p0)
}
func (s *GatewayStub) Version(p0 context.Context) (APIVersion, error) {
return *new(APIVersion), xerrors.New("method not supported")
}
func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) {
return s.Internal.WalletBalance(p0, p1) return s.Internal.WalletBalance(p0, p1)
} }
@ -3484,14 +3561,6 @@ func (s *WorkerStub) ProcessSession(p0 context.Context) (uuid.UUID, error) {
return *new(uuid.UUID), xerrors.New("method not supported") return *new(uuid.UUID), xerrors.New("method not supported")
} }
func (s *WorkerStruct) ReadPiece(p0 context.Context, p1 io.Writer, p2 storage.SectorRef, p3 storiface.UnpaddedByteIndex, p4 abi.UnpaddedPieceSize) (storiface.CallID, error) {
return s.Internal.ReadPiece(p0, p1, p2, p3, p4)
}
func (s *WorkerStub) ReadPiece(p0 context.Context, p1 io.Writer, p2 storage.SectorRef, p3 storiface.UnpaddedByteIndex, p4 abi.UnpaddedPieceSize) (storiface.CallID, error) {
return *new(storiface.CallID), xerrors.New("method not supported")
}
func (s *WorkerStruct) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { func (s *WorkerStruct) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) {
return s.Internal.ReleaseUnsealed(p0, p1, p2) return s.Internal.ReleaseUnsealed(p0, p1, p2)
} }
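The wrappers in this file follow one mechanical pattern: each Struct method delegates to an Internal function field filled in by the RPC client, and each Stub returns a "method not supported" error. The multisig wrappers earlier in the file now return a *MessagePrototype instead of the CID of an already-pushed message, so the caller is expected to push the prototype itself. A minimal sketch of that flow, assuming a FullNode handle named full, placeholder addresses, and no MessageSendSpec tuning:

	// Propose a multisig transfer, then push the returned prototype ourselves.
	proto, err := full.MsigPropose(ctx, msig, recipient, types.NewInt(1000), sender, 0, nil)
	if err != nil {
		return err
	}
	sm, err := full.MpoolPushMessage(ctx, &proto.Message, nil)
	if err != nil {
		return err
	}
	fmt.Println("pushed multisig proposal:", sm.Cid())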

View File

@ -1,61 +0,0 @@
package test
import (
"context"
"fmt"
"sync/atomic"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/miner"
)
type BlockMiner struct {
ctx context.Context
t *testing.T
miner TestStorageNode
blocktime time.Duration
mine int64
nulls int64
done chan struct{}
}
func NewBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *BlockMiner {
return &BlockMiner{
ctx: ctx,
t: t,
miner: miner,
blocktime: blocktime,
mine: int64(1),
done: make(chan struct{}),
}
}
func (bm *BlockMiner) MineBlocks() {
time.Sleep(time.Second)
go func() {
defer close(bm.done)
for atomic.LoadInt64(&bm.mine) == 1 {
select {
case <-bm.ctx.Done():
return
case <-time.After(bm.blocktime):
}
nulls := atomic.SwapInt64(&bm.nulls, 0)
if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
InjectNulls: abi.ChainEpoch(nulls),
Done: func(bool, abi.ChainEpoch, error) {},
}); err != nil {
bm.t.Error(err)
}
}
}()
}
func (bm *BlockMiner) Stop() {
atomic.AddInt64(&bm.mine, -1)
fmt.Println("shutting down mining")
<-bm.done
}
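For reference, a test normally starts this helper once and stops it during cleanup; a minimal usage sketch (block time chosen arbitrarily):

	bm := NewBlockMiner(ctx, t, miner, 50*time.Millisecond)
	bm.MineBlocks()  // mine in the background until Stop is called
	defer bm.Stop()  // signal the loop to exit and wait for the goroutine
	// ... exercise the node under test here ...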

View File

@ -1,621 +0,0 @@
package test
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sort"
"testing"
"time"
"github.com/ipfs/go-cid"
files "github.com/ipfs/go-ipfs-files"
"github.com/ipld/go-car"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-fil-markets/storagemarket"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
"github.com/filecoin-project/lotus/chain/types"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
"github.com/filecoin-project/lotus/extern/storage-sealing/sealiface"
"github.com/filecoin-project/lotus/markets/storageadapter"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
"github.com/filecoin-project/lotus/node/modules/dtypes"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
dstest "github.com/ipfs/go-merkledag/test"
unixfile "github.com/ipfs/go-unixfs/file"
)
func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
MakeDeal(t, s.ctx, 6, s.client, s.miner, carExport, fastRet, startEpoch)
}
func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
MakeDeal(t, s.ctx, 7, s.client, s.miner, false, false, startEpoch)
}
func MakeDeal(t *testing.T, ctx context.Context, rseed int, client api.FullNode, miner TestStorageNode, carExport, fastRet bool, startEpoch abi.ChainEpoch) {
res, data, err := CreateClientFile(ctx, client, rseed, 0)
if err != nil {
t.Fatal(err)
}
fcid := res.Root
fmt.Println("FILE CID: ", fcid)
deal := startDeal(t, ctx, miner, client, fcid, fastRet, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
waitDealSealed(t, ctx, miner, client, deal, false, false, nil)
// Retrieval
info, err := client.ClientGetDealInfo(ctx, *deal)
require.NoError(t, err)
testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
}
func CreateClientFile(ctx context.Context, client api.FullNode, rseed, size int) (*api.ImportRes, []byte, error) {
if size == 0 {
size = 1600
}
data := make([]byte, size)
rand.New(rand.NewSource(int64(rseed))).Read(data)
dir, err := ioutil.TempDir(os.TempDir(), "test-make-deal-")
if err != nil {
return nil, nil, err
}
path := filepath.Join(dir, "sourcefile.dat")
err = ioutil.WriteFile(path, data, 0644)
if err != nil {
return nil, nil, err
}
res, err := client.ClientImport(ctx, api.FileRef{Path: path})
if err != nil {
return nil, nil, err
}
return res, data, nil
}
func TestPublishDealsBatching(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(2)
// Set max deals per publish deals message to 2
minerDef := []StorageMiner{{
Full: 0,
Opts: node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
Preseal: PresealGenesis,
}}
// Create a client and a miner node, and connect them

n, sn := b(t, OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
s := connectAndStartMining(t, b, blocktime, client, miner)
defer s.blockMiner.Stop()
// Starts a deal and waits until it's published
runDealTillPublish := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed, 0)
require.NoError(t, err)
upds, err := client.ClientGetDealUpdates(s.ctx)
require.NoError(t, err)
startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
done := make(chan struct{})
go func() {
for upd := range upds {
if upd.DataRef.Root == res.Root && upd.State == storagemarket.StorageDealAwaitingPreCommit {
done <- struct{}{}
}
}
}()
<-done
}
// Run three deals in parallel
done := make(chan struct{}, maxDealsPerMsg+1)
for rseed := 1; rseed <= 3; rseed++ {
rseed := rseed
go func() {
runDealTillPublish(rseed)
done <- struct{}{}
}()
}
// Wait for two of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
// Expect a single PublishStorageDeals message that includes the first two deals
msgCids, err := s.client.StateListMessages(s.ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1)
require.NoError(t, err)
count := 0
for _, msgCid := range msgCids {
msg, err := s.client.ChainGetMessage(s.ctx, msgCid)
require.NoError(t, err)
if msg.Method == market.Methods.PublishStorageDeals {
count++
var pubDealsParams market2.PublishStorageDealsParams
err = pubDealsParams.UnmarshalCBOR(bytes.NewReader(msg.Params))
require.NoError(t, err)
require.Len(t, pubDealsParams.Deals, int(maxDealsPerMsg))
}
}
require.Equal(t, 1, count)
// The third deal should be published once the publish period expires.
// Allow a little padding as it takes a moment for the state change to
// be noticed by the client.
padding := 10 * time.Second
select {
case <-time.After(publishPeriod + padding):
require.Fail(t, "Expected 3rd deal to be published once publish period elapsed")
case <-done: // Success
}
}
func TestBatchDealInput(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
run := func(piece, deals, expectSectors int) func(t *testing.T) {
return func(t *testing.T) {
publishPeriod := 10 * time.Second
maxDealsPerMsg := uint64(deals)
// Set max deals per publish deals message to maxDealsPerMsg
minerDef := []StorageMiner{{
Full: 0,
Opts: node.Options(
node.Override(
new(*storageadapter.DealPublisher),
storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{
Period: publishPeriod,
MaxDealsPerMsg: maxDealsPerMsg,
})),
node.Override(new(dtypes.GetSealingConfigFunc), func() (dtypes.GetSealingConfigFunc, error) {
return func() (sealiface.Config, error) {
return sealiface.Config{
MaxWaitDealsSectors: 2,
MaxSealingSectors: 1,
MaxSealingSectorsForDeals: 3,
AlwaysKeepUnsealedCopy: true,
WaitDealsDelay: time.Hour,
}, nil
}, nil
}),
),
Preseal: PresealGenesis,
}}
// Create a client and a miner node, and connect them
n, sn := b(t, OneFull, minerDef)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
s := connectAndStartMining(t, b, blocktime, client, miner)
defer s.blockMiner.Stop()
err := miner.MarketSetAsk(s.ctx, big.Zero(), big.Zero(), 200, 128, 32<<30)
require.NoError(t, err)
checkNoPadding := func() {
sl, err := sn[0].SectorsList(s.ctx)
require.NoError(t, err)
sort.Slice(sl, func(i, j int) bool {
return sl[i] < sl[j]
})
for _, snum := range sl {
si, err := sn[0].SectorsStatus(s.ctx, snum, false)
require.NoError(t, err)
// fmt.Printf("S %d: %+v %s\n", snum, si.Deals, si.State)
for _, deal := range si.Deals {
if deal == 0 {
fmt.Printf("sector %d had a padding piece!\n", snum)
}
}
}
}
// Starts a deal and waits until it's published
runDealTillSeal := func(rseed int) {
res, _, err := CreateClientFile(s.ctx, s.client, rseed, piece)
require.NoError(t, err)
dc := startDeal(t, s.ctx, s.miner, s.client, res.Root, false, startEpoch)
waitDealSealed(t, s.ctx, s.miner, s.client, dc, false, true, checkNoPadding)
}
// Run maxDealsPerMsg deals in parallel
done := make(chan struct{}, maxDealsPerMsg)
for rseed := 0; rseed < int(maxDealsPerMsg); rseed++ {
rseed := rseed
go func() {
runDealTillSeal(rseed)
done <- struct{}{}
}()
}
// Wait for maxDealsPerMsg of the deals to be published
for i := 0; i < int(maxDealsPerMsg); i++ {
<-done
}
checkNoPadding()
sl, err := sn[0].SectorsList(s.ctx)
require.NoError(t, err)
require.Equal(t, len(sl), expectSectors)
}
}
t.Run("4-p1600B", run(1600, 4, 4))
t.Run("4-p513B", run(513, 4, 2))
if !testing.Short() {
t.Run("32-p257B", run(257, 32, 8))
t.Run("32-p10B", run(10, 32, 2))
// fixme: this appears to break data-transfer / markets in some really creative ways
//t.Run("128-p10B", run(10, 128, 8))
}
}
func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
data := make([]byte, 1600)
rand.New(rand.NewSource(int64(8))).Read(data)
r := bytes.NewReader(data)
fcid, err := s.client.ClientImportLocal(s.ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
deal := startDeal(t, s.ctx, s.miner, s.client, fcid, true, startEpoch)
waitDealPublished(t, s.ctx, s.miner, deal)
fmt.Println("deal published, retrieving")
// Retrieval
info, err := s.client.ClientGetDealInfo(s.ctx, *deal)
require.NoError(t, err)
testRetrieval(t, s.ctx, s.client, fcid, &info.PieceCID, false, data)
}
func TestSecondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
{
data1 := make([]byte, 800)
rand.New(rand.NewSource(int64(3))).Read(data1)
r := bytes.NewReader(data1)
fcid1, err := s.client.ClientImportLocal(s.ctx, r)
if err != nil {
t.Fatal(err)
}
data2 := make([]byte, 800)
rand.New(rand.NewSource(int64(9))).Read(data2)
r2 := bytes.NewReader(data2)
fcid2, err := s.client.ClientImportLocal(s.ctx, r2)
if err != nil {
t.Fatal(err)
}
deal1 := startDeal(t, s.ctx, s.miner, s.client, fcid1, true, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
waitDealSealed(t, s.ctx, s.miner, s.client, deal1, true, false, nil)
deal2 := startDeal(t, s.ctx, s.miner, s.client, fcid2, true, 0)
time.Sleep(time.Second)
waitDealSealed(t, s.ctx, s.miner, s.client, deal2, false, false, nil)
// Retrieval
info, err := s.client.ClientGetDealInfo(s.ctx, *deal2)
require.NoError(t, err)
rf, _ := s.miner.SectorsRefs(s.ctx)
fmt.Printf("refs: %+v\n", rf)
testRetrieval(t, s.ctx, s.client, fcid2, &info.PieceCID, false, data2)
}
}
func TestZeroPricePerByteRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, startEpoch abi.ChainEpoch) {
s := setupOneClientOneMiner(t, b, blocktime)
defer s.blockMiner.Stop()
// Set price-per-byte to zero
ask, err := s.miner.MarketGetRetrievalAsk(s.ctx)
require.NoError(t, err)
ask.PricePerByte = abi.NewTokenAmount(0)
err = s.miner.MarketSetRetrievalAsk(s.ctx, ask)
require.NoError(t, err)
MakeDeal(t, s.ctx, 6, s.client, s.miner, false, false, startEpoch)
}
func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, fcid cid.Cid, fastRet bool, startEpoch abi.ChainEpoch) *cid.Cid {
maddr, err := miner.ActorAddress(ctx)
if err != nil {
t.Fatal(err)
}
addr, err := client.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{
Data: &storagemarket.DataRef{
TransferType: storagemarket.TTGraphsync,
Root: fcid,
},
Wallet: addr,
Miner: maddr,
EpochPrice: types.NewInt(1000000),
DealStartEpoch: startEpoch,
MinBlocksDuration: uint64(build.MinDealDuration),
FastRetrieval: fastRet,
})
if err != nil {
t.Fatalf("%+v", err)
}
return deal
}
func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client api.FullNode, deal *cid.Cid, noseal, noSealStart bool, cb func()) {
loop:
for {
di, err := client.ClientGetDealInfo(ctx, *deal)
if err != nil {
t.Fatal(err)
}
switch di.State {
case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing:
if noseal {
return
}
if !noSealStart {
startSealingWaiting(t, ctx, miner)
}
case storagemarket.StorageDealProposalRejected:
t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
t.Fatal("deal failed")
case storagemarket.StorageDealError:
t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealActive:
fmt.Println("COMPLETE", di)
break loop
}
mds, err := miner.MarketListIncompleteDeals(ctx)
if err != nil {
t.Fatal(err)
}
var minerState storagemarket.StorageDealStatus
for _, md := range mds {
if md.DealID == di.DealID {
minerState = md.State
break
}
}
fmt.Printf("Deal %d state: client:%s provider:%s\n", di.DealID, storagemarket.DealStates[di.State], storagemarket.DealStates[minerState])
time.Sleep(time.Second / 2)
if cb != nil {
cb()
}
}
}
func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) {
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
updates, err := miner.MarketGetDealUpdates(subCtx)
if err != nil {
t.Fatal(err)
}
for {
select {
case <-ctx.Done():
t.Fatal("context timeout")
case di := <-updates:
if deal.Equals(di.ProposalCid) {
switch di.State {
case storagemarket.StorageDealProposalRejected:
t.Fatal("deal rejected")
case storagemarket.StorageDealFailing:
t.Fatal("deal failed")
case storagemarket.StorageDealError:
t.Fatal("deal errored", di.Message)
case storagemarket.StorageDealFinalizing, storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
fmt.Println("COMPLETE", di)
return
}
fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
}
}
}
}
func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNode) {
snums, err := miner.SectorsList(ctx)
require.NoError(t, err)
for _, snum := range snums {
si, err := miner.SectorsStatus(ctx, snum, false)
require.NoError(t, err)
t.Logf("Sector %d state: %s", snum, si.State)
if si.State == api.SectorState(sealing.WaitDeals) {
require.NoError(t, miner.SectorStartSealing(ctx, snum))
}
}
flushSealingBatches(t, ctx, miner)
}
func testRetrieval(t *testing.T, ctx context.Context, client api.FullNode, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
offers, err := client.ClientFindData(ctx, fcid, piece)
if err != nil {
t.Fatal(err)
}
if len(offers) < 1 {
t.Fatal("no offers")
}
rpath, err := ioutil.TempDir("", "lotus-retrieve-test-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rpath) //nolint:errcheck
caddr, err := client.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
ref := &api.FileRef{
Path: filepath.Join(rpath, "ret"),
IsCAR: carExport,
}
updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
if err != nil {
t.Fatal(err)
}
for update := range updates {
if update.Err != "" {
t.Fatalf("retrieval failed: %s", update.Err)
}
}
rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
if err != nil {
t.Fatal(err)
}
if carExport {
rdata = extractCarData(t, ctx, rdata, rpath)
}
if !bytes.Equal(rdata, data) {
t.Fatal("wrong data retrieved")
}
}
func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte {
bserv := dstest.Bserv()
ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
if err != nil {
t.Fatal(err)
}
b, err := bserv.GetBlock(ctx, ch.Roots[0])
if err != nil {
t.Fatal(err)
}
nd, err := ipld.Decode(b)
if err != nil {
t.Fatal(err)
}
dserv := dag.NewDAGService(bserv)
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
if err != nil {
t.Fatal(err)
}
outPath := filepath.Join(rpath, "retLoadedCAR")
if err := files.WriteTo(fil, outPath); err != nil {
t.Fatal(err)
}
rdata, err = ioutil.ReadFile(outPath)
if err != nil {
t.Fatal(err)
}
return rdata
}
type dealsScaffold struct {
ctx context.Context
client *impl.FullNodeAPI
miner TestStorageNode
blockMiner *BlockMiner
}
func setupOneClientOneMiner(t *testing.T, b APIBuilder, blocktime time.Duration) *dealsScaffold {
n, sn := b(t, OneFull, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
return connectAndStartMining(t, b, blocktime, client, miner)
}
func connectAndStartMining(t *testing.T, b APIBuilder, blocktime time.Duration, client *impl.FullNodeAPI, miner TestStorageNode) *dealsScaffold {
ctx := context.Background()
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
blockMiner := NewBlockMiner(ctx, t, miner, blocktime)
blockMiner.MineBlocks()
return &dealsScaffold{
ctx: ctx,
client: client,
miner: miner,
blockMiner: blockMiner,
}
}
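These helpers are meant to compose into a single test body; a minimal sketch (seed, block time and start epoch are arbitrary example values):

	func TestExampleDealFlow(t *testing.T, b APIBuilder, startEpoch abi.ChainEpoch) {
		s := setupOneClientOneMiner(t, b, 50*time.Millisecond)
		defer s.blockMiner.Stop()

		// seed 5, no CAR export, no fast retrieval
		MakeDeal(t, s.ctx, 5, s.client, s.miner, false, false, startEpoch)
	}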

View File

@ -1,240 +0,0 @@
package test
import (
"bytes"
"context"
"fmt"
"math/rand"
"sync/atomic"
"testing"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node/impl"
)
//nolint:deadcode,varcheck
var log = logging.Logger("apitest")
func (ts *testSuite) testMining(t *testing.T) {
ctx := context.Background()
apis, sn := ts.makeNodes(t, OneFull, OneMiner)
api := apis[0]
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
initHead := (<-newHeads)[0]
baseHeight := initHead.Val.Height()
h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, int64(h1.Height()), int64(baseHeight))
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
}
func (ts *testSuite) testMiningReal(t *testing.T) {
build.InsecurePoStValidation = false
defer func() {
build.InsecurePoStValidation = true
}()
ctx := context.Background()
apis, sn := ts.makeNodes(t, OneFull, OneMiner)
api := apis[0]
newHeads, err := api.ChainNotify(ctx)
require.NoError(t, err)
at := (<-newHeads)[0].Val.Height()
h1, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Equal(t, int64(at), int64(h1.Height()))
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
require.NoError(t, err)
<-newHeads
h2, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
require.NoError(t, err)
<-newHeads
h3, err := api.ChainHead(ctx)
require.NoError(t, err)
require.Greater(t, int64(h3.Height()), int64(h2.Height()))
}
func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) {
// test making a deal with a fresh miner, and see if it starts to mine
ctx := context.Background()
n, sn := b(t, OneFull, []StorageMiner{
{Full: 0, Preseal: PresealGenesis},
{Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
})
client := n[0].FullNode.(*impl.FullNodeAPI)
provider := sn[1]
genesisMiner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := provider.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
time.Sleep(time.Second)
data := make([]byte, 600)
rand.New(rand.NewSource(5)).Read(data)
r := bytes.NewReader(data)
fcid, err := client.ClientImportLocal(ctx, r)
if err != nil {
t.Fatal(err)
}
fmt.Println("FILE CID: ", fcid)
var mine int32 = 1
done := make(chan struct{})
minedTwo := make(chan struct{})
m2addr, err := sn[1].ActorAddress(context.TODO())
if err != nil {
t.Fatal(err)
}
go func() {
defer close(done)
complChan := minedTwo
for atomic.LoadInt32(&mine) != 0 {
wait := make(chan int)
mdone := func(mined bool, _ abi.ChainEpoch, err error) {
n := 0
if mined {
n = 1
}
wait <- n
}
if err := sn[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
t.Error(err)
}
if err := sn[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
t.Error(err)
}
expect := <-wait
expect += <-wait
time.Sleep(blocktime)
if expect == 0 {
// null block
continue
}
var nodeOneMined bool
for _, node := range sn {
mb, err := node.MiningBase(ctx)
if err != nil {
t.Error(err)
return
}
for _, b := range mb.Blocks() {
if b.Miner == m2addr {
nodeOneMined = true
break
}
}
}
if nodeOneMined && complChan != nil {
close(complChan)
complChan = nil
}
}
}()
deal := startDeal(t, ctx, provider, client, fcid, false, 0)
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
time.Sleep(time.Second)
waitDealSealed(t, ctx, provider, client, deal, false, false, nil)
<-minedTwo
atomic.StoreInt32(&mine, 0)
fmt.Println("shutting down mining")
<-done
}
func (ts *testSuite) testNonGenesisMiner(t *testing.T) {
ctx := context.Background()
n, sn := ts.makeNodes(t, []FullNodeOpts{
FullNodeWithLatestActorsAt(-1),
}, []StorageMiner{
{Full: 0, Preseal: PresealGenesis},
})
full, ok := n[0].FullNode.(*impl.FullNodeAPI)
if !ok {
t.Skip("not testing with a full node")
return
}
genesisMiner := sn[0]
bm := NewBlockMiner(ctx, t, genesisMiner, 4*time.Millisecond)
bm.MineBlocks()
t.Cleanup(bm.Stop)
gaa, err := genesisMiner.ActorAddress(ctx)
require.NoError(t, err)
gmi, err := full.StateMinerInfo(ctx, gaa, types.EmptyTSK)
require.NoError(t, err)
testm := n[0].Stb(ctx, t, TestSpt, gmi.Owner)
ta, err := testm.ActorAddress(ctx)
require.NoError(t, err)
tid, err := address.IDFromAddress(ta)
require.NoError(t, err)
require.Equal(t, uint64(1001), tid)
}
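All of these tests sit on top of the asynchronous MineOne/Done callback; when a result is needed synchronously, the usual trick is a small wrapper like the sketch below (only names already defined in this package are used):

	// mineOneSync mines a single round on m and reports whether a block was won.
	func mineOneSync(ctx context.Context, t *testing.T, m TestStorageNode) bool {
		wait := make(chan bool, 1)
		req := miner.MineReq{
			Done: func(won bool, _ abi.ChainEpoch, err error) {
				if err != nil {
					t.Error(err)
				}
				wait <- won
			},
		}
		if err := m.MineOne(ctx, req); err != nil {
			t.Fatal(err)
		}
		return <-wait
	}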

View File

@ -1,389 +0,0 @@
package test
import (
"context"
"fmt"
"sort"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
bminer "github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
"github.com/filecoin-project/lotus/node/impl"
)
func TestSDRUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithSDRAt(500, 1000)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
pledge := make(chan struct{})
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
round := 0
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
// 3 sealing rounds: before, during, after.
if round >= 3 {
continue
}
head, err := client.ChainHead(ctx)
assert.NoError(t, err)
// rounds happen every 500 blocks, with a 50 block offset (see the height check below).
if head.Height() >= abi.ChainEpoch(round*500+50) {
round++
pledge <- struct{}{}
ver, err := client.StateNetworkVersion(ctx, head.Key())
assert.NoError(t, err)
switch round {
case 1:
assert.Equal(t, network.Version6, ver)
case 2:
assert.Equal(t, network.Version7, ver)
case 3:
assert.Equal(t, network.Version8, ver)
}
}
}
}()
// before.
pledgeSectors(t, ctx, miner, 9, 0, pledge)
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
sort.Slice(s, func(i, j int) bool {
return s[i] < s[j]
})
for i, id := range s {
info, err := miner.SectorsStatus(ctx, id, true)
require.NoError(t, err)
expectProof := abi.RegisteredSealProof_StackedDrg2KiBV1
if i >= 3 {
// after
expectProof = abi.RegisteredSealProof_StackedDrg2KiBV1_1
}
assert.Equal(t, expectProof, info.SealProof, "sector %d, id %d", i, id)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeBatching(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{FullNodeWithLatestActorsAt(-1)}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := startPledge(t, ctx, miner, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
if states[api.SectorState(sealing.SubmitPreCommitBatch)] == nSectors ||
(states[api.SectorState(sealing.SubmitPreCommitBatch)] > 0 && states[api.SectorState(sealing.PreCommit1)] == 0 && states[api.SectorState(sealing.PreCommit2)] == 0) {
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
}
if states[api.SectorState(sealing.SubmitCommitAggregate)] == nSectors ||
(states[api.SectorState(sealing.SubmitCommitAggregate)] > 0 && states[api.SectorState(sealing.WaitSeed)] == 0 && states[api.SectorState(sealing.Committing)] == 0) {
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeBeforeNv13(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, []FullNodeOpts{
{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: 1000000000,
Migration: stmgr.UpgradeActorsV5,
}})
},
},
}, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
for {
h, err := client.ChainHead(ctx)
require.NoError(t, err)
if h.Height() > 10 {
break
}
}
toCheck := startPledge(t, ctx, miner, nSectors, 0, nil)
for len(toCheck) > 0 {
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
atomic.StoreInt64(&mine, 0)
<-done
}
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
n, sn := b(t, OneFull, OneMiner)
client := n[0].FullNode.(*impl.FullNodeAPI)
miner := sn[0]
addrinfo, err := client.NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := miner.NetConnect(ctx, addrinfo); err != nil {
t.Fatal(err)
}
build.Clock.Sleep(time.Second)
mine := int64(1)
done := make(chan struct{})
go func() {
defer close(done)
for atomic.LoadInt64(&mine) != 0 {
build.Clock.Sleep(blocktime)
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
}}); err != nil {
t.Error(err)
}
}
}()
pledgeSectors(t, ctx, miner, nSectors, 0, nil)
atomic.StoreInt64(&mine, 0)
<-done
}
func flushSealingBatches(t *testing.T, ctx context.Context, miner TestStorageNode) {
pcb, err := miner.SectorPreCommitFlush(ctx)
require.NoError(t, err)
if pcb != nil {
fmt.Printf("PRECOMMIT BATCH: %+v\n", pcb)
}
cb, err := miner.SectorCommitFlush(ctx)
require.NoError(t, err)
if cb != nil {
fmt.Printf("COMMIT BATCH: %+v\n", cb)
}
}
func startPledge(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) map[abi.SectorNumber]struct{} {
for i := 0; i < n; i++ {
if i%3 == 0 && blockNotif != nil {
<-blockNotif
log.Errorf("WAIT")
}
log.Errorf("PLEDGING %d", i)
_, err := miner.PledgeSector(ctx)
require.NoError(t, err)
}
for {
s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
require.NoError(t, err)
fmt.Printf("Sectors: %d\n", len(s))
if len(s) >= n+existing {
break
}
build.Clock.Sleep(100 * time.Millisecond)
}
fmt.Printf("All sectors is fsm\n")
s, err := miner.SectorsList(ctx)
require.NoError(t, err)
toCheck := map[abi.SectorNumber]struct{}{}
for _, number := range s {
toCheck[number] = struct{}{}
}
return toCheck
}
func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
toCheck := startPledge(t, ctx, miner, n, existing, blockNotif)
for len(toCheck) > 0 {
flushSealingBatches(t, ctx, miner)
states := map[api.SectorState]int{}
for n := range toCheck {
st, err := miner.SectorsStatus(ctx, n, false)
require.NoError(t, err)
states[st.State]++
if st.State == api.SectorState(sealing.Proving) {
delete(toCheck, n)
}
if strings.Contains(string(st.State), "Fail") {
t.Fatal("sector in a failed state", st.State)
}
}
build.Clock.Sleep(100 * time.Millisecond)
fmt.Printf("WaitSeal: %d %+v\n", len(toCheck), states)
}
}

View File

@ -1,323 +0,0 @@
package test
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
logging "github.com/ipfs/go-log/v2"
"github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
"github.com/filecoin-project/lotus/node"
)
func init() {
logging.SetAllLoggers(logging.LevelInfo)
err := os.Setenv("BELLMAN_NO_GPU", "1")
if err != nil {
panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
}
build.InsecurePoStValidation = true
}
type StorageBuilder func(context.Context, *testing.T, abi.RegisteredSealProof, address.Address) TestStorageNode
type TestNode struct {
v1api.FullNode
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
Stb StorageBuilder
}
type TestStorageNode struct {
lapi.StorageMiner
// ListenAddr is the address on which an API server is listening, if an
// API server is created for this Node
ListenAddr multiaddr.Multiaddr
MineOne func(context.Context, miner.MineReq) error
Stop func(context.Context) error
}
var PresealGenesis = -1
const GenesisPreseals = 2
const TestSpt = abi.RegisteredSealProof_StackedDrg2KiBV1_1
// Options for setting up a mock storage miner
type StorageMiner struct {
Full int
Opts node.Option
Preseal int
}
type OptionGenerator func([]TestNode) node.Option
// Options for setting up a mock full node
type FullNodeOpts struct {
Lite bool // run node in "lite" mode
Opts OptionGenerator // generate dependency injection options
}
// APIBuilder is a function that the test suite invokes to provide
// test nodes and networks.
//
// The fullOpts array defines options for each full node.
// The storage array defines storage nodes; numbers in the array specify the
// index of the full node that each storage node 'belongs' to.
type APIBuilder func(t *testing.T, full []FullNodeOpts, storage []StorageMiner) ([]TestNode, []TestStorageNode)
type testSuite struct {
makeNodes APIBuilder
}
// TestApis is the entry point to the API test suite.
func TestApis(t *testing.T, b APIBuilder) {
ts := testSuite{
makeNodes: b,
}
t.Run("version", ts.testVersion)
t.Run("id", ts.testID)
t.Run("testConnectTwo", ts.testConnectTwo)
t.Run("testMining", ts.testMining)
t.Run("testMiningReal", ts.testMiningReal)
t.Run("testSearchMsg", ts.testSearchMsg)
t.Run("testNonGenesisMiner", ts.testNonGenesisMiner)
}
func DefaultFullOpts(nFull int) []FullNodeOpts {
full := make([]FullNodeOpts, nFull)
for i := range full {
full[i] = FullNodeOpts{
Opts: func(nodes []TestNode) node.Option {
return node.Options()
},
}
}
return full
}
var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}
var OneFull = DefaultFullOpts(1)
var TwoFull = DefaultFullOpts(2)
var FullNodeWithLatestActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
if upgradeHeight == -1 {
// Attention: Update this when introducing new actor versions or your tests will be sad
upgradeHeight = 4
}
return FullNodeOpts{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
// prepare for upgrade.
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: 3,
Migration: stmgr.UpgradeActorsV4,
}, {
Network: network.Version13,
Height: upgradeHeight,
Migration: stmgr.UpgradeActorsV5,
}})
},
}
}
var FullNodeWithSDRAt = func(calico, persian abi.ChainEpoch) FullNodeOpts {
return FullNodeOpts{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
Network: network.Version6,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version7,
Height: calico,
Migration: stmgr.UpgradeCalico,
}, {
Network: network.Version8,
Height: persian,
}})
},
}
}
var FullNodeWithV4ActorsAt = func(upgradeHeight abi.ChainEpoch) FullNodeOpts {
if upgradeHeight == -1 {
upgradeHeight = 3
}
return FullNodeOpts{
Opts: func(nodes []TestNode) node.Option {
return node.Override(new(stmgr.UpgradeSchedule), stmgr.UpgradeSchedule{{
// prepare for upgrade.
Network: network.Version9,
Height: 1,
Migration: stmgr.UpgradeActorsV2,
}, {
Network: network.Version10,
Height: 2,
Migration: stmgr.UpgradeActorsV3,
}, {
Network: network.Version12,
Height: upgradeHeight,
Migration: stmgr.UpgradeActorsV4,
}})
},
}
}
var MineNext = miner.MineReq{
InjectNulls: 0,
Done: func(bool, abi.ChainEpoch, error) {},
}
func (ts *testSuite) testVersion(t *testing.T) {
lapi.RunningNodeType = lapi.NodeFull
t.Cleanup(func() {
lapi.RunningNodeType = lapi.NodeUnknown
})
ctx := context.Background()
apis, _ := ts.makeNodes(t, OneFull, OneMiner)
napi := apis[0]
v, err := napi.Version(ctx)
if err != nil {
t.Fatal(err)
}
versions := strings.Split(v.Version, "+")
if len(versions) <= 0 {
t.Fatal("empty version")
}
require.Equal(t, versions[0], build.BuildVersion)
}
func (ts *testSuite) testSearchMsg(t *testing.T) {
apis, miners := ts.makeNodes(t, OneFull, OneMiner)
api := apis[0]
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
senderAddr, err := api.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
From: senderAddr,
To: senderAddr,
Value: big.Zero(),
}
bm := NewBlockMiner(ctx, t, miners[0], 100*time.Millisecond)
bm.MineBlocks()
defer bm.Stop()
sm, err := api.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal(err)
}
res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send message")
}
searchRes, err := api.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if searchRes.TipSet != res.TipSet {
t.Fatalf("search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet)
}
}
func (ts *testSuite) testID(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, OneFull, OneMiner)
api := apis[0]
id, err := api.ID(ctx)
if err != nil {
t.Fatal(err)
}
assert.Regexp(t, "^12", id.Pretty())
}
func (ts *testSuite) testConnectTwo(t *testing.T) {
ctx := context.Background()
apis, _ := ts.makeNodes(t, TwoFull, OneMiner)
p, err := apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 0 has a peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 0 {
t.Error("Node 1 has a peer")
}
addrs, err := apis[1].NetAddrsListen(ctx)
if err != nil {
t.Fatal(err)
}
if err := apis[0].NetConnect(ctx, addrs); err != nil {
t.Fatal(err)
}
p, err = apis[0].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
p, err = apis[1].NetPeers(ctx)
if err != nil {
t.Fatal(err)
}
if len(p) != 1 {
t.Error("Node 0 doesn't have 1 peer")
}
}
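A concrete node implementation opts into this suite by supplying an APIBuilder; a hypothetical wiring from that package's own tests (the builder name below is a placeholder, not something defined in this file):

	func TestAPISuite(t *testing.T) {
		// nodeBuilder is whatever APIBuilder the package under test provides,
		// e.g. a mock-sectorbuilder based builder; the name is illustrative only.
		test.TestApis(t, nodeBuilder)
	}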

View File

@ -1,87 +0,0 @@
package test
import (
"context"
"testing"
"time"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-address"
lapi "github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
)
func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) {
senderAddr, err := sender.WalletDefaultAddress(ctx)
if err != nil {
t.Fatal(err)
}
msg := &types.Message{
From: senderAddr,
To: addr,
Value: amount,
}
sm, err := sender.MpoolPushMessage(ctx, msg, nil)
if err != nil {
t.Fatal(err)
}
res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true)
if err != nil {
t.Fatal(err)
}
if res.Receipt.ExitCode != 0 {
t.Fatal("did not successfully send money")
}
}
func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) {
for i := 0; i < 1000; i++ {
var success bool
var err error
var epoch abi.ChainEpoch
wait := make(chan struct{})
mineErr := sn.MineOne(ctx, miner.MineReq{
Done: func(win bool, ep abi.ChainEpoch, e error) {
success = win
err = e
epoch = ep
wait <- struct{}{}
},
})
if mineErr != nil {
t.Fatal(mineErr)
}
<-wait
if err != nil {
t.Fatal(err)
}
if success {
// Wait until it shows up on the given full node's ChainHead
nloops := 50
for i := 0; i < nloops; i++ {
ts, err := fn.ChainHead(ctx)
if err != nil {
t.Fatal(err)
}
if ts.Height() == epoch {
break
}
if i == nloops-1 {
t.Fatal("block never managed to sync to node")
}
time.Sleep(time.Millisecond * 10)
}
if cb != nil {
cb(epoch)
}
return
}
t.Log("did not mine block, trying again", i)
}
t.Fatal("failed to mine 1000 times in a row...")
}
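For orientation, SendFunds blocks in StateWaitMsg, so it relies on blocks being produced while it waits (for example by the BlockMiner helper), whereas MineUntilBlock drives the chain itself. A brief usage sketch of each (addresses and amounts are illustrative):

	// With a BlockMiner already running in the background:
	addr, err := fn.WalletNew(ctx, types.KTBLS)
	if err != nil {
		t.Fatal(err)
	}
	SendFunds(ctx, t, fn, addr, abi.NewTokenAmount(1_000_000))

	// Without background mining, advance the chain explicitly instead:
	MineUntilBlock(ctx, t, fn, sn, func(epoch abi.ChainEpoch) {
		t.Logf("mined a block at epoch %d", epoch)
	})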

File diff suppressed because it is too large

View File

@ -5,6 +5,9 @@ import (
"fmt" "fmt"
"time" "time"
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
"github.com/filecoin-project/lotus/chain/types"
datatransfer "github.com/filecoin-project/go-data-transfer" datatransfer "github.com/filecoin-project/go-data-transfer"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -116,3 +119,79 @@ type ConnMgrInfo struct {
Tags map[string]int Tags map[string]int
Conns map[string]time.Time Conns map[string]time.Time
} }
type NodeStatus struct {
SyncStatus NodeSyncStatus
PeerStatus NodePeerStatus
ChainStatus NodeChainStatus
}
type NodeSyncStatus struct {
Epoch uint64
Behind uint64
}
type NodePeerStatus struct {
PeersToPublishMsgs int
PeersToPublishBlocks int
}
type NodeChainStatus struct {
BlocksPerTipsetLast100 float64
BlocksPerTipsetLastFinality float64
}
type CheckStatusCode int
//go:generate go run golang.org/x/tools/cmd/stringer -type=CheckStatusCode -trimprefix=CheckStatus
const (
_ CheckStatusCode = iota
// Message Checks
CheckStatusMessageSerialize
CheckStatusMessageSize
CheckStatusMessageValidity
CheckStatusMessageMinGas
CheckStatusMessageMinBaseFee
CheckStatusMessageBaseFee
CheckStatusMessageBaseFeeLowerBound
CheckStatusMessageBaseFeeUpperBound
CheckStatusMessageGetStateNonce
CheckStatusMessageNonce
CheckStatusMessageGetStateBalance
CheckStatusMessageBalance
)
type CheckStatus struct {
Code CheckStatusCode
OK bool
Err string
Hint map[string]interface{}
}
type MessageCheckStatus struct {
Cid cid.Cid
CheckStatus
}
type MessagePrototype struct {
Message types.Message
ValidNonce bool
}
type RetrievalInfo struct {
PayloadCID cid.Cid
ID retrievalmarket.DealID
PieceCID *cid.Cid
PricePerByte abi.TokenAmount
UnsealPrice abi.TokenAmount
Status retrievalmarket.DealStatus
Message string // more information about deal state, particularly errors
Provider peer.ID
BytesReceived uint64
BytesPaidFor uint64
TotalPaid abi.TokenAmount
TransferChannelID *datatransfer.ChannelID
DataTransfer *DataTransferChannel
}
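RetrievalInfo is surfaced both as a snapshot (ClientListRetrievals) and as a stream (ClientGetRetrievalUpdates), added to the FullNode interface further down in this change. A minimal consumer, assuming a connected v1 FullNode client named node and a ctx in scope (a sketch, not part of this diff):

updates, err := node.ClientGetRetrievalUpdates(ctx)
if err != nil {
	return err
}
for info := range updates {
	fmt.Printf("retrieval %d of %s: %d bytes received, %s paid\n",
		info.ID, info.PayloadCID, info.BytesReceived, info.TotalPaid)
}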

View File

@ -304,6 +304,8 @@ type FullNode interface {
ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error //perm:admin
// ClientStartDeal proposes a deal with a miner. // ClientStartDeal proposes a deal with a miner.
ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:admin
// ClientStatelessDeal proposes an offline deal to a miner and does not track it afterwards (fire-and-forget).
ClientStatelessDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) //perm:write
// ClientGetDealInfo returns the latest information about a given deal. // ClientGetDealInfo returns the latest information about a given deal.
ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read ClientGetDealInfo(context.Context, cid.Cid) (*api.DealInfo, error) //perm:read
// ClientListDeals returns information about the deals made by the local client. // ClientListDeals returns information about the deals made by the local client.
@ -324,6 +326,10 @@ type FullNode interface {
// of status updates. // of status updates.
ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin
// ClientQueryAsk returns a signed StorageAsk from the specified miner. // ClientQueryAsk returns a signed StorageAsk from the specified miner.
// ClientListRetrievals returns information about retrievals made by the local client
ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write
// ClientGetRetrievalUpdates returns a channel of status updates for retrieval deals
ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) //perm:write
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.StorageAsk, error) //perm:read
// ClientCalcCommP calculates the CommP and data size of the specified CID // ClientCalcCommP calculates the CommP and data size of the specified CID
ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read ClientDealPieceCID(ctx context.Context, root cid.Cid) (api.DataCIDSize, error) //perm:read
@ -623,7 +629,7 @@ type FullNode interface {
// proposal. This method of approval can be used to ensure you only approve // proposal. This method of approval can be used to ensure you only approve
// exactly the transaction you think you are. // exactly the transaction you think you are.
// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>, // It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
// <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message> // <sender address of the approve msg>, <method to call in the approved message>, <params to include in the proposed message>
MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) //perm:sign
// MsigCancel cancels a previously-proposed multisig message // MsigCancel cancels a previously-proposed multisig message

View File

@ -63,6 +63,7 @@ type Gateway interface {
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uint64) (*api.MsgLookup, error)
WalletBalance(context.Context, address.Address) (types.BigInt, error) WalletBalance(context.Context, address.Address) (types.BigInt, error)
Version(context.Context) (api.APIVersion, error)
} }
var _ Gateway = *new(FullNode) var _ Gateway = *new(FullNode)

View File

@ -97,6 +97,8 @@ type FullNodeStruct struct {
ClientGetDealUpdates func(p0 context.Context) (<-chan api.DealInfo, error) `perm:"write"` ClientGetDealUpdates func(p0 context.Context) (<-chan api.DealInfo, error) `perm:"write"`
ClientGetRetrievalUpdates func(p0 context.Context) (<-chan api.RetrievalInfo, error) `perm:"write"`
ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"` ClientHasLocal func(p0 context.Context, p1 cid.Cid) (bool, error) `perm:"write"`
ClientImport func(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) `perm:"admin"` ClientImport func(p0 context.Context, p1 api.FileRef) (*api.ImportRes, error) `perm:"admin"`
@ -107,6 +109,8 @@ type FullNodeStruct struct {
ClientListImports func(p0 context.Context) ([]api.Import, error) `perm:"write"` ClientListImports func(p0 context.Context) ([]api.Import, error) `perm:"write"`
ClientListRetrievals func(p0 context.Context) ([]api.RetrievalInfo, error) `perm:"write"`
ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) `perm:"read"` ClientMinerQueryOffer func(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) `perm:"read"`
ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"` ClientQueryAsk func(p0 context.Context, p1 peer.ID, p2 address.Address) (*storagemarket.StorageAsk, error) `perm:"read"`
@ -123,6 +127,8 @@ type FullNodeStruct struct {
ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"`
ClientStatelessDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"write"`
CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"`
GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"` GasEstimateFeeCap func(p0 context.Context, p1 *types.Message, p2 int64, p3 types.TipSetKey) (types.BigInt, error) `perm:"read"`
@ -443,6 +449,8 @@ type GatewayStruct struct {
StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) `` StateWaitMsg func(p0 context.Context, p1 cid.Cid, p2 uint64) (*api.MsgLookup, error) ``
Version func(p0 context.Context) (api.APIVersion, error) ``
WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) `` WalletBalance func(p0 context.Context, p1 address.Address) (types.BigInt, error) ``
} }
} }
@ -714,6 +722,14 @@ func (s *FullNodeStub) ClientGetDealUpdates(p0 context.Context) (<-chan api.Deal
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) {
return s.Internal.ClientGetRetrievalUpdates(p0)
}
func (s *FullNodeStub) ClientGetRetrievalUpdates(p0 context.Context) (<-chan api.RetrievalInfo, error) {
return nil, xerrors.New("method not supported")
}
func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) { func (s *FullNodeStruct) ClientHasLocal(p0 context.Context, p1 cid.Cid) (bool, error) {
return s.Internal.ClientHasLocal(p0, p1) return s.Internal.ClientHasLocal(p0, p1)
} }
@ -754,6 +770,14 @@ func (s *FullNodeStub) ClientListImports(p0 context.Context) ([]api.Import, erro
return *new([]api.Import), xerrors.New("method not supported") return *new([]api.Import), xerrors.New("method not supported")
} }
func (s *FullNodeStruct) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) {
return s.Internal.ClientListRetrievals(p0)
}
func (s *FullNodeStub) ClientListRetrievals(p0 context.Context) ([]api.RetrievalInfo, error) {
return *new([]api.RetrievalInfo), xerrors.New("method not supported")
}
func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) { func (s *FullNodeStruct) ClientMinerQueryOffer(p0 context.Context, p1 address.Address, p2 cid.Cid, p3 *cid.Cid) (api.QueryOffer, error) {
return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3) return s.Internal.ClientMinerQueryOffer(p0, p1, p2, p3)
} }
@ -818,6 +842,14 @@ func (s *FullNodeStub) ClientStartDeal(p0 context.Context, p1 *api.StartDealPara
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *FullNodeStruct) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
return s.Internal.ClientStatelessDeal(p0, p1)
}
func (s *FullNodeStub) ClientStatelessDeal(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) {
return nil, xerrors.New("method not supported")
}
func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error {
return s.Internal.CreateBackup(p0, p1) return s.Internal.CreateBackup(p0, p1)
} }
@ -2066,6 +2098,14 @@ func (s *GatewayStub) StateWaitMsg(p0 context.Context, p1 cid.Cid, p2 uint64) (*
return nil, xerrors.New("method not supported") return nil, xerrors.New("method not supported")
} }
func (s *GatewayStruct) Version(p0 context.Context) (api.APIVersion, error) {
return s.Internal.Version(p0)
}
func (s *GatewayStub) Version(p0 context.Context) (api.APIVersion, error) {
return *new(api.APIVersion), xerrors.New("method not supported")
}
func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) { func (s *GatewayStruct) WalletBalance(p0 context.Context, p1 address.Address) (types.BigInt, error) {
return s.Internal.WalletBalance(p0, p1) return s.Internal.WalletBalance(p0, p1)
} }

File diff suppressed because it is too large

View File

@ -3,7 +3,9 @@ package v0api
import ( import (
"context" "context"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
"golang.org/x/xerrors"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -57,4 +59,129 @@ func (w *WrapperV1Full) Version(ctx context.Context) (api.APIVersion, error) {
return ver, nil return ver, nil
} }
func (w *WrapperV1Full) executePrototype(ctx context.Context, p *api.MessagePrototype) (cid.Cid, error) {
sm, err := w.FullNode.MpoolPushMessage(ctx, &p.Message, nil)
if err != nil {
return cid.Undef, xerrors.Errorf("pushing message: %w", err)
}
return sm.Cid(), nil
}
func (w *WrapperV1Full) MsigCreate(ctx context.Context, req uint64, addrs []address.Address, duration abi.ChainEpoch, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) {
p, err := w.FullNode.MsigCreate(ctx, req, addrs, duration, val, src, gp)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
p, err := w.FullNode.MsigPropose(ctx, msig, to, amt, src, method, params)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (cid.Cid, error) {
p, err := w.FullNode.MsigApprove(ctx, msig, txID, src)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
p, err := w.FullNode.MsigApproveTxnHash(ctx, msig, txID, proposer, to, amt, src, method, params)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
p, err := w.FullNode.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigAddPropose(ctx context.Context, msig address.Address, src address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
p, err := w.FullNode.MsigAddPropose(ctx, msig, src, newAdd, inc)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigAddApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, newAdd address.Address, inc bool) (cid.Cid, error) {
p, err := w.FullNode.MsigAddApprove(ctx, msig, src, txID, proposer, newAdd, inc)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigAddCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, newAdd address.Address, inc bool) (cid.Cid, error) {
p, err := w.FullNode.MsigAddCancel(ctx, msig, src, txID, newAdd, inc)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
p, err := w.FullNode.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
p, err := w.FullNode.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
p, err := w.FullNode.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
func (w *WrapperV1Full) MsigRemoveSigner(ctx context.Context, msig address.Address, proposer address.Address, toRemove address.Address, decrease bool) (cid.Cid, error) {
p, err := w.FullNode.MsigRemoveSigner(ctx, msig, proposer, toRemove, decrease)
if err != nil {
return cid.Undef, xerrors.Errorf("creating prototype: %w", err)
}
return w.executePrototype(ctx, p)
}
var _ FullNode = &WrapperV1Full{} var _ FullNode = &WrapperV1Full{}

View File

@ -57,8 +57,8 @@ var (
FullAPIVersion0 = newVer(1, 3, 0) FullAPIVersion0 = newVer(1, 3, 0)
FullAPIVersion1 = newVer(2, 1, 0) FullAPIVersion1 = newVer(2, 1, 0)
MinerAPIVersion0 = newVer(1, 0, 1) MinerAPIVersion0 = newVer(1, 1, 0)
WorkerAPIVersion0 = newVer(1, 0, 0) WorkerAPIVersion0 = newVer(1, 1, 0)
) )
//nolint:varcheck,deadcode //nolint:varcheck,deadcode

View File

@ -26,6 +26,27 @@ func Wrap(proxyT, wrapperT, impl interface{}) interface{} {
})) }))
} }
for i := 0; i < proxy.Elem().NumField(); i++ {
if proxy.Elem().Type().Field(i).Name == "Internal" {
continue
}
subProxy := proxy.Elem().Field(i).FieldByName("Internal")
for i := 0; i < ri.NumMethod(); i++ {
mt := ri.Type().Method(i)
if subProxy.FieldByName(mt.Name).Kind() == reflect.Invalid {
continue
}
fn := ri.Method(i)
of := subProxy.FieldByName(mt.Name)
subProxy.FieldByName(mt.Name).Set(reflect.MakeFunc(of.Type(), func(args []reflect.Value) (results []reflect.Value) {
return fn.Call(args)
}))
}
}
wp := reflect.New(reflect.TypeOf(wrapperT).Elem()) wp := reflect.New(reflect.TypeOf(wrapperT).Elem())
wp.Elem().Field(0).Set(proxy) wp.Elem().Field(0).Set(proxy)
return wp.Interface() return wp.Interface()
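For orientation, the new loop also fills the Internal fields of embedded API structs, so a single Wrap call is enough to serve the v0 surface from a v1 implementation. A hedged sketch of a call site (the variable names and constructor are assumptions):

// Serve the v0 API from a v1 FullNode implementation.
var full api.FullNode = getFullNodeV1() // assumed constructor
v0 := api.Wrap(new(api.FullNodeStruct), new(v0api.WrapperV1Full), full).(v0api.FullNode)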

View File

@ -406,6 +406,11 @@ func (s *SplitStore) Close() error {
} }
func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error {
// Revert only.
if len(apply) == 0 {
return nil
}
s.mx.Lock() s.mx.Lock()
curTs := apply[len(apply)-1] curTs := apply[len(apply)-1]
epoch := curTs.Height() epoch := curTs.Height()

View File

@ -27,7 +27,7 @@ func init() {
} }
func testSplitStore(t *testing.T, cfg *Config) { func testSplitStore(t *testing.T, cfg *Config) {
chain := &mockChain{} chain := &mockChain{t: t}
// genesis // genesis
genBlock := mock.MkBlock(nil, 0, 0) genBlock := mock.MkBlock(nil, 0, 0)
genTs := mock.TipSet(genBlock) genTs := mock.TipSet(genBlock)
@ -169,6 +169,9 @@ func testSplitStore(t *testing.T, cfg *Config) {
t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt) t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt)
} }
} }
// Make sure we can revert without panicking.
chain.revert(2)
} }
func TestSplitStoreSimpleCompaction(t *testing.T) { func TestSplitStoreSimpleCompaction(t *testing.T) {
@ -191,6 +194,8 @@ func TestSplitStoreFullCompactionWithGC(t *testing.T) {
} }
type mockChain struct { type mockChain struct {
t testing.TB
sync.Mutex sync.Mutex
tipsets []*types.TipSet tipsets []*types.TipSet
listener func(revert []*types.TipSet, apply []*types.TipSet) error listener func(revert []*types.TipSet, apply []*types.TipSet) error
@ -204,7 +209,26 @@ func (c *mockChain) push(ts *types.TipSet) {
if c.listener != nil { if c.listener != nil {
err := c.listener(nil, []*types.TipSet{ts}) err := c.listener(nil, []*types.TipSet{ts})
if err != nil { if err != nil {
log.Errorf("mockchain: error dispatching listener: %s", err) c.t.Errorf("mockchain: error dispatching listener: %s", err)
}
}
}
func (c *mockChain) revert(count int) {
c.Lock()
revert := make([]*types.TipSet, count)
if count > len(c.tipsets) {
c.Unlock()
c.t.Fatalf("not enough tipsets to revert")
}
copy(revert, c.tipsets[len(c.tipsets)-count:])
c.tipsets = c.tipsets[:len(c.tipsets)-count]
c.Unlock()
if c.listener != nil {
err := c.listener(revert, nil)
if err != nil {
c.t.Errorf("mockchain: error dispatching listener: %s", err)
} }
} }
} }

View File

@ -2,28 +2,32 @@ package build
import ( import (
"context" "context"
"embed"
"path"
"strings" "strings"
"github.com/filecoin-project/lotus/lib/addrutil" "github.com/filecoin-project/lotus/lib/addrutil"
rice "github.com/GeertJohan/go.rice"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
) )
//go:embed bootstrap
var bootstrapfs embed.FS
func BuiltinBootstrap() ([]peer.AddrInfo, error) { func BuiltinBootstrap() ([]peer.AddrInfo, error) {
if DisableBuiltinAssets { if DisableBuiltinAssets {
return nil, nil return nil, nil
} }
b := rice.MustFindBox("bootstrap")
if BootstrappersFile != "" { if BootstrappersFile != "" {
spi := b.MustString(BootstrappersFile) spi, err := bootstrapfs.ReadFile(path.Join("bootstrap", BootstrappersFile))
if spi == "" { if err != nil {
return nil, err
}
if len(spi) == 0 {
return nil, nil return nil, nil
} }
return addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(spi), "\n")) return addrutil.ParseAddresses(context.TODO(), strings.Split(strings.TrimSpace(string(spi)), "\n"))
} }
return nil, nil return nil, nil
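The same rice-to-go:embed migration recurs in the genesis, OpenRPC and proof-params files below, relying on Go 1.16's embed package. The general replacement for a rice box lookup looks roughly like this sketch (the assets directory and helper name are hypothetical):

package build

import (
	"embed"
	"path"
)

//go:embed assets
var assetsFS embed.FS

// mustAsset mirrors rice's MustBytes: the files are compiled into the
// binary, so a read failure indicates a packaging bug.
func mustAsset(name string) []byte {
	b, err := assetsFS.ReadFile(path.Join("assets", name))
	if err != nil {
		panic(err)
	}
	return b
}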

View File

@ -1,2 +1,2 @@
/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWRkaF18SR3E6qL6dkGrozT8QJUV5VbhE9E7BZtPmHqdWJ /dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBbZd7Su9XfLUQ12RynGQ3ZmGY1nGqFntmqop9pLNJE6g
/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWJcJUc23WJjJHGSboGcU3t76z9Lb7CghrH2tiBiDCY4ux /dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWGKRzEY4tJFTmAmrYUpa1CVVohmV9YjJbC9v5XWY2gUji

View File

@ -0,0 +1,2 @@
/dns4/bootstrap-0.interop.fildev.network/tcp/1347/p2p/12D3KooWN86wA54r3v9M8bBYbc1vK9W1ehHDxVGPRaoeUYuXF8R7
/dns4/bootstrap-1.interop.fildev.network/tcp/1347/p2p/12D3KooWNZ41kev8mtBZgWe43qam1VX9pJyf87jnaisQP2urZZ2M

View File

@ -1,23 +1,23 @@
package build package build
import ( import (
rice "github.com/GeertJohan/go.rice" "embed"
"path"
logging "github.com/ipfs/go-log/v2" logging "github.com/ipfs/go-log/v2"
) )
// moved from now-defunct build/paramfetch.go // moved from now-defunct build/paramfetch.go
var log = logging.Logger("build") var log = logging.Logger("build")
//go:embed genesis
var genesisfs embed.FS
func MaybeGenesis() []byte { func MaybeGenesis() []byte {
builtinGen, err := rice.FindBox("genesis") genBytes, err := genesisfs.ReadFile(path.Join("genesis", GenesisFile))
if err != nil { if err != nil {
log.Warnf("loading built-in genesis: %s", err) log.Warnf("loading built-in genesis: %s", err)
return nil return nil
} }
genBytes, err := builtinGen.Bytes(GenesisFile)
if err != nil {
log.Warnf("loading built-in genesis: %s", err)
}
return genBytes return genBytes
} }

Binary file not shown.

Binary file not shown.

View File

@ -3,13 +3,15 @@ package build
import ( import (
"bytes" "bytes"
"compress/gzip" "compress/gzip"
"embed"
"encoding/json" "encoding/json"
rice "github.com/GeertJohan/go.rice"
apitypes "github.com/filecoin-project/lotus/api/types" apitypes "github.com/filecoin-project/lotus/api/types"
) )
//go:embed openrpc
var openrpcfs embed.FS
func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument { func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
zr, err := gzip.NewReader(bytes.NewBuffer(data)) zr, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil { if err != nil {
@ -28,16 +30,25 @@ func mustReadGzippedOpenRPCDocument(data []byte) apitypes.OpenRPCDocument {
} }
func OpenRPCDiscoverJSON_Full() apitypes.OpenRPCDocument { func OpenRPCDiscoverJSON_Full() apitypes.OpenRPCDocument {
data := rice.MustFindBox("openrpc").MustBytes("full.json.gz") data, err := openrpcfs.ReadFile("openrpc/full.json.gz")
if err != nil {
panic(err)
}
return mustReadGzippedOpenRPCDocument(data) return mustReadGzippedOpenRPCDocument(data)
} }
func OpenRPCDiscoverJSON_Miner() apitypes.OpenRPCDocument { func OpenRPCDiscoverJSON_Miner() apitypes.OpenRPCDocument {
data := rice.MustFindBox("openrpc").MustBytes("miner.json.gz") data, err := openrpcfs.ReadFile("openrpc/miner.json.gz")
if err != nil {
panic(err)
}
return mustReadGzippedOpenRPCDocument(data) return mustReadGzippedOpenRPCDocument(data)
} }
func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument { func OpenRPCDiscoverJSON_Worker() apitypes.OpenRPCDocument {
data := rice.MustFindBox("openrpc").MustBytes("worker.json.gz") data, err := openrpcfs.ReadFile("openrpc/worker.json.gz")
if err != nil {
panic(err)
}
return mustReadGzippedOpenRPCDocument(data) return mustReadGzippedOpenRPCDocument(data)
} }

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -1,11 +1,19 @@
package build package build
import rice "github.com/GeertJohan/go.rice" import (
_ "embed"
)
//go:embed proof-params/parameters.json
var params []byte
//go:embed proof-params/srs-inner-product.json
var srs []byte
func ParametersJSON() []byte { func ParametersJSON() []byte {
return rice.MustFindBox("proof-params").MustBytes("parameters.json") return params
} }
func SrsJSON() []byte { func SrsJSON() []byte {
return rice.MustFindBox("proof-params").MustBytes("srs-inner-product.json") return srs
} }

View File

@ -24,29 +24,29 @@ var UpgradeIgnitionHeight = abi.ChainEpoch(-2)
var UpgradeRefuelHeight = abi.ChainEpoch(-3) var UpgradeRefuelHeight = abi.ChainEpoch(-3)
var UpgradeTapeHeight = abi.ChainEpoch(-4) var UpgradeTapeHeight = abi.ChainEpoch(-4)
var UpgradeAssemblyHeight = abi.ChainEpoch(5) var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
var UpgradeLiftoffHeight = abi.ChainEpoch(-5) var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
var UpgradeKumquatHeight = abi.ChainEpoch(6) var UpgradeKumquatHeight = abi.ChainEpoch(-7)
var UpgradeCalicoHeight = abi.ChainEpoch(7) var UpgradeCalicoHeight = abi.ChainEpoch(-8)
var UpgradePersianHeight = abi.ChainEpoch(8) var UpgradePersianHeight = abi.ChainEpoch(-9)
var UpgradeOrangeHeight = abi.ChainEpoch(9) var UpgradeOrangeHeight = abi.ChainEpoch(-10)
var UpgradeClausHeight = abi.ChainEpoch(10) var UpgradeClausHeight = abi.ChainEpoch(-11)
var UpgradeTrustHeight = abi.ChainEpoch(11) var UpgradeTrustHeight = abi.ChainEpoch(-12)
var UpgradeNorwegianHeight = abi.ChainEpoch(12) var UpgradeNorwegianHeight = abi.ChainEpoch(-13)
var UpgradeTurboHeight = abi.ChainEpoch(13) var UpgradeTurboHeight = abi.ChainEpoch(-14)
var UpgradeHyperdriveHeight = abi.ChainEpoch(14) var UpgradeHyperdriveHeight = abi.ChainEpoch(-15)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet, 0: DrandMainnet,
} }
func init() { func init() {
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1, abi.RegisteredSealProof_StackedDrg8MiBV1)
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10)) policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10))

build/params_interop.go (new file, 104 lines)
View File

@ -0,0 +1,104 @@
// +build interopnet
package build
import (
"os"
"strconv"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/policy"
)
const BootstrappersFile = "interopnet.pi"
const GenesisFile = "interopnet.car"
var UpgradeBreezeHeight = abi.ChainEpoch(-1)
const BreezeGasTampingDuration = 0
var UpgradeSmokeHeight = abi.ChainEpoch(-1)
var UpgradeIgnitionHeight = abi.ChainEpoch(-2)
var UpgradeRefuelHeight = abi.ChainEpoch(-3)
var UpgradeTapeHeight = abi.ChainEpoch(-4)
var UpgradeAssemblyHeight = abi.ChainEpoch(-5)
var UpgradeLiftoffHeight = abi.ChainEpoch(-6)
var UpgradeKumquatHeight = abi.ChainEpoch(-7)
var UpgradeCalicoHeight = abi.ChainEpoch(-8)
var UpgradePersianHeight = abi.ChainEpoch(-9)
var UpgradeOrangeHeight = abi.ChainEpoch(-10)
var UpgradeClausHeight = abi.ChainEpoch(-11)
var UpgradeTrustHeight = abi.ChainEpoch(-12)
var UpgradeNorwegianHeight = abi.ChainEpoch(-13)
var UpgradeTurboHeight = abi.ChainEpoch(-14)
var UpgradeHyperdriveHeight = abi.ChainEpoch(-15)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandMainnet,
}
func init() {
policy.SetSupportedProofTypes(
abi.RegisteredSealProof_StackedDrg2KiBV1,
abi.RegisteredSealProof_StackedDrg8MiBV1,
abi.RegisteredSealProof_StackedDrg512MiBV1,
)
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
policy.SetPreCommitChallengeDelay(abi.ChainEpoch(10))
getUpgradeHeight := func(ev string, def abi.ChainEpoch) abi.ChainEpoch {
hs, found := os.LookupEnv(ev)
if found {
h, err := strconv.Atoi(hs)
if err != nil {
log.Panicf("failed to parse %s env var", ev)
}
return abi.ChainEpoch(h)
}
return def
}
UpgradeBreezeHeight = getUpgradeHeight("LOTUS_BREEZE_HEIGHT", UpgradeBreezeHeight)
UpgradeSmokeHeight = getUpgradeHeight("LOTUS_SMOKE_HEIGHT", UpgradeSmokeHeight)
UpgradeIgnitionHeight = getUpgradeHeight("LOTUS_IGNITION_HEIGHT", UpgradeIgnitionHeight)
UpgradeRefuelHeight = getUpgradeHeight("LOTUS_REFUEL_HEIGHT", UpgradeRefuelHeight)
UpgradeTapeHeight = getUpgradeHeight("LOTUS_TAPE_HEIGHT", UpgradeTapeHeight)
UpgradeAssemblyHeight = getUpgradeHeight("LOTUS_ACTORSV2_HEIGHT", UpgradeAssemblyHeight)
UpgradeLiftoffHeight = getUpgradeHeight("LOTUS_LIFTOFF_HEIGHT", UpgradeLiftoffHeight)
UpgradeKumquatHeight = getUpgradeHeight("LOTUS_KUMQUAT_HEIGHT", UpgradeKumquatHeight)
UpgradeCalicoHeight = getUpgradeHeight("LOTUS_CALICO_HEIGHT", UpgradeCalicoHeight)
UpgradePersianHeight = getUpgradeHeight("LOTUS_PERSIAN_HEIGHT", UpgradePersianHeight)
UpgradeOrangeHeight = getUpgradeHeight("LOTUS_ORANGE_HEIGHT", UpgradeOrangeHeight)
UpgradeClausHeight = getUpgradeHeight("LOTUS_CLAUS_HEIGHT", UpgradeClausHeight)
UpgradeTrustHeight = getUpgradeHeight("LOTUS_ACTORSV3_HEIGHT", UpgradeTrustHeight)
UpgradeNorwegianHeight = getUpgradeHeight("LOTUS_NORWEGIAN_HEIGHT", UpgradeNorwegianHeight)
UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight)
UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight)
BuildType |= BuildInteropnet
SetAddressNetwork(address.Testnet)
Devnet = true
}
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
const PropagationDelaySecs = uint64(6)
// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 2
var WhitelistedBlock = cid.Undef

View File

@ -4,6 +4,7 @@
// +build !calibnet // +build !calibnet
// +build !nerpanet // +build !nerpanet
// +build !butterflynet // +build !butterflynet
// +build !interopnet
package build package build
@ -13,7 +14,6 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
) )
@ -66,8 +66,6 @@ const UpgradeTurboHeight = 712320
var UpgradeHyperdriveHeight = abi.ChainEpoch(892800) var UpgradeHyperdriveHeight = abi.ChainEpoch(892800)
func init() { func init() {
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
SetAddressNetwork(address.Mainnet) SetAddressNetwork(address.Mainnet)
} }

View File

@ -6,4 +6,5 @@ import (
_ "github.com/GeertJohan/go.rice/rice" _ "github.com/GeertJohan/go.rice/rice"
_ "github.com/golang/mock/mockgen" _ "github.com/golang/mock/mockgen"
_ "github.com/whyrusleeping/bencher" _ "github.com/whyrusleeping/bencher"
_ "golang.org/x/tools/cmd/stringer"
) )

View File

@ -1,14 +1,17 @@
package build package build
import "os"
var CurrentCommit string var CurrentCommit string
var BuildType int var BuildType int
const ( const (
BuildDefault = 0 BuildDefault = 0
BuildMainnet = 0x1 BuildMainnet = 0x1
Build2k = 0x2 Build2k = 0x2
BuildDebug = 0x3 BuildDebug = 0x3
BuildCalibnet = 0x4 BuildCalibnet = 0x4
BuildInteropnet = 0x5
) )
func buildType() string { func buildType() string {
@ -23,14 +26,20 @@ func buildType() string {
return "+debug" return "+debug"
case BuildCalibnet: case BuildCalibnet:
return "+calibnet" return "+calibnet"
case BuildInteropnet:
return "+interopnet"
default: default:
return "+huh?" return "+huh?"
} }
} }
// BuildVersion is the local build version, set by build system // BuildVersion is the local build version, set by build system
const BuildVersion = "1.10.0" const BuildVersion = "1.11.1-dev"
func UserVersion() string { func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
return BuildVersion
}
return BuildVersion + buildType() + CurrentCommit return BuildVersion + buildType() + CurrentCommit
} }
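The resulting version strings, with an illustrative commit suffix (the hash and the +git. prefix come from the build system and are assumptions here):

// LOTUS_VERSION_IGNORE_COMMIT=1         -> "1.11.1-dev"
// default build on the interop network  -> "1.11.1-dev+interopnet+git.abc1234"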

View File

@ -6,34 +6,26 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strconv"
"text/template" "text/template"
lotusactors "github.com/filecoin-project/lotus/chain/actors"
"golang.org/x/xerrors" "golang.org/x/xerrors"
) )
var latestVersion = 5
var versions = []int{0, 2, 3, 4, latestVersion}
var versionImports = map[int]string{
0: "/",
2: "/v2/",
3: "/v3/",
4: "/v4/",
latestVersion: "/v5/",
}
var actors = map[string][]int{ var actors = map[string][]int{
"account": versions, "account": lotusactors.Versions,
"cron": versions, "cron": lotusactors.Versions,
"init": versions, "init": lotusactors.Versions,
"market": versions, "market": lotusactors.Versions,
"miner": versions, "miner": lotusactors.Versions,
"multisig": versions, "multisig": lotusactors.Versions,
"paych": versions, "paych": lotusactors.Versions,
"power": versions, "power": lotusactors.Versions,
"reward": versions, "system": lotusactors.Versions,
"verifreg": versions, "reward": lotusactors.Versions,
"verifreg": lotusactors.Versions,
} }
func main() { func main() {
@ -72,14 +64,14 @@ func generateAdapters() error {
} }
tpl := template.Must(template.New("").Funcs(template.FuncMap{ tpl := template.Must(template.New("").Funcs(template.FuncMap{
"import": func(v int) string { return versionImports[v] }, "import": func(v int) string { return getVersionImports()[v] },
}).Parse(string(af))) }).Parse(string(af)))
var b bytes.Buffer var b bytes.Buffer
err = tpl.Execute(&b, map[string]interface{}{ err = tpl.Execute(&b, map[string]interface{}{
"versions": versions, "versions": versions,
"latestVersion": latestVersion, "latestVersion": lotusactors.LatestVersion,
}) })
if err != nil { if err != nil {
return err return err
@ -104,14 +96,14 @@ func generateState(actDir string) error {
return xerrors.Errorf("loading state adapter template: %w", err) return xerrors.Errorf("loading state adapter template: %w", err)
} }
for _, version := range versions { for _, version := range lotusactors.Versions {
tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af))) tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af)))
var b bytes.Buffer var b bytes.Buffer
err := tpl.Execute(&b, map[string]interface{}{ err := tpl.Execute(&b, map[string]interface{}{
"v": version, "v": version,
"import": versionImports[version], "import": getVersionImports()[version],
}) })
if err != nil { if err != nil {
return err return err
@ -135,14 +127,14 @@ func generateMessages(actDir string) error {
return xerrors.Errorf("loading message adapter template: %w", err) return xerrors.Errorf("loading message adapter template: %w", err)
} }
for _, version := range versions { for _, version := range lotusactors.Versions {
tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af))) tpl := template.Must(template.New("").Funcs(template.FuncMap{}).Parse(string(af)))
var b bytes.Buffer var b bytes.Buffer
err := tpl.Execute(&b, map[string]interface{}{ err := tpl.Execute(&b, map[string]interface{}{
"v": version, "v": version,
"import": versionImports[version], "import": getVersionImports()[version],
}) })
if err != nil { if err != nil {
return err return err
@ -168,13 +160,13 @@ func generatePolicy(policyPath string) error {
} }
tpl := template.Must(template.New("").Funcs(template.FuncMap{ tpl := template.Must(template.New("").Funcs(template.FuncMap{
"import": func(v int) string { return versionImports[v] }, "import": func(v int) string { return getVersionImports()[v] },
}).Parse(string(pf))) }).Parse(string(pf)))
var b bytes.Buffer var b bytes.Buffer
err = tpl.Execute(&b, map[string]interface{}{ err = tpl.Execute(&b, map[string]interface{}{
"versions": versions, "versions": lotusactors.Versions,
"latestVersion": latestVersion, "latestVersion": lotusactors.LatestVersion,
}) })
if err != nil { if err != nil {
return err return err
@ -199,13 +191,13 @@ func generateBuiltin(builtinPath string) error {
} }
tpl := template.Must(template.New("").Funcs(template.FuncMap{ tpl := template.Must(template.New("").Funcs(template.FuncMap{
"import": func(v int) string { return versionImports[v] }, "import": func(v int) string { return getVersionImports()[v] },
}).Parse(string(bf))) }).Parse(string(bf)))
var b bytes.Buffer var b bytes.Buffer
err = tpl.Execute(&b, map[string]interface{}{ err = tpl.Execute(&b, map[string]interface{}{
"versions": versions, "versions": lotusactors.Versions,
"latestVersion": latestVersion, "latestVersion": lotusactors.LatestVersion,
}) })
if err != nil { if err != nil {
return err return err
@ -217,3 +209,16 @@ func generateBuiltin(builtinPath string) error {
return nil return nil
} }
func getVersionImports() map[int]string {
versionImports := make(map[int]string, lotusactors.LatestVersion)
for _, v := range lotusactors.Versions {
if v == 0 {
versionImports[v] = "/"
} else {
versionImports[v] = "/v" + strconv.Itoa(v) + "/"
}
}
return versionImports
}
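With lotusactors.Versions covering 0, 2, 3, 4 and 5 at the time of this change, getVersionImports reproduces the map that used to be hard-coded above:

imports := getVersionImports()
// imports == map[int]string{0: "/", 2: "/v2/", 3: "/v3/", 4: "/v4/", 5: "/v5/"}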

View File

@ -1,6 +1,7 @@
package account package account
import ( import (
"github.com/filecoin-project/lotus/chain/actors"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -69,8 +70,54 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, error) {
switch av {
case actors.Version0:
return make0(store, addr)
case actors.Version2:
return make2(store, addr)
case actors.Version3:
return make3(store, addr)
case actors.Version4:
return make4(store, addr)
case actors.Version5:
return make5(store, addr)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
case actors.Version0:
return builtin0.AccountActorCodeID, nil
case actors.Version2:
return builtin2.AccountActorCodeID, nil
case actors.Version3:
return builtin3.AccountActorCodeID, nil
case actors.Version4:
return builtin4.AccountActorCodeID, nil
case actors.Version5:
return builtin5.AccountActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
PubkeyAddress() (address.Address, error) PubkeyAddress() (address.Address, error)
GetState() interface{}
} }
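MakeState, GetActorCodeID and GetState together let a caller (genesis construction or a test) mint a versioned account actor. A hedged sketch, where store, ctx and pubkeyAddr are assumed to exist and account, actors and types refer to the lotus packages shown above:

st, err := account.MakeState(store, actors.Version5, pubkeyAddr)
if err != nil {
	return err
}
head, err := store.Put(ctx, st.GetState())
if err != nil {
	return err
}
code, err := account.GetActorCodeID(actors.Version5)
if err != nil {
	return err
}
act := &types.Actor{Code: code, Head: head, Balance: types.NewInt(0)}
_ = act // the caller would set this into the state tree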

View File

@ -1,6 +1,7 @@
package account package account
import ( import (
"github.com/filecoin-project/lotus/chain/actors"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -34,8 +35,30 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return make{{.}}(store, addr)
{{end}}
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return builtin{{.}}.AccountActorCodeID, nil
{{end}}
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
PubkeyAddress() (address.Address, error) PubkeyAddress() (address.Address, error)
GetState() interface{}
} }

View File

@ -20,6 +20,12 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make{{.v}}(store adt.Store, addr address.Address) (State, error) {
out := state{{.v}}{store: store}
out.State = account{{.v}}.State{Address:addr}
return &out, nil
}
type state{{.v}} struct { type state{{.v}} struct {
account{{.v}}.State account{{.v}}.State
store adt.Store store adt.Store
@ -28,3 +34,7 @@ type state{{.v}} struct {
func (s *state{{.v}}) PubkeyAddress() (address.Address, error) { func (s *state{{.v}}) PubkeyAddress() (address.Address, error) {
return s.Address, nil return s.Address, nil
} }
func (s *state{{.v}}) GetState() interface{} {
return &s.State
}

View File

@ -20,6 +20,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make0(store adt.Store, addr address.Address) (State, error) {
out := state0{store: store}
out.State = account0.State{Address: addr}
return &out, nil
}
type state0 struct { type state0 struct {
account0.State account0.State
store adt.Store store adt.Store
@ -28,3 +34,7 @@ type state0 struct {
func (s *state0) PubkeyAddress() (address.Address, error) { func (s *state0) PubkeyAddress() (address.Address, error) {
return s.Address, nil return s.Address, nil
} }
func (s *state0) GetState() interface{} {
return &s.State
}

View File

@ -20,6 +20,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make2(store adt.Store, addr address.Address) (State, error) {
out := state2{store: store}
out.State = account2.State{Address: addr}
return &out, nil
}
type state2 struct { type state2 struct {
account2.State account2.State
store adt.Store store adt.Store
@ -28,3 +34,7 @@ type state2 struct {
func (s *state2) PubkeyAddress() (address.Address, error) { func (s *state2) PubkeyAddress() (address.Address, error) {
return s.Address, nil return s.Address, nil
} }
func (s *state2) GetState() interface{} {
return &s.State
}

View File

@ -20,6 +20,12 @@ func load3(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make3(store adt.Store, addr address.Address) (State, error) {
out := state3{store: store}
out.State = account3.State{Address: addr}
return &out, nil
}
type state3 struct { type state3 struct {
account3.State account3.State
store adt.Store store adt.Store
@ -28,3 +34,7 @@ type state3 struct {
func (s *state3) PubkeyAddress() (address.Address, error) { func (s *state3) PubkeyAddress() (address.Address, error) {
return s.Address, nil return s.Address, nil
} }
func (s *state3) GetState() interface{} {
return &s.State
}

View File

@ -20,6 +20,12 @@ func load4(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make4(store adt.Store, addr address.Address) (State, error) {
out := state4{store: store}
out.State = account4.State{Address: addr}
return &out, nil
}
type state4 struct { type state4 struct {
account4.State account4.State
store adt.Store store adt.Store
@ -28,3 +34,7 @@ type state4 struct {
func (s *state4) PubkeyAddress() (address.Address, error) { func (s *state4) PubkeyAddress() (address.Address, error) {
return s.Address, nil return s.Address, nil
} }
func (s *state4) GetState() interface{} {
return &s.State
}

View File

@ -20,6 +20,12 @@ func load5(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make5(store adt.Store, addr address.Address) (State, error) {
out := state5{store: store}
out.State = account5.State{Address: addr}
return &out, nil
}
type state5 struct { type state5 struct {
account5.State account5.State
store adt.Store store adt.Store
@ -28,3 +34,7 @@ type state5 struct {
func (s *state5) PubkeyAddress() (address.Address, error) { func (s *state5) PubkeyAddress() (address.Address, error) {
return s.Address, nil return s.Address, nil
} }
func (s *state5) GetState() interface{} {
return &s.State
}

View File

@ -6,9 +6,9 @@ import (
"golang.org/x/xerrors" "golang.org/x/xerrors"
{{range .versions}} {{range .versions}}
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
smoothing{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/util/smoothing" smoothing{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/util/smoothing"
{{end}} {{end}}
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
@ -17,7 +17,7 @@ import (
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
miner{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/miner" miner{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin/miner"
proof{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/runtime/proof" proof{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/runtime/proof"
) )
var SystemActorAddr = builtin{{.latestVersion}}.SystemActorAddr var SystemActorAddr = builtin{{.latestVersion}}.SystemActorAddr
@ -53,13 +53,13 @@ func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight,
} }
{{range .versions}} {{range .versions}}
func FromV{{.}}FilterEstimate(v{{.}} smoothing{{.}}.FilterEstimate) FilterEstimate { func FromV{{.}}FilterEstimate(v{{.}} smoothing{{.}}.FilterEstimate) FilterEstimate {
{{if (eq . 0)}} {{if (eq . 0)}}
return (FilterEstimate)(v{{.}}) //nolint:unconvert return (FilterEstimate)(v{{.}}) //nolint:unconvert
{{else}} {{else}}
return (FilterEstimate)(v{{.}}) return (FilterEstimate)(v{{.}})
{{end}} {{end}}
} }
{{end}} {{end}}
type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error)
@ -80,58 +80,58 @@ func Load(store adt.Store, act *types.Actor) (cbor.Marshaler, error) {
func ActorNameByCode(c cid.Cid) string { func ActorNameByCode(c cid.Cid) string {
switch { switch {
{{range .versions}} {{range .versions}}
case builtin{{.}}.IsBuiltinActor(c): case builtin{{.}}.IsBuiltinActor(c):
return builtin{{.}}.ActorNameByCode(c) return builtin{{.}}.ActorNameByCode(c)
{{end}} {{end}}
default: default:
return "<unknown>" return "<unknown>"
} }
} }
func IsBuiltinActor(c cid.Cid) bool { func IsBuiltinActor(c cid.Cid) bool {
{{range .versions}} {{range .versions}}
if builtin{{.}}.IsBuiltinActor(c) { if builtin{{.}}.IsBuiltinActor(c) {
return true return true
} }
{{end}} {{end}}
return false return false
} }
func IsAccountActor(c cid.Cid) bool { func IsAccountActor(c cid.Cid) bool {
{{range .versions}} {{range .versions}}
if c == builtin{{.}}.AccountActorCodeID { if c == builtin{{.}}.AccountActorCodeID {
return true return true
} }
{{end}} {{end}}
return false return false
} }
func IsStorageMinerActor(c cid.Cid) bool { func IsStorageMinerActor(c cid.Cid) bool {
{{range .versions}} {{range .versions}}
if c == builtin{{.}}.StorageMinerActorCodeID { if c == builtin{{.}}.StorageMinerActorCodeID {
return true return true
} }
{{end}} {{end}}
return false return false
} }
func IsMultisigActor(c cid.Cid) bool { func IsMultisigActor(c cid.Cid) bool {
{{range .versions}} {{range .versions}}
if c == builtin{{.}}.MultisigActorCodeID { if c == builtin{{.}}.MultisigActorCodeID {
return true return true
} }
{{end}} {{end}}
return false return false
} }
func IsPaymentChannelActor(c cid.Cid) bool { func IsPaymentChannelActor(c cid.Cid) bool {
{{range .versions}} {{range .versions}}
if c == builtin{{.}}.PaymentChannelActorCodeID { if c == builtin{{.}}.PaymentChannelActorCodeID {
return true return true
} }
{{end}} {{end}}
return false return false
} }
func makeAddress(addr string) address.Address { func makeAddress(addr string) address.Address {

View File

@ -1,10 +1,42 @@
package cron package cron
import ( import (
builtin{{.latestVersion}} "github.com/filecoin-project/specs-actors{{import .latestVersion}}actors/builtin" "github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"golang.org/x/xerrors"
"github.com/ipfs/go-cid"
{{range .versions}}
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
{{end}}
) )
func MakeState(store adt.Store, av actors.Version) (State, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return make{{.}}(store)
{{end}}
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return builtin{{.}}.CronActorCodeID, nil
{{end}}
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
var ( var (
Address = builtin{{.latestVersion}}.CronActorAddr Address = builtin{{.latestVersion}}.CronActorAddr
Methods = builtin{{.latestVersion}}.MethodsCron Methods = builtin{{.latestVersion}}.MethodsCron
) )
type State interface {
GetState() interface{}
}

View File

@ -1,10 +1,72 @@
package cron package cron
import ( import (
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
) )
func MakeState(store adt.Store, av actors.Version) (State, error) {
switch av {
case actors.Version0:
return make0(store)
case actors.Version2:
return make2(store)
case actors.Version3:
return make3(store)
case actors.Version4:
return make4(store)
case actors.Version5:
return make5(store)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
case actors.Version0:
return builtin0.CronActorCodeID, nil
case actors.Version2:
return builtin2.CronActorCodeID, nil
case actors.Version3:
return builtin3.CronActorCodeID, nil
case actors.Version4:
return builtin4.CronActorCodeID, nil
case actors.Version5:
return builtin5.CronActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
var ( var (
Address = builtin5.CronActorAddr Address = builtin5.CronActorAddr
Methods = builtin5.MethodsCron Methods = builtin5.MethodsCron
) )
type State interface {
GetState() interface{}
}

View File

@ -0,0 +1,35 @@
package cron
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
cron{{.v}} "github.com/filecoin-project/specs-actors{{.import}}actors/builtin/cron"
)
var _ State = (*state{{.v}})(nil)
func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
out := state{{.v}}{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make{{.v}}(store adt.Store) (State, error) {
out := state{{.v}}{store: store}
out.State = *cron{{.v}}.ConstructState(cron{{.v}}.BuiltInEntries())
return &out, nil
}
type state{{.v}} struct {
cron{{.v}}.State
store adt.Store
}
func (s *state{{.v}}) GetState() interface{} {
return &s.State
}

View File

@ -0,0 +1,35 @@
package cron
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
cron0 "github.com/filecoin-project/specs-actors/actors/builtin/cron"
)
var _ State = (*state0)(nil)
func load0(store adt.Store, root cid.Cid) (State, error) {
out := state0{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make0(store adt.Store) (State, error) {
out := state0{store: store}
out.State = *cron0.ConstructState(cron0.BuiltInEntries())
return &out, nil
}
type state0 struct {
cron0.State
store adt.Store
}
func (s *state0) GetState() interface{} {
return &s.State
}

View File

@ -0,0 +1,35 @@
package cron
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
cron2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/cron"
)
var _ State = (*state2)(nil)
func load2(store adt.Store, root cid.Cid) (State, error) {
out := state2{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make2(store adt.Store) (State, error) {
out := state2{store: store}
out.State = *cron2.ConstructState(cron2.BuiltInEntries())
return &out, nil
}
type state2 struct {
cron2.State
store adt.Store
}
func (s *state2) GetState() interface{} {
return &s.State
}

View File

@ -0,0 +1,35 @@
package cron
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
cron3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/cron"
)
var _ State = (*state3)(nil)
func load3(store adt.Store, root cid.Cid) (State, error) {
out := state3{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make3(store adt.Store) (State, error) {
out := state3{store: store}
out.State = *cron3.ConstructState(cron3.BuiltInEntries())
return &out, nil
}
type state3 struct {
cron3.State
store adt.Store
}
func (s *state3) GetState() interface{} {
return &s.State
}

View File

@ -0,0 +1,35 @@
package cron
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
cron4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/cron"
)
var _ State = (*state4)(nil)
func load4(store adt.Store, root cid.Cid) (State, error) {
out := state4{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make4(store adt.Store) (State, error) {
out := state4{store: store}
out.State = *cron4.ConstructState(cron4.BuiltInEntries())
return &out, nil
}
type state4 struct {
cron4.State
store adt.Store
}
func (s *state4) GetState() interface{} {
return &s.State
}

View File

@ -0,0 +1,35 @@
package cron
import (
"github.com/ipfs/go-cid"
"github.com/filecoin-project/lotus/chain/actors/adt"
cron5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/cron"
)
var _ State = (*state5)(nil)
func load5(store adt.Store, root cid.Cid) (State, error) {
out := state5{store: store}
err := store.Get(store.Context(), root, &out)
if err != nil {
return nil, err
}
return &out, nil
}
func make5(store adt.Store) (State, error) {
out := state5{store: store}
out.State = *cron5.ConstructState(cron5.BuiltInEntries())
return &out, nil
}
type state5 struct {
cron5.State
store adt.Store
}
func (s *state5) GetState() interface{} {
return &s.State
}

View File

@ -1,6 +1,7 @@
package init package init
import ( import (
"github.com/filecoin-project/lotus/chain/actors"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -39,6 +40,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
func MakeState(store adt.Store, av actors.Version, networkName string) (State, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return make{{.}}(store, networkName)
{{end}}
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return builtin{{.}}.InitActorCodeID, nil
{{end}}
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
@ -56,5 +78,12 @@ type State interface {
// Sets the network's name. This should only be used on upgrade/fork. // Sets the network's name. This should only be used on upgrade/fork.
SetNetworkName(name string) error SetNetworkName(name string) error
addressMap() (adt.Map, error) // Sets the next ID for the init actor. This should only be used for testing.
SetNextID(id abi.ActorID) error
// Sets the address map for the init actor. This should only be used for testing.
SetAddressMap(mcid cid.Cid) error
AddressMap() (adt.Map, error)
GetState() interface{}
} }

View File

@ -11,12 +11,12 @@ import (
) )
func DiffAddressMap(pre, cur State) (*AddressMapChanges, error) { func DiffAddressMap(pre, cur State) (*AddressMapChanges, error) {
prem, err := pre.addressMap() prem, err := pre.AddressMap()
if err != nil { if err != nil {
return nil, err return nil, err
} }
curm, err := cur.addressMap() curm, err := cur.AddressMap()
if err != nil { if err != nil {
return nil, err return nil, err
} }
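With addressMap() promoted to the exported AddressMap(), DiffAddressMap remains usable from outside the package. A hedged sketch of the intended call pattern; the init_ import alias, the log import, and the two pre-loaded states are assumptions, not part of this diff:

// logAddressMapDiff is a hypothetical helper: report how the init actor's
// address map changed between two loaded states (e.g. across a tipset).
func logAddressMapDiff(pre, cur init_.State) error {
	changes, err := init_.DiffAddressMap(pre, cur)
	if err != nil {
		return err
	}
	log.Printf("init address map diff: %+v", changes)
	return nil
}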

View File

@ -1,6 +1,7 @@
package init package init
import ( import (
"github.com/filecoin-project/lotus/chain/actors"
"golang.org/x/xerrors" "golang.org/x/xerrors"
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
@ -74,6 +75,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
func MakeState(store adt.Store, av actors.Version, networkName string) (State, error) {
switch av {
case actors.Version0:
return make0(store, networkName)
case actors.Version2:
return make2(store, networkName)
case actors.Version3:
return make3(store, networkName)
case actors.Version4:
return make4(store, networkName)
case actors.Version5:
return make5(store, networkName)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
case actors.Version0:
return builtin0.InitActorCodeID, nil
case actors.Version2:
return builtin2.InitActorCodeID, nil
case actors.Version3:
return builtin3.InitActorCodeID, nil
case actors.Version4:
return builtin4.InitActorCodeID, nil
case actors.Version5:
return builtin5.InitActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
@ -91,5 +137,12 @@ type State interface {
// Sets the network's name. This should only be used on upgrade/fork. // Sets the network's name. This should only be used on upgrade/fork.
SetNetworkName(name string) error SetNetworkName(name string) error
addressMap() (adt.Map, error) // Sets the next ID for the init actor. This should only be used for testing.
SetNextID(id abi.ActorID) error
// Sets the address map for the init actor. This should only be used for testing.
SetAddressMap(mcid cid.Cid) error
AddressMap() (adt.Map, error)
GetState() interface{}
} }
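The new MakeState/GetActorCodeID pair is what lets callers assemble a fresh init actor without reaching into a specific specs-actors version. A hedged sketch of that flow for a test genesis; the init_ alias, the surrounding store, and the zero balance are assumptions:

// buildInitActor is a hypothetical helper: construct a v5 init actor state,
// persist it, and wrap the resulting head and code CID in a types.Actor.
func buildInitActor(store adt.Store, networkName string) (*types.Actor, error) {
	st, err := init_.MakeState(store, actors.Version5, networkName)
	if err != nil {
		return nil, err
	}
	head, err := store.Put(store.Context(), st.GetState())
	if err != nil {
		return nil, err
	}
	code, err := init_.GetActorCodeID(actors.Version5)
	if err != nil {
		return nil, err
	}
	return &types.Actor{Code: code, Head: head, Balance: types.NewInt(0)}, nil
}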

View File

@ -29,6 +29,26 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make{{.v}}(store adt.Store, networkName string) (State, error) {
out := state{{.v}}{store: store}
{{if (le .v 2)}}
mr, err := adt{{.v}}.MakeEmptyMap(store).Root()
if err != nil {
return nil, err
}
out.State = *init{{.v}}.ConstructState(mr, networkName)
{{else}}
s, err := init{{.v}}.ConstructState(store, networkName)
if err != nil {
return nil, err
}
out.State = *s
{{end}}
return &out, nil
}
type state{{.v}} struct { type state{{.v}} struct {
init{{.v}}.State init{{.v}}.State
store adt.Store store adt.Store
@ -66,6 +86,11 @@ func (s *state{{.v}}) SetNetworkName(name string) error {
return nil return nil
} }
func (s *state{{.v}}) SetNextID(id abi.ActorID) error {
s.State.NextID = id
return nil
}
func (s *state{{.v}}) Remove(addrs ...address.Address) (err error) { func (s *state{{.v}}) Remove(addrs ...address.Address) (err error) {
m, err := adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) m, err := adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
if err != nil { if err != nil {
@ -84,6 +109,15 @@ func (s *state{{.v}}) Remove(addrs ...address.Address) (err error) {
return nil return nil
} }
func (s *state{{.v}}) addressMap() (adt.Map, error) { func (s *state{{.v}}) SetAddressMap(mcid cid.Cid) error {
return adt{{.v}}.AsMap(s.store, s.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}}) s.State.AddressMap = mcid
return nil
} }
func (s *state{{.v}}) AddressMap() (adt.Map, error) {
return adt{{.v}}.AsMap(s.store, s.State.AddressMap{{if (ge .v 3)}}, builtin{{.v}}.DefaultHamtBitwidth{{end}})
}
func (s *state{{.v}}) GetState() interface{} {
return &s.State
}
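The two new testing-only setters are meant to be used together when faking init actor state. A short hedged sketch, where st is a State obtained from MakeState and emptyMapCid is an assumed CID of a prebuilt empty HAMT:

// testing only: start user IDs at 1000 and point the actor at a prebuilt map
if err := st.SetNextID(abi.ActorID(1000)); err != nil {
	return err
}
if err := st.SetAddressMap(emptyMapCid); err != nil {
	return err
}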

View File

@ -25,6 +25,19 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make0(store adt.Store, networkName string) (State, error) {
out := state0{store: store}
mr, err := adt0.MakeEmptyMap(store).Root()
if err != nil {
return nil, err
}
out.State = *init0.ConstructState(mr, networkName)
return &out, nil
}
type state0 struct { type state0 struct {
init0.State init0.State
store adt.Store store adt.Store
@ -62,6 +75,11 @@ func (s *state0) SetNetworkName(name string) error {
return nil return nil
} }
func (s *state0) SetNextID(id abi.ActorID) error {
s.State.NextID = id
return nil
}
func (s *state0) Remove(addrs ...address.Address) (err error) { func (s *state0) Remove(addrs ...address.Address) (err error) {
m, err := adt0.AsMap(s.store, s.State.AddressMap) m, err := adt0.AsMap(s.store, s.State.AddressMap)
if err != nil { if err != nil {
@ -80,6 +98,15 @@ func (s *state0) Remove(addrs ...address.Address) (err error) {
return nil return nil
} }
func (s *state0) addressMap() (adt.Map, error) { func (s *state0) SetAddressMap(mcid cid.Cid) error {
return adt0.AsMap(s.store, s.AddressMap) s.State.AddressMap = mcid
return nil
}
func (s *state0) AddressMap() (adt.Map, error) {
return adt0.AsMap(s.store, s.State.AddressMap)
}
func (s *state0) GetState() interface{} {
return &s.State
} }

View File

@ -25,6 +25,19 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make2(store adt.Store, networkName string) (State, error) {
out := state2{store: store}
mr, err := adt2.MakeEmptyMap(store).Root()
if err != nil {
return nil, err
}
out.State = *init2.ConstructState(mr, networkName)
return &out, nil
}
type state2 struct { type state2 struct {
init2.State init2.State
store adt.Store store adt.Store
@ -62,6 +75,11 @@ func (s *state2) SetNetworkName(name string) error {
return nil return nil
} }
func (s *state2) SetNextID(id abi.ActorID) error {
s.State.NextID = id
return nil
}
func (s *state2) Remove(addrs ...address.Address) (err error) { func (s *state2) Remove(addrs ...address.Address) (err error) {
m, err := adt2.AsMap(s.store, s.State.AddressMap) m, err := adt2.AsMap(s.store, s.State.AddressMap)
if err != nil { if err != nil {
@ -80,6 +98,15 @@ func (s *state2) Remove(addrs ...address.Address) (err error) {
return nil return nil
} }
func (s *state2) addressMap() (adt.Map, error) { func (s *state2) SetAddressMap(mcid cid.Cid) error {
return adt2.AsMap(s.store, s.AddressMap) s.State.AddressMap = mcid
return nil
}
func (s *state2) AddressMap() (adt.Map, error) {
return adt2.AsMap(s.store, s.State.AddressMap)
}
func (s *state2) GetState() interface{} {
return &s.State
} }

View File

@ -27,6 +27,19 @@ func load3(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make3(store adt.Store, networkName string) (State, error) {
out := state3{store: store}
s, err := init3.ConstructState(store, networkName)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state3 struct { type state3 struct {
init3.State init3.State
store adt.Store store adt.Store
@ -64,6 +77,11 @@ func (s *state3) SetNetworkName(name string) error {
return nil return nil
} }
func (s *state3) SetNextID(id abi.ActorID) error {
s.State.NextID = id
return nil
}
func (s *state3) Remove(addrs ...address.Address) (err error) { func (s *state3) Remove(addrs ...address.Address) (err error) {
m, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth) m, err := adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth)
if err != nil { if err != nil {
@ -82,6 +100,15 @@ func (s *state3) Remove(addrs ...address.Address) (err error) {
return nil return nil
} }
func (s *state3) addressMap() (adt.Map, error) { func (s *state3) SetAddressMap(mcid cid.Cid) error {
return adt3.AsMap(s.store, s.AddressMap, builtin3.DefaultHamtBitwidth) s.State.AddressMap = mcid
return nil
}
func (s *state3) AddressMap() (adt.Map, error) {
return adt3.AsMap(s.store, s.State.AddressMap, builtin3.DefaultHamtBitwidth)
}
func (s *state3) GetState() interface{} {
return &s.State
} }

View File

@ -27,6 +27,19 @@ func load4(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make4(store adt.Store, networkName string) (State, error) {
out := state4{store: store}
s, err := init4.ConstructState(store, networkName)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state4 struct { type state4 struct {
init4.State init4.State
store adt.Store store adt.Store
@ -64,6 +77,11 @@ func (s *state4) SetNetworkName(name string) error {
return nil return nil
} }
func (s *state4) SetNextID(id abi.ActorID) error {
s.State.NextID = id
return nil
}
func (s *state4) Remove(addrs ...address.Address) (err error) { func (s *state4) Remove(addrs ...address.Address) (err error) {
m, err := adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth) m, err := adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth)
if err != nil { if err != nil {
@ -82,6 +100,15 @@ func (s *state4) Remove(addrs ...address.Address) (err error) {
return nil return nil
} }
func (s *state4) addressMap() (adt.Map, error) { func (s *state4) SetAddressMap(mcid cid.Cid) error {
return adt4.AsMap(s.store, s.AddressMap, builtin4.DefaultHamtBitwidth) s.State.AddressMap = mcid
return nil
}
func (s *state4) AddressMap() (adt.Map, error) {
return adt4.AsMap(s.store, s.State.AddressMap, builtin4.DefaultHamtBitwidth)
}
func (s *state4) GetState() interface{} {
return &s.State
} }

View File

@ -27,6 +27,19 @@ func load5(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make5(store adt.Store, networkName string) (State, error) {
out := state5{store: store}
s, err := init5.ConstructState(store, networkName)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state5 struct { type state5 struct {
init5.State init5.State
store adt.Store store adt.Store
@ -64,6 +77,11 @@ func (s *state5) SetNetworkName(name string) error {
return nil return nil
} }
func (s *state5) SetNextID(id abi.ActorID) error {
s.State.NextID = id
return nil
}
func (s *state5) Remove(addrs ...address.Address) (err error) { func (s *state5) Remove(addrs ...address.Address) (err error) {
m, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth) m, err := adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth)
if err != nil { if err != nil {
@ -82,6 +100,15 @@ func (s *state5) Remove(addrs ...address.Address) (err error) {
return nil return nil
} }
func (s *state5) addressMap() (adt.Map, error) { func (s *state5) SetAddressMap(mcid cid.Cid) error {
return adt5.AsMap(s.store, s.AddressMap, builtin5.DefaultHamtBitwidth) s.State.AddressMap = mcid
return nil
}
func (s *state5) AddressMap() (adt.Map, error) {
return adt5.AsMap(s.store, s.State.AddressMap, builtin5.DefaultHamtBitwidth)
}
func (s *state5) GetState() interface{} {
return &s.State
} }

View File

@ -5,6 +5,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
@ -15,6 +16,7 @@ import (
{{end}} {{end}}
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
) )
@ -41,6 +43,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
func MakeState(store adt.Store, av actors.Version) (State, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return make{{.}}(store)
{{end}}
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return builtin{{.}}.StorageMarketActorCodeID, nil
{{end}}
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
BalancesChanged(State) (bool, error) BalancesChanged(State) (bool, error)
@ -55,6 +78,7 @@ type State interface {
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error) ) (weight, verifiedWeight abi.DealWeight, err error)
NextID() (abi.DealID, error) NextID() (abi.DealID, error)
GetState() interface{}
} }
type BalanceTable interface { type BalanceTable interface {
@ -92,51 +116,67 @@ type DealState struct {
} }
type DealProposal struct { type DealProposal struct {
PieceCID cid.Cid PieceCID cid.Cid
PieceSize abi.PaddedPieceSize PieceSize abi.PaddedPieceSize
VerifiedDeal bool VerifiedDeal bool
Client address.Address Client address.Address
Provider address.Address Provider address.Address
Label string Label string
StartEpoch abi.ChainEpoch StartEpoch abi.ChainEpoch
EndEpoch abi.ChainEpoch EndEpoch abi.ChainEpoch
StoragePricePerEpoch abi.TokenAmount StoragePricePerEpoch abi.TokenAmount
ProviderCollateral abi.TokenAmount ProviderCollateral abi.TokenAmount
ClientCollateral abi.TokenAmount ClientCollateral abi.TokenAmount
} }
type DealStateChanges struct { type DealStateChanges struct {
Added []DealIDState Added []DealIDState
Modified []DealStateChange Modified []DealStateChange
Removed []DealIDState Removed []DealIDState
} }
type DealIDState struct { type DealIDState struct {
ID abi.DealID ID abi.DealID
Deal DealState Deal DealState
} }
// DealStateChange is a change in deal state from -> to // DealStateChange is a change in deal state from -> to
type DealStateChange struct { type DealStateChange struct {
ID abi.DealID ID abi.DealID
From *DealState From *DealState
To *DealState To *DealState
} }
type DealProposalChanges struct { type DealProposalChanges struct {
Added []ProposalIDState Added []ProposalIDState
Removed []ProposalIDState Removed []ProposalIDState
} }
type ProposalIDState struct { type ProposalIDState struct {
ID abi.DealID ID abi.DealID
Proposal DealProposal Proposal DealProposal
} }
func EmptyDealState() *DealState { func EmptyDealState() *DealState {
return &DealState{ return &DealState{
SectorStartEpoch: -1, SectorStartEpoch: -1,
SlashEpoch: -1, SlashEpoch: -1,
LastUpdatedEpoch: -1, LastUpdatedEpoch: -1,
} }
} }
// GetDealFees returns the earned and pending fees for a deal at the given chain height.
func (deal DealProposal) GetDealFees(height abi.ChainEpoch) (abi.TokenAmount, abi.TokenAmount) {
tf := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(deal.EndEpoch-deal.StartEpoch)))
ef := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(height-deal.StartEpoch)))
if ef.LessThan(big.Zero()) {
ef = big.Zero()
}
if ef.GreaterThan(tf) {
ef = tf
}
return ef, big.Sub(tf, ef)
}
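A quick worked example of the clamping above, with illustrative numbers only: for a deal with StoragePricePerEpoch = 10, StartEpoch = 100 and EndEpoch = 200, querying at height 150 yields earned = 10 * 50 = 500 and pending = 1000 - 500 = 500; any height at or before epoch 100 returns (0, 1000), and any height at or past epoch 200 returns (1000, 0).

// deal is assumed to hold the proposal described above
earned, pending := deal.GetDealFees(abi.ChainEpoch(150)) // 500, 500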

View File

@ -5,6 +5,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/cbor"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
@ -21,6 +22,7 @@ import (
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/adt"
"github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin"
"github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types"
@ -76,6 +78,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
func MakeState(store adt.Store, av actors.Version) (State, error) {
switch av {
case actors.Version0:
return make0(store)
case actors.Version2:
return make2(store)
case actors.Version3:
return make3(store)
case actors.Version4:
return make4(store)
case actors.Version5:
return make5(store)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
case actors.Version0:
return builtin0.StorageMarketActorCodeID, nil
case actors.Version2:
return builtin2.StorageMarketActorCodeID, nil
case actors.Version3:
return builtin3.StorageMarketActorCodeID, nil
case actors.Version4:
return builtin4.StorageMarketActorCodeID, nil
case actors.Version5:
return builtin5.StorageMarketActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
BalancesChanged(State) (bool, error) BalancesChanged(State) (bool, error)
@ -90,6 +137,7 @@ type State interface {
minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error) ) (weight, verifiedWeight abi.DealWeight, err error)
NextID() (abi.DealID, error) NextID() (abi.DealID, error)
GetState() interface{}
} }
type BalanceTable interface { type BalanceTable interface {
@ -175,3 +223,19 @@ func EmptyDealState() *DealState {
LastUpdatedEpoch: -1, LastUpdatedEpoch: -1,
} }
} }
// GetDealFees returns the earned and pending fees for a deal at the given chain height.
func (deal DealProposal) GetDealFees(height abi.ChainEpoch) (abi.TokenAmount, abi.TokenAmount) {
tf := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(deal.EndEpoch-deal.StartEpoch)))
ef := big.Mul(deal.StoragePricePerEpoch, big.NewInt(int64(height-deal.StartEpoch)))
if ef.LessThan(big.Zero()) {
ef = big.Zero()
}
if ef.GreaterThan(tf) {
ef = tf
}
return ef, big.Sub(tf, ef)
}

View File

@ -26,6 +26,31 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make{{.v}}(store adt.Store) (State, error) {
out := state{{.v}}{store: store}
{{if (le .v 2)}}
ea, err := adt{{.v}}.MakeEmptyArray(store).Root()
if err != nil {
return nil, err
}
em, err := adt{{.v}}.MakeEmptyMap(store).Root()
if err != nil {
return nil, err
}
out.State = *market{{.v}}.ConstructState(ea, em, em)
{{else}}
s, err := market{{.v}}.ConstructState(store)
if err != nil {
return nil, err
}
out.State = *s
{{end}}
return &out, nil
}
type state{{.v}} struct { type state{{.v}} struct {
market{{.v}}.State market{{.v}}.State
store adt.Store store adt.Store
@ -207,3 +232,7 @@ func (s *dealProposals{{.v}}) array() adt.Array {
func fromV{{.v}}DealProposal(v{{.v}} market{{.v}}.DealProposal) DealProposal { func fromV{{.v}}DealProposal(v{{.v}} market{{.v}}.DealProposal) DealProposal {
return (DealProposal)(v{{.v}}) return (DealProposal)(v{{.v}})
} }
func (s *state{{.v}}) GetState() interface{} {
return &s.State
}

View File

@ -26,6 +26,24 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make0(store adt.Store) (State, error) {
out := state0{store: store}
ea, err := adt0.MakeEmptyArray(store).Root()
if err != nil {
return nil, err
}
em, err := adt0.MakeEmptyMap(store).Root()
if err != nil {
return nil, err
}
out.State = *market0.ConstructState(ea, em, em)
return &out, nil
}
type state0 struct { type state0 struct {
market0.State market0.State
store adt.Store store adt.Store
@ -207,3 +225,7 @@ func (s *dealProposals0) array() adt.Array {
func fromV0DealProposal(v0 market0.DealProposal) DealProposal { func fromV0DealProposal(v0 market0.DealProposal) DealProposal {
return (DealProposal)(v0) return (DealProposal)(v0)
} }
func (s *state0) GetState() interface{} {
return &s.State
}

View File

@ -26,6 +26,24 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make2(store adt.Store) (State, error) {
out := state2{store: store}
ea, err := adt2.MakeEmptyArray(store).Root()
if err != nil {
return nil, err
}
em, err := adt2.MakeEmptyMap(store).Root()
if err != nil {
return nil, err
}
out.State = *market2.ConstructState(ea, em, em)
return &out, nil
}
type state2 struct { type state2 struct {
market2.State market2.State
store adt.Store store adt.Store
@ -207,3 +225,7 @@ func (s *dealProposals2) array() adt.Array {
func fromV2DealProposal(v2 market2.DealProposal) DealProposal { func fromV2DealProposal(v2 market2.DealProposal) DealProposal {
return (DealProposal)(v2) return (DealProposal)(v2)
} }
func (s *state2) GetState() interface{} {
return &s.State
}

View File

@ -26,6 +26,19 @@ func load3(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make3(store adt.Store) (State, error) {
out := state3{store: store}
s, err := market3.ConstructState(store)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state3 struct { type state3 struct {
market3.State market3.State
store adt.Store store adt.Store
@ -207,3 +220,7 @@ func (s *dealProposals3) array() adt.Array {
func fromV3DealProposal(v3 market3.DealProposal) DealProposal { func fromV3DealProposal(v3 market3.DealProposal) DealProposal {
return (DealProposal)(v3) return (DealProposal)(v3)
} }
func (s *state3) GetState() interface{} {
return &s.State
}

View File

@ -26,6 +26,19 @@ func load4(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make4(store adt.Store) (State, error) {
out := state4{store: store}
s, err := market4.ConstructState(store)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state4 struct { type state4 struct {
market4.State market4.State
store adt.Store store adt.Store
@ -207,3 +220,7 @@ func (s *dealProposals4) array() adt.Array {
func fromV4DealProposal(v4 market4.DealProposal) DealProposal { func fromV4DealProposal(v4 market4.DealProposal) DealProposal {
return (DealProposal)(v4) return (DealProposal)(v4)
} }
func (s *state4) GetState() interface{} {
return &s.State
}

View File

@ -26,6 +26,19 @@ func load5(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make5(store adt.Store) (State, error) {
out := state5{store: store}
s, err := market5.ConstructState(store)
if err != nil {
return nil, err
}
out.State = *s
return &out, nil
}
type state5 struct { type state5 struct {
market5.State market5.State
store adt.Store store adt.Store
@ -207,3 +220,7 @@ func (s *dealProposals5) array() adt.Array {
func fromV5DealProposal(v5 market5.DealProposal) DealProposal { func fromV5DealProposal(v5 market5.DealProposal) DealProposal {
return (DealProposal)(v5) return (DealProposal)(v5)
} }
func (s *state5) GetState() interface{} {
return &s.State
}

View File

@ -3,6 +3,7 @@ package miner
import ( import (
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
@ -21,6 +22,7 @@ import (
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
{{range .versions}} {{range .versions}}
builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"
{{end}} {{end}}
@ -60,6 +62,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
func MakeState(store adt.Store, av actors.Version) (State, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return make{{.}}(store)
{{end}}
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return builtin{{.}}.StorageMinerActorCodeID, nil
{{end}}
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
@ -75,9 +98,18 @@ type State interface {
FindSector(abi.SectorNumber) (*SectorLocation, error) FindSector(abi.SectorNumber) (*SectorLocation, error)
GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error) GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error)
GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error)
ForEachPrecommittedSector(func(SectorPreCommitOnChainInfo) error) error
LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error) LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error)
NumLiveSectors() (uint64, error) NumLiveSectors() (uint64, error)
IsAllocated(abi.SectorNumber) (bool, error) IsAllocated(abi.SectorNumber) (bool, error)
// UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than
// count if there aren't enough).
UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
// Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors
GetProvingPeriodStart() (abi.ChainEpoch, error)
// Testing only
EraseAllUnproven() error
LoadDeadline(idx uint64) (Deadline, error) LoadDeadline(idx uint64) (Deadline, error)
ForEachDeadline(cb func(idx uint64, dl Deadline) error) error ForEachDeadline(cb func(idx uint64, dl Deadline) error) error
@ -95,6 +127,7 @@ type State interface {
decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error) decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error)
precommits() (adt.Map, error) precommits() (adt.Map, error)
decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error) decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error)
GetState() interface{}
} }
type Deadline interface { type Deadline interface {
@ -148,6 +181,7 @@ type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams
type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams type ProveCommitSectorParams = miner0.ProveCommitSectorParams
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
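This alias is the reason for the new miner5 import above. A hedged caller-side sketch of filling it in; the field names SectorNumbers and AggregateProof are assumed from specs-actors v5, the miner import alias refers to this package, and the proof bytes are a placeholder:

// assemble params for an aggregated prove-commit covering three sectors
params := miner.ProveCommitAggregateParams{
	SectorNumbers:  bitfield.NewFromSet([]uint64{10, 11, 12}),
	AggregateProof: aggregateProofBytes, // produced by the aggregating prover
}
_ = params // would be CBOR-marshaled into the ProveCommitAggregate message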
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old // We added support for the new proofs in network version 7, and removed support for the old

View File

@ -3,6 +3,7 @@ package miner
import ( import (
"github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/chain/actors"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
"github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peer"
cbg "github.com/whyrusleeping/cbor-gen" cbg "github.com/whyrusleeping/cbor-gen"
@ -21,6 +22,7 @@ import (
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
@ -95,6 +97,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
func MakeState(store adt.Store, av actors.Version) (State, error) {
switch av {
case actors.Version0:
return make0(store)
case actors.Version2:
return make2(store)
case actors.Version3:
return make3(store)
case actors.Version4:
return make4(store)
case actors.Version5:
return make5(store)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
case actors.Version0:
return builtin0.StorageMinerActorCodeID, nil
case actors.Version2:
return builtin2.StorageMinerActorCodeID, nil
case actors.Version3:
return builtin3.StorageMinerActorCodeID, nil
case actors.Version4:
return builtin4.StorageMinerActorCodeID, nil
case actors.Version5:
return builtin5.StorageMinerActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
@ -110,9 +157,18 @@ type State interface {
FindSector(abi.SectorNumber) (*SectorLocation, error) FindSector(abi.SectorNumber) (*SectorLocation, error)
GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error) GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error)
GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error)
ForEachPrecommittedSector(func(SectorPreCommitOnChainInfo) error) error
LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error) LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error)
NumLiveSectors() (uint64, error) NumLiveSectors() (uint64, error)
IsAllocated(abi.SectorNumber) (bool, error) IsAllocated(abi.SectorNumber) (bool, error)
// UnallocatedSectorNumbers returns up to count unallocated sector numbers (or less than
// count if there aren't enough).
UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error)
// Note that ProvingPeriodStart is deprecated and will be renamed / removed in a future version of actors
GetProvingPeriodStart() (abi.ChainEpoch, error)
// Testing only
EraseAllUnproven() error
LoadDeadline(idx uint64) (Deadline, error) LoadDeadline(idx uint64) (Deadline, error)
ForEachDeadline(cb func(idx uint64, dl Deadline) error) error ForEachDeadline(cb func(idx uint64, dl Deadline) error) error
@ -130,6 +186,7 @@ type State interface {
decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error) decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error)
precommits() (adt.Map, error) precommits() (adt.Map, error)
decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error) decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error)
GetState() interface{}
} }
type Deadline interface { type Deadline interface {
@ -183,6 +240,7 @@ type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams
type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
type ProveCommitSectorParams = miner0.ProveCommitSectorParams type ProveCommitSectorParams = miner0.ProveCommitSectorParams
type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams
type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams
func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
// We added support for the new proofs in network version 7, and removed support for the old // We added support for the new proofs in network version 7, and removed support for the old

View File

@ -8,6 +8,7 @@ import (
{{end}} {{end}}
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -35,6 +36,12 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make{{.v}}(store adt.Store) (State, error) {
out := state{{.v}}{store: store}
out.State = miner{{.v}}.State{}
return &out, nil
}
type state{{.v}} struct { type state{{.v}} struct {
miner{{.v}}.State miner{{.v}}.State
store adt.Store store adt.Store
@ -203,6 +210,26 @@ func (s *state{{.v}}) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCom
return &ret, nil return &ret, nil
} }
func (s *state{{.v}}) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
{{if (ge .v 3) -}}
precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors, builtin{{.v}}.DefaultHamtBitwidth)
{{- else -}}
precommitted, err := adt{{.v}}.AsMap(s.store, s.State.PreCommittedSectors)
{{- end}}
if err != nil {
return err
}
var info miner{{.v}}.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV{{.v}}SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
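A short hedged sketch of the new iterator from a caller's point of view, where st is a loaded miner State and the miner package alias is assumed:

// count pre-committed sectors that have not yet been proven
var pending int
if err := st.ForEachPrecommittedSector(func(_ miner.SectorPreCommitOnChainInfo) error {
	pending++
	return nil
}); err != nil {
	return err
}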
func (s *state{{.v}}) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { func (s *state{{.v}}) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner{{.v}}.LoadSectors(s.store, s.State.Sectors) sectors, err := miner{{.v}}.LoadSectors(s.store, s.State.Sectors)
if err != nil { if err != nil {
@ -236,15 +263,61 @@ func (s *state{{.v}}) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo
return infos, nil return infos, nil
} }
func (s *state{{.v}}) IsAllocated(num abi.SectorNumber) (bool, error) { func (s *state{{.v}}) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state{{.v}}) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err return false, err
} }
return allocatedSectors.IsSet(uint64(num)) return allocatedSectors.IsSet(uint64(num))
} }
func (s *state{{.v}}) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state{{.v}}) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{ {Val: true, Len: abi.MaxSectorNumber} }},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
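In practice this is what lets a caller reserve fresh sector numbers, for example when building an aggregated prove-commit batch. A hedged sketch, where st is a loaded miner State:

// ask for up to 10 sector numbers that have never been allocated
free, err := st.UnallocatedSectorNumbers(10)
if err != nil {
	return err
}
// free is ascending and may hold fewer than 10 entries if the miner is
// running out of usable sector numbers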
func (s *state{{.v}}) LoadDeadline(idx uint64) (Deadline, error) { func (s *state{{.v}}) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store) dls, err := s.State.LoadDeadlines(s.store)
if err != nil { if err != nil {
@ -366,6 +439,45 @@ func (s *state{{.v}}) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (Secto
return fromV{{.v}}SectorPreCommitOnChainInfo(sp), nil return fromV{{.v}}SectorPreCommitOnChainInfo(sp), nil
} }
func (s *state{{.v}}) EraseAllUnproven() error {
{{if (ge .v 2)}}
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return err
}
err = dls.ForEach(s.store, func(dindx uint64, dl *miner{{.v}}.Deadline) error {
ps, err := dl.PartitionsArray(s.store)
if err != nil {
return err
}
var part miner{{.v}}.Partition
err = ps.ForEach(&part, func(pindx int64) error {
_ = part.ActivateUnproven()
return ps.Set(uint64(pindx), &part)
})
if err != nil {
return err
}
dl.Partitions, err = ps.Root()
if err != nil {
return err
}
return dls.UpdateDeadline(s.store, dindx, dl)
})
if err != nil {
	return err
}
return s.State.SaveDeadlines(s.store, dls)
{{else}}
// field doesn't exist until v2
return nil
{{end}}
}
func (d *deadline{{.v}}) LoadPartition(idx uint64) (Partition, error) { func (d *deadline{{.v}}) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx) p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil { if err != nil {
@ -458,3 +570,7 @@ func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOn
return (SectorPreCommitOnChainInfo)(v0) return (SectorPreCommitOnChainInfo)(v0)
{{end}} {{end}}
} }
func (s *state{{.v}}) GetState() interface{} {
return &s.State
}

View File

@ -8,6 +8,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -32,6 +33,12 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make0(store adt.Store) (State, error) {
out := state0{store: store}
out.State = miner0.State{}
return &out, nil
}
type state0 struct { type state0 struct {
miner0.State miner0.State
store adt.Store store adt.Store
@ -200,6 +207,22 @@ func (s *state0) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil return &ret, nil
} }
func (s *state0) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt0.AsMap(s.store, s.State.PreCommittedSectors)
if err != nil {
return err
}
var info miner0.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV0SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner0.LoadSectors(s.store, s.State.Sectors) sectors, err := miner0.LoadSectors(s.store, s.State.Sectors)
if err != nil { if err != nil {
@ -233,15 +256,61 @@ func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil return infos, nil
} }
func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) { func (s *state0) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err return false, err
} }
return allocatedSectors.IsSet(uint64(num)) return allocatedSectors.IsSet(uint64(num))
} }
func (s *state0) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state0) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state0) LoadDeadline(idx uint64) (Deadline, error) { func (s *state0) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store) dls, err := s.State.LoadDeadlines(s.store)
if err != nil { if err != nil {
@ -363,6 +432,13 @@ func (s *state0) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC
return fromV0SectorPreCommitOnChainInfo(sp), nil return fromV0SectorPreCommitOnChainInfo(sp), nil
} }
func (s *state0) EraseAllUnproven() error {
// field doesn't exist until v2
return nil
}
func (d *deadline0) LoadPartition(idx uint64) (Partition, error) { func (d *deadline0) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx) p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil { if err != nil {
@ -426,3 +502,7 @@ func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) Sect
return (SectorPreCommitOnChainInfo)(v0) return (SectorPreCommitOnChainInfo)(v0)
} }
func (s *state0) GetState() interface{} {
return &s.State
}

View File

@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -30,6 +31,12 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make2(store adt.Store) (State, error) {
out := state2{store: store}
out.State = miner2.State{}
return &out, nil
}
type state2 struct { type state2 struct {
miner2.State miner2.State
store adt.Store store adt.Store
@ -198,6 +205,22 @@ func (s *state2) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil return &ret, nil
} }
func (s *state2) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt2.AsMap(s.store, s.State.PreCommittedSectors)
if err != nil {
return err
}
var info miner2.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV2SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner2.LoadSectors(s.store, s.State.Sectors) sectors, err := miner2.LoadSectors(s.store, s.State.Sectors)
if err != nil { if err != nil {
@ -231,15 +254,61 @@ func (s *state2) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil return infos, nil
} }
func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) { func (s *state2) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state2) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err return false, err
} }
return allocatedSectors.IsSet(uint64(num)) return allocatedSectors.IsSet(uint64(num))
} }
func (s *state2) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state2) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state2) LoadDeadline(idx uint64) (Deadline, error) { func (s *state2) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store) dls, err := s.State.LoadDeadlines(s.store)
if err != nil { if err != nil {
@ -361,6 +430,43 @@ func (s *state2) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC
return fromV2SectorPreCommitOnChainInfo(sp), nil return fromV2SectorPreCommitOnChainInfo(sp), nil
} }
func (s *state2) EraseAllUnproven() error {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return err
}
err = dls.ForEach(s.store, func(dindx uint64, dl *miner2.Deadline) error {
ps, err := dl.PartitionsArray(s.store)
if err != nil {
return err
}
var part miner2.Partition
err = ps.ForEach(&part, func(pindx int64) error {
_ = part.ActivateUnproven()
return ps.Set(uint64(pindx), &part)
})
if err != nil {
return err
}
dl.Partitions, err = ps.Root()
if err != nil {
return err
}
return dls.UpdateDeadline(s.store, dindx, dl)
})
if err != nil {
	return err
}
return s.State.SaveDeadlines(s.store, dls)
}
func (d *deadline2) LoadPartition(idx uint64) (Partition, error) { func (d *deadline2) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx) p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil { if err != nil {
@ -442,3 +548,7 @@ func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) Sect
} }
} }
func (s *state2) GetState() interface{} {
return &s.State
}

View File

@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -32,6 +33,12 @@ func load3(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make3(store adt.Store) (State, error) {
out := state3{store: store}
out.State = miner3.State{}
return &out, nil
}
type state3 struct { type state3 struct {
miner3.State miner3.State
store adt.Store store adt.Store
@ -200,6 +207,22 @@ func (s *state3) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil return &ret, nil
} }
func (s *state3) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt3.AsMap(s.store, s.State.PreCommittedSectors, builtin3.DefaultHamtBitwidth)
if err != nil {
return err
}
var info miner3.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV3SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner3.LoadSectors(s.store, s.State.Sectors) sectors, err := miner3.LoadSectors(s.store, s.State.Sectors)
if err != nil { if err != nil {
@ -233,15 +256,61 @@ func (s *state3) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil return infos, nil
} }
func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) { func (s *state3) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state3) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err return false, err
} }
return allocatedSectors.IsSet(uint64(num)) return allocatedSectors.IsSet(uint64(num))
} }
func (s *state3) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state3) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state3) LoadDeadline(idx uint64) (Deadline, error) { func (s *state3) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store) dls, err := s.State.LoadDeadlines(s.store)
if err != nil { if err != nil {
@ -358,6 +427,43 @@ func (s *state3) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC
return fromV3SectorPreCommitOnChainInfo(sp), nil return fromV3SectorPreCommitOnChainInfo(sp), nil
} }
func (s *state3) EraseAllUnproven() error {
dls, err := s.State.LoadDeadlines(s.store)
if err != nil {
return err
}
err = dls.ForEach(s.store, func(dindx uint64, dl *miner3.Deadline) error {
ps, err := dl.PartitionsArray(s.store)
if err != nil {
return err
}
var part miner3.Partition
err = ps.ForEach(&part, func(pindx int64) error {
_ = part.ActivateUnproven()
return ps.Set(uint64(pindx), &part)
})
if err != nil {
return err
}
dl.Partitions, err = ps.Root()
if err != nil {
return err
}
return dls.UpdateDeadline(s.store, dindx, dl)
})
if err != nil {
	return err
}
return s.State.SaveDeadlines(s.store, dls)
}
func (d *deadline3) LoadPartition(idx uint64) (Partition, error) { func (d *deadline3) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx) p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil { if err != nil {
@ -443,3 +549,7 @@ func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) Sect
} }
} }
func (s *state3) GetState() interface{} {
return &s.State
}

View File

@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -32,6 +33,12 @@ func load4(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make4(store adt.Store) (State, error) {
out := state4{store: store}
out.State = miner4.State{}
return &out, nil
}
type state4 struct { type state4 struct {
miner4.State miner4.State
store adt.Store store adt.Store
@ -200,6 +207,22 @@ func (s *state4) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil return &ret, nil
} }
func (s *state4) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt4.AsMap(s.store, s.State.PreCommittedSectors, builtin4.DefaultHamtBitwidth)
if err != nil {
return err
}
var info miner4.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV4SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state4) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { func (s *state4) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner4.LoadSectors(s.store, s.State.Sectors) sectors, err := miner4.LoadSectors(s.store, s.State.Sectors)
if err != nil { if err != nil {
@ -233,15 +256,61 @@ func (s *state4) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil return infos, nil
} }
func (s *state4) IsAllocated(num abi.SectorNumber) (bool, error) { func (s *state4) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state4) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err return false, err
} }
return allocatedSectors.IsSet(uint64(num)) return allocatedSectors.IsSet(uint64(num))
} }
func (s *state4) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state4) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state4) LoadDeadline(idx uint64) (Deadline, error) { func (s *state4) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store) dls, err := s.State.LoadDeadlines(s.store)
if err != nil { if err != nil {
@ -358,6 +427,43 @@ func (s *state4) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC
return fromV4SectorPreCommitOnChainInfo(sp), nil return fromV4SectorPreCommitOnChainInfo(sp), nil
} }
func (s *state4) EraseAllUnproven() error {
	dls, err := s.State.LoadDeadlines(s.store)
	if err != nil {
		return err
	}

	err = dls.ForEach(s.store, func(dindx uint64, dl *miner4.Deadline) error {
		ps, err := dl.PartitionsArray(s.store)
		if err != nil {
			return err
		}

		var part miner4.Partition
		err = ps.ForEach(&part, func(pindx int64) error {
			// Mark any unproven sectors in this partition as active; the
			// returned power delta is intentionally ignored here.
			_ = part.ActivateUnproven()
			return ps.Set(uint64(pindx), &part)
		})
		if err != nil {
			return err
		}

		dl.Partitions, err = ps.Root()
		if err != nil {
			return err
		}

		return dls.UpdateDeadline(s.store, dindx, dl)
	})
	if err != nil {
		return err
	}

	return s.State.SaveDeadlines(s.store, dls)
}
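A minimal sketch (helper name hypothetical, not part of this diff) of driving this through the version-independent interface, assuming EraseAllUnproven and GetState are both declared on miner.State and that the caller persists the mutated state itself:

package example

import (
	"context"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
)

// resetProofs flips every unproven sector to active via EraseAllUnproven and
// writes the mutated miner state back, returning the new state root.
func resetProofs(ctx context.Context, store adt.Store, st miner.State) (cid.Cid, error) {
	if err := st.EraseAllUnproven(); err != nil {
		return cid.Undef, err
	}
	return store.Put(ctx, st.GetState())
}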
func (d *deadline4) LoadPartition(idx uint64) (Partition, error) { func (d *deadline4) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx) p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil { if err != nil {
@ -443,3 +549,7 @@ func fromV4SectorPreCommitOnChainInfo(v4 miner4.SectorPreCommitOnChainInfo) Sect
} }
} }
func (s *state4) GetState() interface{} {
return &s.State
}

View File

@ -6,6 +6,7 @@ import (
"github.com/filecoin-project/go-address" "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-bitfield"
rle "github.com/filecoin-project/go-bitfield/rle"
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/dline"
"github.com/ipfs/go-cid" "github.com/ipfs/go-cid"
@ -32,6 +33,12 @@ func load5(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make5(store adt.Store) (State, error) {
out := state5{store: store}
out.State = miner5.State{}
return &out, nil
}
type state5 struct { type state5 struct {
miner5.State miner5.State
store adt.Store store adt.Store
@ -200,6 +207,22 @@ func (s *state5) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOn
return &ret, nil return &ret, nil
} }
func (s *state5) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error {
precommitted, err := adt5.AsMap(s.store, s.State.PreCommittedSectors, builtin5.DefaultHamtBitwidth)
if err != nil {
return err
}
var info miner5.SectorPreCommitOnChainInfo
if err := precommitted.ForEach(&info, func(_ string) error {
return cb(fromV5SectorPreCommitOnChainInfo(info))
}); err != nil {
return err
}
return nil
}
func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
sectors, err := miner5.LoadSectors(s.store, s.State.Sectors) sectors, err := miner5.LoadSectors(s.store, s.State.Sectors)
if err != nil { if err != nil {
@ -233,15 +256,61 @@ func (s *state5) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, err
return infos, nil return infos, nil
} }
func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) { func (s *state5) loadAllocatedSectorNumbers() (bitfield.BitField, error) {
var allocatedSectors bitfield.BitField var allocatedSectors bitfield.BitField
if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors)
return allocatedSectors, err
}
func (s *state5) IsAllocated(num abi.SectorNumber) (bool, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return false, err return false, err
} }
return allocatedSectors.IsSet(uint64(num)) return allocatedSectors.IsSet(uint64(num))
} }
func (s *state5) GetProvingPeriodStart() (abi.ChainEpoch, error) {
return s.State.ProvingPeriodStart, nil
}
func (s *state5) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) {
allocatedSectors, err := s.loadAllocatedSectorNumbers()
if err != nil {
return nil, err
}
allocatedRuns, err := allocatedSectors.RunIterator()
if err != nil {
return nil, err
}
unallocatedRuns, err := rle.Subtract(
&rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}},
allocatedRuns,
)
if err != nil {
return nil, err
}
iter, err := rle.BitsFromRuns(unallocatedRuns)
if err != nil {
return nil, err
}
sectors := make([]abi.SectorNumber, 0, count)
for iter.HasNext() && len(sectors) < count {
nextNo, err := iter.Next()
if err != nil {
return nil, err
}
sectors = append(sectors, abi.SectorNumber(nextNo))
}
return sectors, nil
}
func (s *state5) LoadDeadline(idx uint64) (Deadline, error) { func (s *state5) LoadDeadline(idx uint64) (Deadline, error) {
dls, err := s.State.LoadDeadlines(s.store) dls, err := s.State.LoadDeadlines(s.store)
if err != nil { if err != nil {
@ -358,6 +427,43 @@ func (s *state5) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreC
return fromV5SectorPreCommitOnChainInfo(sp), nil return fromV5SectorPreCommitOnChainInfo(sp), nil
} }
func (s *state5) EraseAllUnproven() error {
	dls, err := s.State.LoadDeadlines(s.store)
	if err != nil {
		return err
	}

	err = dls.ForEach(s.store, func(dindx uint64, dl *miner5.Deadline) error {
		ps, err := dl.PartitionsArray(s.store)
		if err != nil {
			return err
		}

		var part miner5.Partition
		err = ps.ForEach(&part, func(pindx int64) error {
			// Mark any unproven sectors in this partition as active; the
			// returned power delta is intentionally ignored here.
			_ = part.ActivateUnproven()
			return ps.Set(uint64(pindx), &part)
		})
		if err != nil {
			return err
		}

		dl.Partitions, err = ps.Root()
		if err != nil {
			return err
		}

		return dls.UpdateDeadline(s.store, dindx, dl)
	})
	if err != nil {
		return err
	}

	return s.State.SaveDeadlines(s.store, dls)
}
func (d *deadline5) LoadPartition(idx uint64) (Partition, error) { func (d *deadline5) LoadPartition(idx uint64) (Partition, error) {
p, err := d.Deadline.LoadPartition(d.store, idx) p, err := d.Deadline.LoadPartition(d.store, idx)
if err != nil { if err != nil {
@ -443,3 +549,7 @@ func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) Sect
} }
} }
func (s *state5) GetState() interface{} {
return &s.State
}

View File

@ -41,6 +41,27 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
func MakeState(store adt.Store, av actors.Version, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return make{{.}}(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
{{end}}
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
{{range .versions}}
case actors.Version{{.}}:
return builtin{{.}}.MultisigActorCodeID, nil
{{end}}
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
@ -56,6 +77,7 @@ type State interface {
transactions() (adt.Map, error) transactions() (adt.Map, error)
decodeTransaction(val *cbg.Deferred) (Transaction, error) decodeTransaction(val *cbg.Deferred) (Transaction, error)
GetState() interface{}
} }
type Transaction = msig0.Transaction type Transaction = msig0.Transaction
@ -93,6 +115,7 @@ type MessageBuilder interface {
type ProposalHashData = msig{{.latestVersion}}.ProposalHashData type ProposalHashData = msig{{.latestVersion}}.ProposalHashData
type ProposeReturn = msig{{.latestVersion}}.ProposeReturn type ProposeReturn = msig{{.latestVersion}}.ProposeReturn
type ProposeParams = msig{{.latestVersion}}.ProposeParams type ProposeParams = msig{{.latestVersion}}.ProposeParams
type ApproveReturn = msig{{.latestVersion}}.ApproveReturn
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
params := msig{{.latestVersion}}.TxnIDParams{ID: msig{{.latestVersion}}.TxnID(id)} params := msig{{.latestVersion}}.TxnIDParams{ID: msig{{.latestVersion}}.TxnID(id)}

View File

@ -76,6 +76,51 @@ func Load(store adt.Store, act *types.Actor) (State, error) {
return nil, xerrors.Errorf("unknown actor code %s", act.Code) return nil, xerrors.Errorf("unknown actor code %s", act.Code)
} }
func MakeState(store adt.Store, av actors.Version, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
switch av {
case actors.Version0:
return make0(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
case actors.Version2:
return make2(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
case actors.Version3:
return make3(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
case actors.Version4:
return make4(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
case actors.Version5:
return make5(store, signers, threshold, startEpoch, unlockDuration, initialBalance)
}
return nil, xerrors.Errorf("unknown actor version %d", av)
}
func GetActorCodeID(av actors.Version) (cid.Cid, error) {
switch av {
case actors.Version0:
return builtin0.MultisigActorCodeID, nil
case actors.Version2:
return builtin2.MultisigActorCodeID, nil
case actors.Version3:
return builtin3.MultisigActorCodeID, nil
case actors.Version4:
return builtin4.MultisigActorCodeID, nil
case actors.Version5:
return builtin5.MultisigActorCodeID, nil
}
return cid.Undef, xerrors.Errorf("unknown actor version %d", av)
}
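A minimal usage sketch for constructing a fresh v5 multisig state in memory; the in-memory blockstore wiring and the lotus blockstore/adt helpers are assumptions on my part, not part of this diff:

package example

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/big"
	cbor "github.com/ipfs/go-ipld-cbor"

	"github.com/filecoin-project/lotus/blockstore"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin/multisig"
)

func newMsigState(ctx context.Context) (multisig.State, error) {
	// Back the adt.Store with a throwaway in-memory blockstore.
	store := adt.WrapStore(ctx, cbor.NewCborStore(blockstore.NewMemory()))

	signer, err := address.NewIDAddress(1000)
	if err != nil {
		return nil, err
	}

	// 1-of-1 multisig, no vesting, zero initial balance.
	return multisig.MakeState(store, actors.Version5,
		[]address.Address{signer}, 1, 0, 0, big.Zero())
}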
type State interface { type State interface {
cbor.Marshaler cbor.Marshaler
@ -91,6 +136,7 @@ type State interface {
transactions() (adt.Map, error) transactions() (adt.Map, error)
decodeTransaction(val *cbg.Deferred) (Transaction, error) decodeTransaction(val *cbg.Deferred) (Transaction, error)
GetState() interface{}
} }
type Transaction = msig0.Transaction type Transaction = msig0.Transaction
@ -140,6 +186,7 @@ type MessageBuilder interface {
type ProposalHashData = msig5.ProposalHashData type ProposalHashData = msig5.ProposalHashData
type ProposeReturn = msig5.ProposeReturn type ProposeReturn = msig5.ProposeReturn
type ProposeParams = msig5.ProposeParams type ProposeParams = msig5.ProposeParams
type ApproveReturn = msig5.ApproveReturn
func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { func txnParams(id uint64, data *ProposalHashData) ([]byte, error) {
params := msig5.TxnIDParams{ID: msig5.TxnID(id)} params := msig5.TxnIDParams{ID: msig5.TxnID(id)}

View File

@ -31,6 +31,32 @@ func load{{.v}}(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make{{.v}}(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
out := state{{.v}}{store: store}
out.State = msig{{.v}}.State{}
out.State.Signers = signers
out.State.NumApprovalsThreshold = threshold
out.State.StartEpoch = startEpoch
out.State.UnlockDuration = unlockDuration
out.State.InitialBalance = initialBalance
{{if (le .v 2)}}
em, err := adt{{.v}}.MakeEmptyMap(store).Root()
if err != nil {
return nil, err
}
out.State.PendingTxns = em
{{else}}
em, err := adt{{.v}}.StoreEmptyMap(store, builtin{{.v}}.DefaultHamtBitwidth)
if err != nil {
return nil, err
}
out.State.PendingTxns = em
{{end}}
return &out, nil
}
type state{{.v}} struct { type state{{.v}} struct {
msig{{.v}}.State msig{{.v}}.State
store adt.Store store adt.Store
@ -95,3 +121,7 @@ func (s *state{{.v}}) decodeTransaction(val *cbg.Deferred) (Transaction, error)
} }
return tx, nil return tx, nil
} }
func (s *state{{.v}}) GetState() interface{} {
return &s.State
}

View File

@ -28,6 +28,25 @@ func load0(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make0(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
out := state0{store: store}
out.State = msig0.State{}
out.State.Signers = signers
out.State.NumApprovalsThreshold = threshold
out.State.StartEpoch = startEpoch
out.State.UnlockDuration = unlockDuration
out.State.InitialBalance = initialBalance
em, err := adt0.MakeEmptyMap(store).Root()
if err != nil {
return nil, err
}
out.State.PendingTxns = em
return &out, nil
}
type state0 struct { type state0 struct {
msig0.State msig0.State
store adt.Store store adt.Store
@ -92,3 +111,7 @@ func (s *state0) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
} }
return tx, nil return tx, nil
} }
func (s *state0) GetState() interface{} {
return &s.State
}

View File

@ -28,6 +28,25 @@ func load2(store adt.Store, root cid.Cid) (State, error) {
return &out, nil return &out, nil
} }
func make2(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) {
out := state2{store: store}
out.State = msig2.State{}
out.State.Signers = signers
out.State.NumApprovalsThreshold = threshold
out.State.StartEpoch = startEpoch
out.State.UnlockDuration = unlockDuration
out.State.InitialBalance = initialBalance
em, err := adt2.MakeEmptyMap(store).Root()
if err != nil {
return nil, err
}
out.State.PendingTxns = em
return &out, nil
}
type state2 struct { type state2 struct {
msig2.State msig2.State
store adt.Store store adt.Store
@ -92,3 +111,7 @@ func (s *state2) decodeTransaction(val *cbg.Deferred) (Transaction, error) {
} }
return tx, nil return tx, nil
} }
func (s *state2) GetState() interface{} {
return &s.State
}

Some files were not shown because too many files have changed in this diff.