Merge branch 'dev' into master

commit 28566b4d64
@@ -1,12 +1,15 @@
version: 2.1
orbs:
go: gotest/tools@0.0.9
go: gotest/tools@0.0.13

executors:
golang:
docker:
- image: circleci/golang:1.13
- image: circleci/golang:1.14.6
resource_class: 2xlarge
ubuntu:
docker:
- image: ubuntu:19.10

commands:
install-deps:
@@ -24,6 +27,8 @@ commands:
description: is a darwin build environment?
type: boolean
steps:
- checkout
- git_fetch_all_tags
- checkout
- when:
condition: << parameters.linux >>
@@ -37,16 +42,36 @@ commands:
- restore_cache:
name: Restore parameters cache
keys:
- 'v20-1k-lotus-params'
- 'v25-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/
- run: ./lotus fetch-params --proving-params 1024
- run: ./lotus fetch-params 2048
- save_cache:
name: Save parameters cache
key: 'v20-1k-lotus-params'
key: 'v25-2k-lotus-params'
paths:
- /var/tmp/filecoin-proof-parameters/

install_ipfs:
steps:
- run: |
apt update
apt install -y wget
wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz
wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512
if [ "$(sha512sum go-ipfs_v0.4.22_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512)" ]
then
echo "ipfs failed checksum check"
exit 1
fi
tar -xf go-ipfs_v0.4.22_linux-amd64.tar.gz
mv go-ipfs/ipfs /usr/local/bin/ipfs
chmod +x /usr/local/bin/ipfs
git_fetch_all_tags:
steps:
- run:
name: fetch all tags
command: |
git fetch --all

jobs:
mod-tidy-check:
@@ -54,7 +79,6 @@ jobs:
steps:
- install-deps
- prepare
- go/mod-download
- go/mod-tidy-check

build-all:
@@ -62,23 +86,34 @@ jobs:
steps:
- install-deps
- prepare
- go/mod-download
- run: sudo apt-get update
- run: sudo apt-get install npm
- restore_cache:
name: restore go mod cache
key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }}
- run:
command: make buildall
- store_artifacts:
path: lotus
- store_artifacts:
path: lotus-storage-miner
path: lotus-miner
- store_artifacts:
path: lotus-worker
- run: mkdir linux && mv lotus lotus-miner lotus-worker linux/
- persist_to_workspace:
root: "."
paths:
- linux

build-debug:
executor: golang
steps:
- install-deps
- prepare
- run:
command: make debug

test: &test
description: |
Run tests with gotestsum.
parameters:
parameters: &test-params
executor:
type: executor
default: golang
@@ -90,13 +125,16 @@ jobs:
type: string
default: "./..."
description: Import paths of packages to be tested.
winpost-test:
type: string
default: "0"
test-suite-name:
type: string
default: unit
description: Test suite name to report to CircleCI.
gotestsum-format:
type: string
default: short
default: pkgname-and-test-fails
description: gotestsum format. https://github.com/gotestyourself/gotestsum#format
coverage:
type: string
@@ -112,30 +150,34 @@ jobs:
steps:
- install-deps
- prepare
- go/mod-download
- restore_cache:
name: restore go mod cache
key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }}
- run:
command: make deps lotus
no_output_timeout: 30m
- download-params
- go/install-gotestsum:
gobin: $HOME/.local/bin
version: 0.5.2
- run:
name: go test
environment:
GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml
GOTESTSUM_FORMAT: << parameters.gotestsum-format >>
LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >>
SKIP_CONFORMANCE: "1"
command: |
mkdir -p /tmp/test-reports/<< parameters.test-suite-name >>
gotestsum -- \
mkdir -p /tmp/test-artifacts
gotestsum \
--format << parameters.gotestsum-format >> \
--junitfile /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml \
--jsonfile /tmp/test-artifacts/<< parameters.test-suite-name >>.json \
-- \
<< parameters.coverage >> \
<< parameters.go-test-flags >> \
<< parameters.packages >>
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/<< parameters.test-suite-name >>.json
- when:
condition: << parameters.codecov-upload >>
steps:
@@ -145,16 +187,92 @@ jobs:
shell: /bin/bash -eo pipefail
command: |
bash <(curl -s https://codecov.io/bash)
- save_cache:
name: save go mod cache
key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }}
paths:
- "~/go/pkg"
- "~/go/src/github.com"
- "~/go/src/golang.org"

test-short:
test-short:
<<: *test
test-window-post:
<<: *test
test-conformance:
description: |
Run tests using a corpus of interoperable test vectors for Filecoin
implementations to test their correctness and compliance with the Filecoin
specifications.
parameters:
<<: *test-params
vectors-branch:
type: string
default: ""
description: |
Branch on github.com/filecoin-project/test-vectors to checkout and
test with. If empty (the default) the commit defined by the git
submodule is used.
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run:
command: make deps lotus
no_output_timeout: 30m
- download-params
- when:
condition:
not:
equal: [ "", << parameters.vectors-branch >> ]
steps:
- run:
name: checkout vectors branch
command: |
cd extern/test-vectors
git fetch
git checkout origin/<< parameters.vectors-branch >>
- go/install-gotestsum:
gobin: $HOME/.local/bin
version: 0.5.2
- run:
name: install statediff globally
command: |
## statediff is optional; we succeed even if compilation fails.
mkdir -p /tmp/statediff
git clone https://github.com/filecoin-project/statediff.git /tmp/statediff
cd /tmp/statediff
go install ./cmd/statediff || exit 0
- run:
name: go test
environment:
SKIP_CONFORMANCE: "0"
command: |
mkdir -p /tmp/test-reports
mkdir -p /tmp/test-artifacts
gotestsum \
--format pkgname-and-test-fails \
--junitfile /tmp/test-reports/junit.xml \
-- \
-v -coverpkg ./chain/vm/,github.com/filecoin-project/specs-actors/... -coverprofile=/tmp/conformance.out ./conformance/
go tool cover -html=/tmp/conformance.out -o /tmp/test-artifacts/conformance-coverage.html
no_output_timeout: 30m
- store_test_results:
path: /tmp/test-reports
- store_artifacts:
path: /tmp/test-artifacts/conformance-coverage.html
build-lotus-soup:
description: |
Compile `lotus-soup` Testground test plan using the current version of Lotus.
parameters:
<<: *test-params
executor: << parameters.executor >>
steps:
- install-deps
- prepare
- run: cd extern/oni && git submodule sync
- run: cd extern/oni && git submodule update --init
- run: cd extern/filecoin-ffi && make
- run:
name: "replace lotus, filecoin-ffi, blst and fil-blst deps"
command: cd extern/oni/lotus-soup && go mod edit -replace github.com/filecoin-project/lotus=../../../ && go mod edit -replace github.com/filecoin-project/filecoin-ffi=../../filecoin-ffi && go mod edit -replace github.com/supranational/blst=../../fil-blst/blst && go mod edit -replace github.com/filecoin-project/fil-blst=../../fil-blst
- run:
name: "build lotus-soup testplan"
command: pushd extern/oni/lotus-soup && go build -tags=testground .


build-macos:
description: build darwin lotus binary
@@ -168,8 +286,8 @@ jobs:
- run:
name: Install go
command: |
curl -O https://dl.google.com/go/go1.13.4.darwin-amd64.pkg && \
sudo installer -pkg go1.13.4.darwin-amd64.pkg -target /
curl -O https://dl.google.com/go/go1.14.2.darwin-amd64.pkg && \
sudo installer -pkg go1.14.2.darwin-amd64.pkg -target /
- run:
name: Install pkg-config
command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config
@@ -181,21 +299,26 @@ jobs:
- run:
name: Install jq
command: |
mkdir $HOME/.bin
curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output $HOME/.bin/jq
chmod +x $HOME/.bin/jq
curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq
chmod +x /usr/local/bin/jq
- restore_cache:
name: restore go mod and cargo cache
name: restore cargo cache
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
- install-deps
- go/mod-download
- run:
command: make build
no_output_timeout: 30m
- store_artifacts:
path: lotus
- store_artifacts:
path: lotus-storage-miner
path: lotus-miner
- store_artifacts:
path: lotus-worker
- run: mkdir darwin && mv lotus lotus-miner lotus-worker darwin/
- persist_to_workspace:
root: "."
paths:
- darwin
- save_cache:
name: save cargo cache
key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }}
@@ -203,6 +326,35 @@
- "~/.rustup"
- "~/.cargo"

gofmt:
executor: golang
steps:
- install-deps
- prepare
- run:
command: "! go fmt ./... 2>&1 | read"

cbor-gen-check:
executor: golang
steps:
- install-deps
- prepare
- run: make deps
- run: go install golang.org/x/tools/cmd/goimports
- run: go install github.com/hannahhoward/cbor-gen-for
- run: go generate ./...
- run: git --no-pager diff
- run: git --no-pager diff --quiet

docs-check:
executor: golang
steps:
- install-deps
- prepare
- run: make docsgen
- run: git --no-pager diff
- run: git --no-pager diff --quiet

lint: &lint
description: |
Run golangci-lint.
@@ -212,7 +364,7 @@ jobs:
default: golang
golangci-lint-version:
type: string
default: 1.17.1
default: 1.27.0
concurrency:
type: string
default: '2'
@@ -228,7 +380,6 @@ jobs:
steps:
- install-deps
- prepare
- go/mod-download
- run:
command: make deps
no_output_timeout: 30m
@@ -238,25 +389,91 @@ jobs:
- run:
name: Lint
command: |
$HOME/.local/bin/golangci-lint run -v \
$HOME/.local/bin/golangci-lint run -v --timeout 2m \
--concurrency << parameters.concurrency >> << parameters.args >>
lint-changes:
<<: *lint

lint-all:
<<: *lint

publish:
description: publish binary artifacts
executor: ubuntu
steps:
- run:
name: Install git jq curl
command: apt update && apt install -y git jq curl
- checkout
- git_fetch_all_tags
- checkout
- install_ipfs
- attach_workspace:
at: "."
- run:
name: Create bundles
command: ./scripts/build-bundle.sh
- run:
name: Publish release
command: ./scripts/publish-release.sh


workflows:
version: 2.1
ci:
jobs:
- lint-changes:
args: "--new-from-rev origin/master"
- lint-all:
concurrency: "16" # expend all docker 2xlarge CPUs.
- mod-tidy-check
- gofmt
- cbor-gen-check
- docs-check
- test:
codecov-upload: true
test-suite-name: full
- test-window-post:
go-test-flags: "-run=TestWindowedPost"
winpost-test: "1"
test-suite-name: window-post
- test-short:
go-test-flags: "--timeout 10m --short"
- mod-tidy-check
- build-all
- build-macos
test-suite-name: short
filters:
tags:
only:
- /^v\d+\.\d+\.\d+$/
- test-conformance:
test-suite-name: conformance
packages: "./conformance"
- test-conformance:
name: test-conformance-bleeding-edge
test-suite-name: conformance-bleeding-edge
packages: "./conformance"
vectors-branch: master
- build-lotus-soup
- build-debug
- build-all:
requires:
- test-short
filters:
tags:
only:
- /^v\d+\.\d+\.\d+$/
- build-macos:
requires:
- test-short
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+$/
- publish:
requires:
- build-all
- build-macos
filters:
branches:
ignore:
- /.*/
tags:
only:
- /^v\d+\.\d+\.\d+$/

@@ -1,3 +1,5 @@
comment: off
ignore:
- "cbor_gen.go"
github_checks:
annotations: false

1 .dockerignore Symbolic link
@@ -0,0 +1 @@
.gitignore
15 .github/CODEOWNERS vendored Normal file
@@ -0,0 +1,15 @@
## filecoin-project/lotus CODEOWNERS
## Refer to https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners.
##
## These users or groups will be automatically assigned as reviewers every time
## a PR is submitted that modifies code in the specified locations.
##
## The Lotus repo configuration requires that at least ONE codeowner approves
## the PR before merging.

### Global owners.
* @magik6k @whyrusleeping @Kubuxu

### Conformance testing.
conformance/ @raulk
extern/test-vectors @raulk
2 .github/ISSUE_TEMPLATE/bug_report.md vendored
@@ -21,7 +21,7 @@ A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.

**Version (run `lotus --version`):**
**Version (run `lotus version`):**

**Additional context**
Add any other context about the problem here.

43 .github/ISSUE_TEMPLATE/sealingfailed.md vendored Normal file
@@ -0,0 +1,43 @@
---
name: Sealing Issues
about: Create a report for help with sealing (commit) failures.
title: ''
labels: 'sealing'
assignees: ''

---

Please provide all the information requested here to help us troubleshoot "commit failed" issues.
If the information requested is missing, we will probably have to just ask you to provide it anyway,
before we can help debug.

**Describe the problem**

A brief description of the problem you encountered while proving (sealing) a sector.

Including what commands you ran, and a description of your setup, is very helpful.

**Sectors status**

The output of `lotus-miner sectors status --log <sectorId>` for the failed sector(s).

**Lotus miner logs**

Please go through the logs of your miner, and include screenshots of any error-like messages you find.

Alternatively please upload full log files and share a link here

**Lotus miner diagnostic info**

Please collect the following diagnostic information, and share a link here

* lotus-miner diagnostic info `lotus-miner info all > allinfo`

** Code modifications **

If you have modified parts of lotus, please describe which areas were modified,
and the scope of those modifications

**Version**

The output of `lotus --version`.
248 .github/labels.yml vendored Normal file
@@ -0,0 +1,248 @@
###
|
||||
### Special magic GitHub labels
|
||||
### https://help.github.com/en/github/building-a-strong-community/encouraging-helpful-contributions-to-your-project-with-labels
|
||||
#
|
||||
- name: "good first issue"
|
||||
color: 7057ff
|
||||
description: "Good for newcomers"
|
||||
- name: "help wanted"
|
||||
color: 008672
|
||||
description: "Extra attention is needed"
|
||||
|
||||
###
|
||||
### Goals
|
||||
#
|
||||
- name: goal/incentives
|
||||
color: ff004d
|
||||
description: "Incentinet"
|
||||
|
||||
###
|
||||
### Areas
|
||||
#
|
||||
- name: area/ux
|
||||
color: 00A4E0
|
||||
description: "Area: UX"
|
||||
- name: area/chain/vm
|
||||
color: 00A4E2
|
||||
description: "Area: Chain/VM"
|
||||
- name: area/chain/sync
|
||||
color: 00A4E4
|
||||
description: "Area: Chain/Sync"
|
||||
- name: area/chain/misc
|
||||
color: 00A4E6
|
||||
description: "Area: Chain/Misc"
|
||||
- name: area/markets
|
||||
color: 00A4E8
|
||||
description: "Area: Markets"
|
||||
- name: area/sealing/fsm
|
||||
color: 0bb1ed
|
||||
description: "Area: Sealing/FSM"
|
||||
- name: area/sealing/storage
|
||||
color: 0EB4F0
|
||||
description: "Area: Sealing/Storage"
|
||||
- name: area/proving
|
||||
color: 0EB4F0
|
||||
description: "Area: Proving"
|
||||
- name: area/mining
|
||||
color: 10B6F2
|
||||
description: "Area: Mining"
|
||||
- name: area/client/storage
|
||||
color: 13B9F5
|
||||
description: "Area: Client/Storage"
|
||||
- name: area/client/retrieval
|
||||
color: 15BBF7
|
||||
description: "Area: Client/Retrieval"
|
||||
- name: area/wallet
|
||||
color: 15BBF7
|
||||
description: "Area: Wallet"
|
||||
- name: area/payment-channel
|
||||
color: ff6767
|
||||
description: "Area: Payment Channel"
|
||||
- name: area/multisig
|
||||
color: fff0ff
|
||||
description: "Area: Multisig"
|
||||
- name: area/networking
|
||||
color: 273f8a
|
||||
description: "Area: Networking"
|
||||
|
||||
###
|
||||
### Kinds
|
||||
#
|
||||
- name: kind/bug
|
||||
color: c92712
|
||||
description: "Kind: Bug"
|
||||
- name: kind/chore
|
||||
color: fcf0b5
|
||||
description: "Kind: Chore"
|
||||
- name: kind/feature
|
||||
color: FFF3B8
|
||||
description: "Kind: Feature"
|
||||
- name: kind/improvement
|
||||
color: FFF5BA
|
||||
description: "Kind: Improvement"
|
||||
- name: kind/test
|
||||
color: FFF8BD
|
||||
description: "Kind: Test"
|
||||
- name: kind/question
|
||||
color: FFFDC2
|
||||
description: "Kind: Question"
|
||||
- name: kind/enhancement
|
||||
color: FFFFC5
|
||||
description: "Kind: Enhancement"
|
||||
- name: kind/discussion
|
||||
color: FFFFC7
|
||||
description: "Kind: Discussion"
|
||||
|
||||
###
|
||||
### Difficulties
|
||||
#
|
||||
- name: dif/trivial
|
||||
color: b2b7ff
|
||||
description: "Can be confidently tackled by newcomers, who are widely unfamiliar with lotus"
|
||||
- name: dif/easy
|
||||
color: 7886d7
|
||||
description: "An existing lotus user should be able to pick this up"
|
||||
- name: dif/medium
|
||||
color: 6574cd
|
||||
description: "Prior development experience with lotus is likely helpful"
|
||||
- name: dif/hard
|
||||
color: 5661b3
|
||||
description: "Suggests that having worked on the specific component affected by this issue is important"
|
||||
- name: dif/expert
|
||||
color: 2f365f
|
||||
description: "Requires extensive knowledge of the history, implications, ramifications of the issue"
|
||||
|
||||
###
|
||||
### Efforts
|
||||
#
|
||||
- name: effort/minutes
|
||||
color: e8fffe
|
||||
description: "Effort: Minutes"
|
||||
- name: effort/hours
|
||||
color: a0f0ed
|
||||
description: "Effort: Hours"
|
||||
- name: effort/day
|
||||
color: 64d5ca
|
||||
description: "Effort: One Day"
|
||||
- name: effort/days
|
||||
color: 4dc0b5
|
||||
description: "Effort: Multiple Days"
|
||||
- name: effort/week
|
||||
color: 38a89d
|
||||
description: "Effort: One Week"
|
||||
- name: effort/weeks
|
||||
color: 20504f
|
||||
description: "Effort: Multiple Weeks"
|
||||
|
||||
###
|
||||
### Impacts
|
||||
#
|
||||
- name: impact/regression
|
||||
color: f1f5f8
|
||||
description: "Impact: Regression"
|
||||
- name: impact/api-breakage
|
||||
color: ECF0F3
|
||||
description: "Impact: API Breakage"
|
||||
- name: impact/quality
|
||||
color: E7EBEE
|
||||
description: "Impact: Quality"
|
||||
- name: impact/dx
|
||||
color: E2E6E9
|
||||
description: "Impact: Developer Experience"
|
||||
- name: impact/test-flakiness
|
||||
color: DDE1E4
|
||||
description: "Impact: Test Flakiness"
|
||||
- name: impact/consensus
|
||||
color: b20014
|
||||
description: "Impact: Consensus"
|
||||
|
||||
###
|
||||
### Topics
|
||||
#
|
||||
- name: topic/interoperability
|
||||
color: bf0f73
|
||||
description: "Topic: Interoperability"
|
||||
- name: topic/specs
|
||||
color: CC1C80
|
||||
description: "Topic: Specs"
|
||||
- name: topic/docs
|
||||
color: D9298D
|
||||
description: "Topic: Documentation"
|
||||
- name: topic/architecture
|
||||
color: E53599
|
||||
description: "Topic: Architecture"
|
||||
|
||||
###
|
||||
### Priorities
|
||||
###
|
||||
- name: P0
|
||||
color: dd362a
|
||||
description: "P0: Critical Blocker"
|
||||
- name: P1
|
||||
color: ce8048
|
||||
description: "P1: Must be resolved"
|
||||
- name: P2
|
||||
color: dbd81a
|
||||
description: "P2: Should be resolved"
|
||||
- name: P3
|
||||
color: 9fea8f
|
||||
description: "P3: Might get resolved"
|
||||
|
||||
###
|
||||
### Hints
|
||||
#
|
||||
#- name: hint/good-first-issue
|
||||
# color: 7057ff
|
||||
# description: "Hint: Good First Issue"
|
||||
#- name: hint/help-wanted
|
||||
# color: 008672
|
||||
# description: "Hint: Help Wanted"
|
||||
- name: hint/needs-decision
|
||||
color: 33B9A5
|
||||
description: "Hint: Needs Decision"
|
||||
- name: hint/needs-triage
|
||||
color: 1AA08C
|
||||
description: "Hint: Needs Triage"
|
||||
- name: hint/needs-analysis
|
||||
color: 26AC98
|
||||
description: "Hint: Needs Analysis"
|
||||
- name: hint/needs-author-input
|
||||
color: 33B9A5
|
||||
description: "Hint: Needs Author Input"
|
||||
- name: hint/needs-team-input
|
||||
color: 40C6B2
|
||||
description: "Hint: Needs Team Input"
|
||||
- name: hint/needs-community-input
|
||||
color: 4DD3BF
|
||||
description: "Hint: Needs Community Input"
|
||||
- name: hint/needs-review
|
||||
color: 5AE0CC
|
||||
description: "Hint: Needs Review"
|
||||
|
||||
###
|
||||
### Statuses
|
||||
#
|
||||
- name: status/done
|
||||
color: edb3a6
|
||||
description: "Status: Done"
|
||||
- name: status/deferred
|
||||
color: E0A699
|
||||
description: "Status: Deferred"
|
||||
- name: status/in-progress
|
||||
color: D49A8D
|
||||
description: "Status: In Progress"
|
||||
- name: status/blocked
|
||||
color: C78D80
|
||||
description: "Status: Blocked"
|
||||
- name: status/inactive
|
||||
color: BA8073
|
||||
description: "Status: Inactive"
|
||||
- name: status/waiting
|
||||
color: AD7366
|
||||
description: "Status: Waiting"
|
||||
- name: status/rotten
|
||||
color: 7A4033
|
||||
description: "Status: Rotten"
|
||||
- name: status/discarded
|
||||
color: 6D3326
|
||||
description: "Status: Discarded / Won't fix"
|
||||
17 .github/workflows/label-syncer.yml vendored Normal file
@@ -0,0 +1,17 @@

name: Label syncer
on:
push:
paths:
- '.github/labels.yml'
branches:
- master
jobs:
build:
name: Sync labels
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@1.0.0
- uses: micnncim/action-label-syncer@v1.0.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
26 .gitignore vendored
@@ -1,19 +1,24 @@
/lotus
/lotus-storage-miner
/lotus-seal-worker
/lotus-miner
/lotus-worker
/lotus-seed
/pond
/townhall
/fountain
/stats
/bench
/lotus-health
/lotus-chainwatch
/lotus-shed
/lotus-pond
/lotus-townhall
/lotus-fountain
/lotus-stats
/lotus-bench
/lotus-gateway
/lotus-pcr
/bench.json
/lotuspond/front/node_modules
/lotuspond/front/build
/cmd/lotus-townhall/townhall/node_modules
/cmd/lotus-townhall/townhall/build
/cmd/lotus-townhall/townhall/package-lock.json
extern/filecoin-ffi/rust/target
**/*.h
**/*.a
**/*.pc
/**/*/.DS_STORE
@@ -23,11 +28,14 @@ build/paramfetch.sh
/vendor
/blocks.dot
/blocks.svg
/chainwatch
/chainwatch.db
/bundle
/darwin
/linux

*-fuzz.zip
/chain/types/work_msg/
bin/ipget
bin/tmp/*
.idea
scratchpad

12 .gitmodules vendored
@@ -2,3 +2,15 @@
path = extern/filecoin-ffi
url = https://github.com/filecoin-project/filecoin-ffi.git
branch = master
[submodule "extern/serialization-vectors"]
path = extern/serialization-vectors
url = https://github.com/filecoin-project/serialization-vectors
[submodule "extern/test-vectors"]
path = extern/test-vectors
url = https://github.com/filecoin-project/test-vectors.git
[submodule "extern/fil-blst"]
path = extern/fil-blst
url = https://github.com/filecoin-project/fil-blst.git
[submodule "extern/oni"]
path = extern/oni
url = https://github.com/filecoin-project/oni

@@ -1,7 +1,8 @@
linters:
disable-all: true
enable:
- vet
- gofmt
- govet
- goimports
- misspell
- goconst
@@ -21,16 +22,51 @@ issues:
- "func name will be used as test\\.Test.* by other packages, and that stutters; consider calling this"
- "Potential file inclusion via variable"
- "should have( a package)? comment"
- "Error return value of `logging.SetLogLevel` is not checked"
- "comment on exported"
- "(func|method) \\w+ should be \\w+"
- "(type|var|struct field|(method|func) parameter) `\\w+` should be `\\w+`"
- "(G306|G301|G307|G108|G302|G204|G104)"
- "don't use ALL_CAPS in Go names"
- "string .* has .* occurrences, make it a constant"
- "a blank import should be only in a main or test package, or have a comment justifying it"
- "package comment should be of the form"

exclude-use-default: false
exclude-rules:
- path: lotuspond
linters:
- errcheck

- path: node/modules/lp2p
linters:
- golint
- path: ".*_test.go"

- path: build/params_.*\.go
linters:
- golint

- path: api/apistruct/struct.go
linters:
- golint

- path: .*_test.go
linters:
- gosec

- path: chain/vectors/gen/.*
linters:
- gosec

- path: cmd/lotus-bench/.*
linters:
- gosec

- path: api/test/.*
text: "context.Context should be the first parameter"
linters:
- golint

linters-settings:
goconst:
min-occurrences: 6

570 CHANGELOG.md
@@ -1,7 +1,569 @@
# lotus changelog
|
||||
# Lotus changelog
|
||||
|
||||
## 0.1.0 / 2019-12-11
|
||||
# 0.8.0 / 2020-09-26
|
||||
|
||||
We are very excited to release **lotus** 0.1.0. This is our testnet release. To install lotus and join the testnet, please visit [docs.lotu.sh](docs.lotu.sh). Please file bug reports as [issues](https://github.com/filecoin-project/lotus/issues).
|
||||
This consensus-breaking release of Lotus introduces an upgrade to the network. The changes that break consensus are:
|
||||
|
||||
A huge thank you to all contributors for this testnet release!
|
||||
- Upgrading to specs-actors v0.9.11, which reduces WindowPoSt faults per [FIP 0002](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0002.md) to reduce cost for honest miners with occasional faults (see https://github.com/filecoin-project/specs-actors/pull/1181)
|
||||
- Revisions to some cryptoeconomics and network params
|
||||
|
||||
This release also updates go-fil-markets to fix an incompatibility issue between v0.7.2 and earlier versions.
|
||||
|
||||
## Changes
|
||||
|
||||
#### Dependencies
|
||||
|
||||
- Update spec actors to 0.9.11 (https://github.com/filecoin-project/lotus/pull/4039)
|
||||
- Update markets to 0.6.3 (https://github.com/filecoin-project/lotus/pull/4013)
|
||||
|
||||
#### Core Lotus
|
||||
|
||||
- Network upgrade (https://github.com/filecoin-project/lotus/pull/4039)
|
||||
- Fix AddSupportedProofTypes (https://github.com/filecoin-project/lotus/pull/4033)
|
||||
- Return an error when we fail to find a sector when checking sector expiration (https://github.com/filecoin-project/lotus/pull/4026)
|
||||
- Batch blockstore copies after block validation (https://github.com/filecoin-project/lotus/pull/3980)
|
||||
- Remove a misleading miner actor abstraction (https://github.com/filecoin-project/lotus/pull/3977)
|
||||
- Fix out-of-bounds when loading all sector infos (https://github.com/filecoin-project/lotus/pull/3976)
|
||||
- Fix break condition in the miner (https://github.com/filecoin-project/lotus/pull/3953)
|
||||
|
||||
#### UX
|
||||
|
||||
- Correct helptext around miners setting ask (https://github.com/filecoin-project/lotus/pull/4009)
|
||||
- Make sync wait nicer (https://github.com/filecoin-project/lotus/pull/3991)
|
||||
|
||||
#### Tooling and validation
|
||||
|
||||
- Small adjustments following network upgradability changes (https://github.com/filecoin-project/lotus/pull/3996)
|
||||
- Add some more big pictures stats to stateroot stat (https://github.com/filecoin-project/lotus/pull/3995)
|
||||
- Add some actors policy setters for testing (https://github.com/filecoin-project/lotus/pull/3975)
|
||||
|
||||
## Contributors
|
||||
|
||||
The following contributors had 5 or more commits go into this release.
|
||||
We are grateful for every contribution!
|
||||
|
||||
| Contributor | Commits | Lines ± |
|--------------------|---------|---------------|
| arajasek | 66 | +3140/-1261 |
| Stebalien | 64 | +3797/-3434 |
| magik6k | 48 | +1892/-976 |
| raulk | 40 | +2412/-1549 |
| vyzo | 22 | +287/-196 |
| alanshaw | 15 | +761/-146 |
| whyrusleeping | 15 | +736/-52 |
| hannahhoward | 14 | +1237/-837 |
| anton | 6 | +32/-8 |
| travisperson | 5 | +502/-6 |
| Frank | 5 | +78/-39 |
| Jennifer | 5 | +148/-41 |
|
||||
|
||||
# 0.7.2 / 2020-09-23
|
||||
|
||||
This optional release of Lotus introduces a major refactor around how a Lotus node interacts with code from the specs-actors repo. We now use interfaces to read the state of actors, which is required to be able to reason about different versions of actors code at the same time.
|
||||
|
||||
Additionally, this release introduces various improvements to the sync process, as well as changes to better the overall UX experience.
|
||||
|
||||
## Changes
|
||||
|
||||
#### Core Lotus
|
||||
|
||||
- Network upgrade support (https://github.com/filecoin-project/lotus/pull/3781)
|
||||
- Upgrade markets to `v0.6.2` (https://github.com/filecoin-project/lotus/pull/3974)
|
||||
- Validate chain sync response indices when fetching messages (https://github.com/filecoin-project/lotus/pull/3939)
|
||||
- Add height diff to sync wait (https://github.com/filecoin-project/lotus/pull/3926)
|
||||
- Replace Requires with Wants (https://github.com/filecoin-project/lotus/pull/3898)
|
||||
- Update state diffing for market actor (https://github.com/filecoin-project/lotus/pull/3889)
|
||||
- Parallel fetch for sync (https://github.com/filecoin-project/lotus/pull/3887)
|
||||
- Fix SectorState (https://github.com/filecoin-project/lotus/pull/3881)
|
||||
|
||||
#### User Experience
|
||||
|
||||
- Add basic deal stats api server for spacerace slingshot (https://github.com/filecoin-project/lotus/pull/3963)
|
||||
- When doing `sectors update-state`, show a list of existing states if user inputs an invalid one (https://github.com/filecoin-project/lotus/pull/3944)
|
||||
- Fix `lotus-miner storage find` error (https://github.com/filecoin-project/lotus/pull/3927)
|
||||
- Log shutdown method for lotus daemon and miner (https://github.com/filecoin-project/lotus/pull/3925)
|
||||
- Update build and setup instruction link (https://github.com/filecoin-project/lotus/pull/3919)
|
||||
- Add an option to hide removed sectors from `sectors list` output (https://github.com/filecoin-project/lotus/pull/3903)
|
||||
|
||||
#### Testing and validation
|
||||
|
||||
- Add init.State#Remove() for testing (https://github.com/filecoin-project/lotus/pull/3971)
|
||||
- lotus-shed: add consensus check command (https://github.com/filecoin-project/lotus/pull/3933)
|
||||
- Add keyinfo verify and jwt token command to lotus-shed (https://github.com/filecoin-project/lotus/pull/3914)
|
||||
- Fix conformance gen (https://github.com/filecoin-project/lotus/pull/3892)
|
||||
|
||||
# 0.7.1 / 2020-09-17
|
||||
|
||||
This optional release of Lotus introduces some critical fixes to the window PoSt process. It also upgrades some core dependencies, and introduces many improvements to the mining process, deal-making cycle, and overall User Experience.
|
||||
|
||||
## Changes
|
||||
|
||||
#### Some notable improvements:
|
||||
|
||||
- Correctly construct params for `SubmitWindowedPoSt` messages (https://github.com/filecoin-project/lotus/pull/3909)
|
||||
- Skip sectors correctly for Window PoSt (https://github.com/filecoin-project/lotus/pull/3839)
|
||||
- Split window PoST submission into multiple messages (https://github.com/filecoin-project/lotus/pull/3689)
|
||||
- Improve journal coverage (https://github.com/filecoin-project/lotus/pull/2455)
|
||||
- Allow retrievals while sealing (https://github.com/filecoin-project/lotus/pull/3778)
|
||||
- Don't prune locally published messages (https://github.com/filecoin-project/lotus/pull/3772)
|
||||
- Add get-ask, set-ask retrieval commands (https://github.com/filecoin-project/lotus/pull/3886)
|
||||
- Consistently name winning and window post in logs (https://github.com/filecoin-project/lotus/pull/3873)
|
||||
- Add auto flag to mpool replace (https://github.com/filecoin-project/lotus/pull/3752)
|
||||
|
||||
#### Dependencies
|
||||
|
||||
- Upgrade markets to `v0.6.1` (https://github.com/filecoin-project/lotus/pull/3906)
|
||||
- Upgrade specs-actors to `v0.9.10` (https://github.com/filecoin-project/lotus/pull/3846)
|
||||
- Upgrade badger (https://github.com/filecoin-project/lotus/pull/3739)
|
||||
|
||||
# 0.7.0 / 2020-09-10
|
||||
|
||||
This consensus-breaking release of Lotus is designed to test a network upgrade on the space race testnet. The changes that break consensus are:
|
||||
|
||||
- Upgrading the Drand network used from the test Drand network to the League of Entropy main drand network. This is the same Drand network that will be used in the Filecoin mainnet.
|
||||
- Upgrading to specs-actors v0.9.8, which adds a new method to the Multisig actor.
|
||||
|
||||
## Changes
|
||||
|
||||
#### Core Lotus
|
||||
|
||||
- Fix IsAncestorOf (https://github.com/filecoin-project/lotus/pull/3717)
|
||||
- Update to specs-actors v0.9.8 (https://github.com/filecoin-project/lotus/pull/3725)
|
||||
- Increase chain throughput by 20% (https://github.com/filecoin-project/lotus/pull/3732)
|
||||
- Update to go-libp2p-pubsub `master` (https://github.com/filecoin-project/lotus/pull/3735)
|
||||
- Drand upgrade (https://github.com/filecoin-project/lotus/pull/3670)
|
||||
- Multisig API additions (https://github.com/filecoin-project/lotus/pull/3590)
|
||||
|
||||
#### Storage Miner
|
||||
|
||||
- Increase the number of times precommit2 is attempted before moving back to precommit1 (https://github.com/filecoin-project/lotus/pull/3720)
|
||||
|
||||
#### Message pool
|
||||
|
||||
- Relax mpool add strictness checks for local pushes (https://github.com/filecoin-project/lotus/pull/3724)
|
||||
|
||||
|
||||
#### Maintenance
|
||||
|
||||
- Fix devnets (https://github.com/filecoin-project/lotus/pull/3712)
|
||||
- Fix(chainwatch): compare prev miner with cur miner (https://github.com/filecoin-project/lotus/pull/3715)
|
||||
- CI: fix statediff build; make optional (https://github.com/filecoin-project/lotus/pull/3729)
|
||||
- Feat: Chaos abort (https://github.com/filecoin-project/lotus/pull/3733)
|
||||
|
||||
## Contributors
|
||||
|
||||
The following contributors had commits go into this release.
|
||||
We are grateful for every contribution!
|
||||
|
||||
| Contributor | Commits | Lines ± |
|--------------------|---------|---------------|
| arajasek | 28 | +1144/-239 |
| Kubuxu | 19 | +452/-261 |
| whyrusleeping | 13 | +456/-87 |
| vyzo | 11 | +318/-20 |
| raulk | 10 | +1289/-350 |
| magik6k | 6 | +188/-55 |
| dirkmc | 3 | +31/-8 |
| alanshaw | 3 | +176/-37 |
| Stebalien | 2 | +9/-12 |
| lanzafame | 1 | +1/-1 |
| frrist | 1 | +1/-1 |
| mishmosh | 1 | +1/-1 |
| nonsense | 1 | +1/-0 |
|
||||
|
||||
# 0.6.2 / 2020-09-09
|
||||
|
||||
This release introduces some critical fixes to message selection and gas estimation logic. It also adds the ability for nodes to mark a certain tipset as checkpointed, as well as various minor improvements and bugfixes.
|
||||
|
||||
## Changes
|
||||
|
||||
#### Messagepool
|
||||
|
||||
- Warn when optimal selection fails to pack a block and we fall back to random selection (https://github.com/filecoin-project/lotus/pull/3708)
|
||||
- Add basic command for printing gas performance of messages in the mpool (https://github.com/filecoin-project/lotus/pull/3701)
|
||||
- Adjust optimal selection to always try to fill blocks (https://github.com/filecoin-project/lotus/pull/3685)
|
||||
- Fix very minor bug in repub baseFeeLowerBound (https://github.com/filecoin-project/lotus/pull/3663)
|
||||
- Add an auto flag to mpool replace (https://github.com/filecoin-project/lotus/pull/3676)
|
||||
- Fix mpool optimal selection packing failure (https://github.com/filecoin-project/lotus/pull/3698)
|
||||
|
||||
#### Core Lotus
|
||||
|
||||
- Don't use latency as initial estimate for blocksync (https://github.com/filecoin-project/lotus/pull/3648)
|
||||
- Add niceSleep 1 second when drand errors (https://github.com/filecoin-project/lotus/pull/3664)
|
||||
- Fix isChainNearSync check in block validator (https://github.com/filecoin-project/lotus/pull/3650)
|
||||
- Add peer to peer manager before fetching the tipset (https://github.com/filecoin-project/lotus/pull/3667)
|
||||
- Add StageFetchingMessages to sync status (https://github.com/filecoin-project/lotus/pull/3668)
|
||||
- Pass tipset through upgrade logic (https://github.com/filecoin-project/lotus/pull/3673)
|
||||
- Allow nodes to mark tipsets as checkpointed (https://github.com/filecoin-project/lotus/pull/3680)
|
||||
- Remove hard-coded late-fee in window PoSt (https://github.com/filecoin-project/lotus/pull/3702)
|
||||
- Gas: Fix median calc (https://github.com/filecoin-project/lotus/pull/3686)
|
||||
|
||||
#### Storage
|
||||
|
||||
- Storage manager: bail out with an error if unsealed cid is undefined (https://github.com/filecoin-project/lotus/pull/3655)
|
||||
- Storage: return true from Sealer.ReadPiece() on success (https://github.com/filecoin-project/lotus/pull/3657)
|
||||
|
||||
#### Maintenance
|
||||
|
||||
- Resolve lotus, test-vectors, statediff dependency cycle (https://github.com/filecoin-project/lotus/pull/3688)
|
||||
- Paych: add docs on how to use paych status (https://github.com/filecoin-project/lotus/pull/3690)
|
||||
- Initial CODEOWNERS (https://github.com/filecoin-project/lotus/pull/3691)
|
||||
|
||||
# 0.6.1 / 2020-09-08
|
||||
|
||||
This optional release introduces a minor improvement to the sync process, ensuring nodes don't fall behind and then resync.
|
||||
|
||||
## Changes
|
||||
|
||||
- Update `test-vectors` (https://github.com/filecoin-project/lotus/pull/3645)
|
||||
- Revert "only subscribe to pubsub topics once we are synced" (https://github.com/filecoin-project/lotus/pull/3643)
|
||||
|
||||
# 0.6.0 / 2020-09-07
|
||||
|
||||
This consensus-breaking release of Lotus is designed to test a network upgrade on the space race testnet. The changes that break consensus are:
|
||||
|
||||
- Tweaking of some cryptoecon parameters in specs-actors 0.9.7 (https://github.com/filecoin-project/specs-actors/releases/tag/v0.9.7)
|
||||
- Rebalancing FIL distribution to make testnet FIL scarce, which prevents base fee spikes and sets better expectations for mainnet
|
||||
|
||||
This release also introduces many improvements to Lotus! Among them are a new version of go-fil-markets that supports non-blocking retrieval, various spam reduction measures in the messagepool and p2p logic, and UX improvements to payment channels, dealmaking, and state inspection.
|
||||
|
||||
## Changes
|
||||
|
||||
#### Core Lotus and dependencies
|
||||
|
||||
- Implement faucet funds reallocation logic (https://github.com/filecoin-project/lotus/pull/3632)
|
||||
- Network upgrade: Upgrade to correct fork threshold (https://github.com/filecoin-project/lotus/pull/3628)
|
||||
- Update to specs 0.9.7 and markets 0.6.0 (https://github.com/filecoin-project/lotus/pull/3627)
|
||||
- Network upgrade: Perform base fee tamping (https://github.com/filecoin-project/lotus/pull/3623)
|
||||
- Chain events: if cache best() is nil, return chain head (https://github.com/filecoin-project/lotus/pull/3611)
|
||||
- Update to specs actors v0.9.6 (https://github.com/filecoin-project/lotus/pull/3603)
|
||||
|
||||
#### Messagepool
|
||||
|
||||
- Temporarily allow negative chains (https://github.com/filecoin-project/lotus/pull/3625)
|
||||
- Improve publish/republish logic (https://github.com/filecoin-project/lotus/pull/3592)
|
||||
- Fix selection bug; priority messages were not included if other chains were negative (https://github.com/filecoin-project/lotus/pull/3580)
|
||||
- Add defensive check for minimum GasFeeCap for inclusion within the next 20 blocks (https://github.com/filecoin-project/lotus/pull/3579)
|
||||
- Add additional info about gas premium (https://github.com/filecoin-project/lotus/pull/3578)
|
||||
- Fix GasPremium capping logic (https://github.com/filecoin-project/lotus/pull/3552)
|
||||
|
||||
#### Payment channels
|
||||
|
||||
- Get available funds by address or by from/to (https://github.com/filecoin-project/lotus/pull/3547)
|
||||
- Create `lotus paych status` command (https://github.com/filecoin-project/lotus/pull/3523)
|
||||
- Rename CLI command from "paych get" to "paych add-funds" (https://github.com/filecoin-project/lotus/pull/3520)
|
||||
|
||||
#### Peer-to-peer
|
||||
|
||||
- Only subscribe to pubsub topics once we are synced (https://github.com/filecoin-project/lotus/pull/3602)
|
||||
- Reduce mpool add failure log spam (https://github.com/filecoin-project/lotus/pull/3562)
|
||||
- Republish messages even if the chains have negative performance(https://github.com/filecoin-project/lotus/pull/3557)
|
||||
- Adjust gossipsub gossip factor (https://github.com/filecoin-project/lotus/pull/3556)
|
||||
- Integrate pubsub Random Early Drop (https://github.com/filecoin-project/lotus/pull/3518)
|
||||
|
||||
#### Miscellaneous
|
||||
|
||||
- Fix panic in OnDealExpiredSlashed (https://github.com/filecoin-project/lotus/pull/3553)
|
||||
- Robustify state manager against holes in actor method numbers (https://github.com/filecoin-project/lotus/pull/3538)
|
||||
|
||||
#### UX
|
||||
|
||||
- VM: Fix an error message (https://github.com/filecoin-project/lotus/pull/3608)
|
||||
- Documentation: Batch replacement,update lotus-storage-miner to lotus-miner (https://github.com/filecoin-project/lotus/pull/3571)
|
||||
- CLI: Robust actor lookup (https://github.com/filecoin-project/lotus/pull/3535)
|
||||
- Add agent flag to net peers (https://github.com/filecoin-project/lotus/pull/3534)
|
||||
- Add watch option to storage-deals list (https://github.com/filecoin-project/lotus/pull/3527)
|
||||
|
||||
#### Testing & tooling
|
||||
|
||||
- Decommission chain-validation (https://github.com/filecoin-project/lotus/pull/3606)
|
||||
- Metrics: add expected height metric (https://github.com/filecoin-project/lotus/pull/3586)
|
||||
- PCR: Use current tipset during refund (https://github.com/filecoin-project/lotus/pull/3570)
|
||||
- Lotus-shed: Add math command (https://github.com/filecoin-project/lotus/pull/3568)
|
||||
- PCR: Add tipset aggregation (https://github.com/filecoin-project/lotus/pull/3565)
- Fix broken paych tests (https://github.com/filecoin-project/lotus/pull/3551)
|
||||
- Make chain export ~1000x times faster (https://github.com/filecoin-project/lotus/pull/3533)
|
||||
- Chainwatch: Stop SyncIncomingBlocks from leaking into chainwatch processing; No panics during processing (https://github.com/filecoin-project/lotus/pull/3526)
|
||||
- Conformance: various changes (https://github.com/filecoin-project/lotus/pull/3521)
|
||||
|
||||
# 0.5.10 / 2020-09-03
|
||||
|
||||
This patch includes a crucial fix to the message pool selection logic, strongly disfavouring messages that might cause a miner penalty.
|
||||
|
||||
## Changes
|
||||
|
||||
- Fix calculation of GasReward in messagepool (https://github.com/filecoin-project/lotus/pull/3528)
|
||||
|
||||
# 0.5.9 / 2020-09-03
|
||||
|
||||
This patch includes a hotfix to the `GasEstimateFeeCap` method, capping the estimated fee to a reasonable level by default.
|
||||
|
||||
## Changes
|
||||
|
||||
- Added target height to sync wait (https://github.com/filecoin-project/lotus/pull/3502)
|
||||
- Disable codecov annotations (https://github.com/filecoin-project/lotus/pull/3514)
|
||||
- Cap fees to reasonable level by default (https://github.com/filecoin-project/lotus/pull/3516)
|
||||
- Add APIs and command to inspect bandwidth usage (https://github.com/filecoin-project/lotus/pull/3497)
|
||||
- Track expected nonce in mpool, ignore messages with large nonce gaps (https://github.com/filecoin-project/lotus/pull/3450)
|
||||
|
||||
# 0.5.8 / 2020-09-02
|
||||
|
||||
This patch includes some bugfixes to the sector sealing process, and updates go-fil-markets. It also improves the performance of blocksync, adds a method to export chain state trees, and improves chainwatch.
|
||||
|
||||
## Changes
|
||||
|
||||
- Upgrade markets to v0.5.9 (https://github.com/filecoin-project/lotus/pull/3496)
|
||||
- Improve blocksync to load fewer messages: (https://github.com/filecoin-project/lotus/pull/3494)
|
||||
- Fix a panic in the ffi-wrapper's `ReadPiece` (https://github.com/filecoin-project/lotus/pull/3492/files)
|
||||
- Fix a deadlock in the sealing scheduler (https://github.com/filecoin-project/lotus/pull/3489)
|
||||
- Add test vectors for tipset tests (https://github.com/filecoin-project/lotus/pull/3485/files)
|
||||
- Improve the advance-block debug command (https://github.com/filecoin-project/lotus/pull/3476)
|
||||
- Add toggle for message processing to Lotus PCR (https://github.com/filecoin-project/lotus/pull/3470)
|
||||
- Allow exporting recent chain state trees (https://github.com/filecoin-project/lotus/pull/3463)
|
||||
- Remove height from chain rand (https://github.com/filecoin-project/lotus/pull/3458)
|
||||
- Disable GC on chain badger datastore (https://github.com/filecoin-project/lotus/pull/3457)
|
||||
- Account for `GasPremium` in `GasEstimateFeeCap` (https://github.com/filecoin-project/lotus/pull/3456)
|
||||
- Update go-libp2p-pubsub to `master` (https://github.com/filecoin-project/lotus/pull/3455)
|
||||
- Chainwatch improvements (https://github.com/filecoin-project/lotus/pull/3442)
|
||||
|
||||
# 0.5.7 / 2020-08-31
|
||||
|
||||
This patch release includes some bugfixes and enhancements to the sector lifecycle and message pool logic.
|
||||
|
||||
## Changes
|
||||
|
||||
- Rebuild unsealed infos on miner restart (https://github.com/filecoin-project/lotus/pull/3401)
|
||||
- CLI to attach storage paths to workers (https://github.com/filecoin-project/lotus/pull/3405)
|
||||
- Do not select negative performing message chains for inclusion (https://github.com/filecoin-project/lotus/pull/3392)
|
||||
- Remove a redundant error-check (https://github.com/filecoin-project/lotus/pull/3421)
|
||||
- Correctly move unsealed sectors in `FinalizeSectors` (https://github.com/filecoin-project/lotus/pull/3424)
|
||||
- Improve worker selection logic (https://github.com/filecoin-project/lotus/pull/3425)
|
||||
- Don't use context to close bitswap (https://github.com/filecoin-project/lotus/pull/3430)
|
||||
- Correctly estimate gas premium when there is only one message on chain (https://github.com/filecoin-project/lotus/pull/3428)
|
||||
|
||||
# 0.5.6 / 2020-08-29
|
||||
|
||||
Hotfix release that fixes a panic in the sealing scheduler (https://github.com/filecoin-project/lotus/pull/3389).
|
||||
|
||||
# 0.5.5
|
||||
|
||||
This patch release introduces a large number of improvements to the sealing process.
|
||||
It also updates go-fil-markets to
|
||||
[version 0.5.8](https://github.com/filecoin-project/go-fil-markets/releases/tag/v0.5.8),
|
||||
and go-libp2p-pubsub to [v0.3.5](https://github.com/libp2p/go-libp2p-pubsub/releases/tag/v0.3.5).
|
||||
|
||||
#### Downstream upgrades
|
||||
|
||||
- Upgrades markets to v0.5.8 (https://github.com/filecoin-project/lotus/pull/3384)
|
||||
- Upgrades go-libp2p-pubsub to v0.3.5 (https://github.com/filecoin-project/lotus/pull/3305)
|
||||
|
||||
#### Sector sealing
|
||||
|
||||
- The following improvements were introduced in https://github.com/filecoin-project/lotus/pull/3350.
|
||||
|
||||
- Allow `lotus-miner sectors remove` to remove a sector in any state.
|
||||
- Create a separate state in the storage FSM dedicated to submitting the Commit message.
|
||||
- Recovery for when the Deal IDs of deals in a sector get changed in a reorg.
|
||||
- Auto-retry sending Precommit and Commit messages if they run out of gas
|
||||
- Auto-retry sector remove tasks when they fail
|
||||
- Compact worker windows, and allow their tasks to be executed in any order
|
||||
|
||||
- Don't simply skip PoSt for bad sectors (https://github.com/filecoin-project/lotus/pull/3323)
|
||||
|
||||
#### Message Pool
|
||||
|
||||
- Spam Protection: Track required funds for pending messages (https://github.com/filecoin-project/lotus/pull/3313)
|
||||
|
||||
#### Chainwatch
|
||||
|
||||
- Add more power and reward metrics (https://github.com/filecoin-project/lotus/pull/3367)
|
||||
- Fix raciness in sector deal table (https://github.com/filecoin-project/lotus/pull/3275)
|
||||
- Parallelize miner processing (https://github.com/filecoin-project/lotus/pull/3380)
|
||||
- Accept Lotus API and token (https://github.com/filecoin-project/lotus/pull/3337)
|
||||
|
||||
# 0.5.4
|
||||
|
||||
A patch release, containing a few nice bugfixes and improvements:
|
||||
|
||||
- Fix parsing of peer ID in `lotus-miner actor set-peer-id` (@whyrusleeping)
|
||||
- Update dependencies, fixing several bugs (@Stebalien)
|
||||
- Fix remaining linter warnings (@Stebalien)
|
||||
- Use safe string truncation (@Ingar)
|
||||
- Allow tweaking of blocksync message window size (@whyrusleeping)
|
||||
- Add some additional gas stats to metrics (@Kubuxu)
|
||||
- Fix an edge case bug in message selection, add many tests (@vyzo)
|
||||
|
||||
# 0.5.3
|
||||
|
||||
Yet another hotfix release.
|
||||
A lesson for readers: having people who have been awake for 12+ hours review
your hotfix PR is not a good idea. Find someone who has slept recently enough
to give you a good code review, otherwise you'll end up quickly bumping
versions again.
|
||||
|
||||
- Fixed a bug in the mempool that was introduced in v0.5.2
|
||||
|
||||
# 0.5.2 / 2020-08-24
|
||||
|
||||
This is a hotfix release.
|
||||
|
||||
- Fix message selection to not include messages that are invalid for block
|
||||
inclusion.
|
||||
- Improve SelectMessage handling of the case where the message pools tipset
|
||||
differs from our mining base.
|
||||
|
||||
# 0.5.1 / 2020-08-24
|
||||
|
||||
The Space Race release!
|
||||
This release contains the genesis car file and bootstrap peers for the space
|
||||
race network.
|
||||
|
||||
Additionally, we included two small fixes to genesis creation:
|
||||
- Randomize ticket value in genesis generation
|
||||
- Correctly set t099 (burnt funds actor) to have valid account actor state
|
||||
|
||||
# 0.5.0 / 2020-08-20

This version of Lotus will be used for the incentivized testnet Space Race competition,
and can be considered mainnet-ready code. It includes some protocol
changes, upgrades of core dependencies, and various bugfixes and UX/performance improvements.

## Highlights

Among the highlights included in this release are:

- Gas changes: We implemented EIP-1559 and introduced real gas values.
- Deal-making: We now support "Committed Capacity" sectors, "fast-retrieval" deals,
and the packing of multiple deals into a single sector.
- Renamed features: We renamed some of the binaries, environment variables, and default
paths associated with a Lotus node.

### Gas changes

We made some significant changes to the mechanics of gas in this release.

#### Network fee

We implemented something similar to
[Ethereum's EIP-1559](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1559.md).
The `Message` structure had three changes:
- The `GasPrice` field has been removed
- A new `GasFeeCap` field has been added, which controls the maximum cost
the sender incurs for the message
- A new `GasPremium` field has been added, which controls the reward a miner
earns for including the message
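For illustration, here is a minimal Go sketch of the field change described in the list above. It is not the actual `types.Message` definition from the Lotus codebase; the struct names and field types are simplified stand-ins for the example.

```go
package main

import (
	"fmt"
	"math/big"
)

// oldMessage sketches the pre-upgrade fee fields: a single GasPrice.
type oldMessage struct {
	GasLimit int64
	GasPrice *big.Int // removed in this release
}

// newMessage sketches the post-upgrade fee fields described above.
type newMessage struct {
	GasLimit   int64
	GasFeeCap  *big.Int // caps the per-gas cost the sender can incur
	GasPremium *big.Int // per-gas reward for the miner who includes the message
}

func main() {
	m := newMessage{GasLimit: 10_000_000, GasFeeCap: big.NewInt(2000), GasPremium: big.NewInt(200)}
	fmt.Printf("%+v\n", m)
}
```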

A sender will never be charged more than `GasFeeCap * GasLimit`.
A miner will typically earn `GasPremium * GasLimit` as a reward.

The `Blockheader` structure has one new field, called `ParentBaseFee`.
Informally speaking, the `ParentBaseFee`
is increased when blocks are densely packed with messages, and decreased otherwise.

The `ParentBaseFee` is used when calculating how much a sender burns when executing a message. _Burning_ simply refers to sending attoFIL to a dedicated, unreachable account.
A message causes `ParentBaseFee * GasUsed` attoFIL to be burnt.

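As a back-of-the-envelope illustration of the three formulas above, the sketch below multiplies out the maximum sender charge, the typical miner reward, and the amount burnt for one hypothetical message. All numbers are made up for the example and are not drawn from the network.

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Hypothetical per-message values; prices are in attoFIL per gas unit.
	gasLimit := big.NewInt(10_000_000)  // gas the message is allowed to use
	gasUsed := big.NewInt(7_500_000)    // gas the message actually used
	gasFeeCap := big.NewInt(2000)       // sender's cap on the per-gas price
	gasPremium := big.NewInt(200)       // per-gas reward offered to the miner
	parentBaseFee := big.NewInt(100)    // base fee taken from the parent block header

	// A sender is never charged more than GasFeeCap * GasLimit.
	maxCharge := new(big.Int).Mul(gasFeeCap, gasLimit)

	// A miner typically earns GasPremium * GasLimit as a reward.
	minerReward := new(big.Int).Mul(gasPremium, gasLimit)

	// ParentBaseFee * GasUsed attoFIL is burnt (sent to an unreachable account).
	burnt := new(big.Int).Mul(parentBaseFee, gasUsed)

	fmt.Println("max sender charge:", maxCharge, "attoFIL")
	fmt.Println("typical miner reward:", minerReward, "attoFIL")
	fmt.Println("burnt:", burnt, "attoFIL")
}
```
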
#### Real gas values

This release also includes our first "real" gas costs for primitive operations.
The costs were designed to account for both the _time_ that message execution takes,
as well as the _space_ a message adds to the state tree.

### Deal-making changes

There are three key changes to the deal-making process.

#### Committed Capacity sectors

Miners can now pledge "Committed Capacity" (CC) sectors, which are explicitly
stated as containing junk data, and must not include any deals. Miners can do this
to increase their storage power, and win block rewards from this pledged storage.

They can mark these sectors as "upgradable" with `lotus-miner sectors mark-for-upgrade`.
If the miner receives and accepts one or more storage deals, the sector that includes
those deals will _replace_ the CC sector. This is intended to maximize the amount of useful
storage on the Filecoin network.

#### Fast-retrieval deals

Clients can now include a `fast-retrieval` flag when proposing deals with storage miners.
If set to true, the miner will include an extra copy of the deal data. This
data can be quickly served in a retrieval deal, since it will not need to be unsealed.

#### Multiple deals per sector

Miners can now pack multiple deals into a single sector, so long as all the deals
fit into the sector capacity. This should increase the packing efficiency of miners.

### Renamed features

To improve the user experience, we updated several names to maintain
standard prefixing, and to better reflect the meaning of the features being referenced.

In particular, the Lotus miner binary is now called `lotus-miner`, the default
path for miner data is now `~/.lotusminer`, and the environment variable
that sets the path for miner data is now `$LOTUS_MINER_PATH`. A full list of renamed
features can be found [here](https://github.com/filecoin-project/lotus/issues/2304).

## Changelog

#### Downstream upgrades
- Upgrades markets to v0.5.6 (https://github.com/filecoin-project/lotus/pull/3058)
- Upgrades specs-actors to v0.9.3 (https://github.com/filecoin-project/lotus/pull/3151)

#### Core protocol
- Introduces gas values, replacing placeholders (https://github.com/filecoin-project/lotus/pull/2343)
- Implements EIP-1559, introducing a network base fee, message gas fee cap, and message gas fee premium (https://github.com/filecoin-project/lotus/pull/2874)
- Implements Poisson Sortition for elections (https://github.com/filecoin-project/lotus/pull/2084)

#### Deal-making lifecycle
- Introduces "Committed Capacity" sectors (https://github.com/filecoin-project/lotus/pull/2220)
- Introduces "fast-retrieval" flag for deals (https://github.com/filecoin-project/lotus/pull/2323)
- Supports packing multiple deals into one sector (https://github.com/filecoin-project/storage-fsm/pull/38)

#### Enhancements

- Optimized message pool selection logic (https://github.com/filecoin-project/lotus/pull/2838)
- Window-based scheduling of sealing tasks (https://github.com/filecoin-project/sector-storage/pull/67)
- Faster window PoSt (https://github.com/filecoin-project/lotus/pull/2209/files)
- Refactors the payment channel manager (https://github.com/filecoin-project/lotus/pull/2640)
- Refactors blocksync (https://github.com/filecoin-project/lotus/pull/2715/files)

#### UX

- Provide status updates for data-transfer (https://github.com/filecoin-project/lotus/pull/3162, https://github.com/filecoin-project/lotus/pull/3191)
- Miners can customise asks (https://github.com/filecoin-project/lotus/pull/2046)
- Miners can toggle auto-acceptance of deals (https://github.com/filecoin-project/lotus/pull/1994)
- Miners can maintain a blocklist of piece CIDs (https://github.com/filecoin-project/lotus/pull/2069)

## Contributors

The following contributors had 10 or more commits go into this release.
We are grateful for every contribution!

| Contributor    | Commits | Lines ±       |
|----------------|---------|---------------|
| magik6k        | 361     | +13197/-6136  |
| Kubuxu         | 227     | +5670/-2587   |
| arajasek       | 120     | +2916/-1264   |
| whyrusleeping  | 112     | +3979/-1089   |
| vyzo           | 99      | +3343/-1305   |
| dirkmc         | 68      | +8732/-3621   |
| laser          | 45      | +1489/-501    |
| hannahhoward   | 43      | +2654/-990    |
| frrist         | 37      | +6630/-4338   |
| schomatis      | 28      | +3016/-1368   |
| placer14       | 27      | +824/-350     |
| raulk          | 25      | +28718/-29849 |
| mrsmkl         | 22      | +560/-368     |
| travisperson   | 18      | +1354/-314    |
| nonsense       | 16      | +2956/-2842   |
| ingar          | 13      | +331/-123     |
| daviddias      | 11      | +311/-11      |
| Stebalien      | 11      | +1204/-980    |
| RobQuistNL     | 10      | +69/-74       |

# 0.1.0 / 2019-12-11

We are very excited to release **lotus** 0.1.0. This is our testnet release. To install lotus and join the testnet, please visit [lotu.sh](https://lotu.sh). Please file bug reports as [issues](https://github.com/filecoin-project/lotus/issues).

A huge thank you to all contributors for this testnet release!

234
Makefile
@ -3,8 +3,10 @@ SHELL=/usr/bin/env bash
|
||||
all: build
|
||||
.PHONY: all
|
||||
|
||||
unexport GOFLAGS
|
||||
|
||||
GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. -f 2)
|
||||
ifeq ($(shell expr $(GOVERSION) \< 13), 1)
|
||||
ifeq ($(shell expr $(GOVERSION) \< 14), 1)
|
||||
$(warning Your Golang version is go 1.$(GOVERSION))
|
||||
$(error Update Golang to version $(shell grep '^go' go.mod))
|
||||
endif
|
||||
@ -14,12 +16,19 @@ MODULES:=
|
||||
|
||||
CLEAN:=
|
||||
BINS:=
|
||||
GOFLAGS+=-ldflags="-X "github.com/filecoin-project/lotus/build".CurrentCommit=+git$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null))"
|
||||
|
||||
ldflags=-X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null))
|
||||
ifneq ($(strip $(LDFLAGS)),)
|
||||
ldflags+=-extldflags=$(LDFLAGS)
|
||||
endif
|
||||
|
||||
GOFLAGS+=-ldflags="$(ldflags)"
|
||||
|
||||
|
||||
## FFI
|
||||
|
||||
FFI_PATH:=extern/filecoin-ffi/
|
||||
FFI_DEPS:=libfilecoin.a filecoin.pc filecoin.h
|
||||
FFI_DEPS:=.install-filcrypto
|
||||
FFI_DEPS:=$(addprefix $(FFI_PATH),$(FFI_DEPS))
|
||||
|
||||
$(FFI_DEPS): build/.filecoin-install ;
|
||||
@ -49,7 +58,10 @@ deps: $(BUILD_DEPS)
|
||||
.PHONY: deps
|
||||
|
||||
debug: GOFLAGS+=-tags=debug
|
||||
debug: lotus lotus-storage-miner lotus-seal-worker lotus-seed
|
||||
debug: lotus lotus-miner lotus-worker lotus-seed
|
||||
|
||||
2k: GOFLAGS+=-tags=2k
|
||||
2k: lotus lotus-miner lotus-worker lotus-seed
|
||||
|
||||
lotus: $(BUILD_DEPS)
|
||||
rm -f lotus
|
||||
@ -59,36 +71,49 @@ lotus: $(BUILD_DEPS)
|
||||
.PHONY: lotus
|
||||
BINS+=lotus
|
||||
|
||||
lotus-storage-miner: $(BUILD_DEPS)
|
||||
rm -f lotus-storage-miner
|
||||
go build $(GOFLAGS) -o lotus-storage-miner ./cmd/lotus-storage-miner
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-storage-miner -i ./build
|
||||
.PHONY: lotus-storage-miner
|
||||
BINS+=lotus-storage-miner
|
||||
lotus-miner: $(BUILD_DEPS)
|
||||
rm -f lotus-miner
|
||||
go build $(GOFLAGS) -o lotus-miner ./cmd/lotus-storage-miner
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-miner -i ./build
|
||||
.PHONY: lotus-miner
|
||||
BINS+=lotus-miner
|
||||
|
||||
lotus-seal-worker: $(BUILD_DEPS)
|
||||
rm -f lotus-seal-worker
|
||||
go build $(GOFLAGS) -o lotus-seal-worker ./cmd/lotus-seal-worker
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-seal-worker -i ./build
|
||||
.PHONY: lotus-seal-worker
|
||||
BINS+=lotus-seal-worker
|
||||
lotus-worker: $(BUILD_DEPS)
|
||||
rm -f lotus-worker
|
||||
go build $(GOFLAGS) -o lotus-worker ./cmd/lotus-seal-worker
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-worker -i ./build
|
||||
.PHONY: lotus-worker
|
||||
BINS+=lotus-worker
|
||||
|
||||
lotus-shed: $(BUILD_DEPS)
|
||||
rm -f lotus-shed
|
||||
go build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed
|
||||
.PHONY: lotus-seal-worker
|
||||
BINS+=lotus-seal-worker
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-shed -i ./build
|
||||
.PHONY: lotus-shed
|
||||
BINS+=lotus-shed
|
||||
|
||||
build: lotus lotus-storage-miner lotus-seal-worker
|
||||
lotus-gateway: $(BUILD_DEPS)
|
||||
rm -f lotus-gateway
|
||||
go build $(GOFLAGS) -o lotus-gateway ./cmd/lotus-gateway
|
||||
.PHONY: lotus-gateway
|
||||
BINS+=lotus-gateway
|
||||
|
||||
build: lotus lotus-miner lotus-worker
|
||||
@[[ $$(type -P "lotus") ]] && echo "Caution: you have \
|
||||
an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true
|
||||
|
||||
.PHONY: build
|
||||
|
||||
install:
|
||||
install: install-daemon install-miner install-worker
|
||||
|
||||
install-daemon:
|
||||
install -C ./lotus /usr/local/bin/lotus
|
||||
install -C ./lotus-storage-miner /usr/local/bin/lotus-storage-miner
|
||||
install -C ./lotus-seal-worker /usr/local/bin/lotus-seal-worker
|
||||
|
||||
install-miner:
|
||||
install -C ./lotus-miner /usr/local/bin/lotus-miner
|
||||
|
||||
install-worker:
|
||||
install -C ./lotus-worker /usr/local/bin/lotus-worker
|
||||
|
||||
# TOOLS
|
||||
|
||||
@ -106,51 +131,141 @@ benchmarks:
|
||||
@curl -X POST 'http://benchmark.kittyhawk.wtf/benchmark' -d '@bench.json' -u "${benchmark_http_cred}"
|
||||
.PHONY: benchmarks
|
||||
|
||||
pond: build
|
||||
go build -o pond ./lotuspond
|
||||
lotus-pond: 2k
|
||||
go build -o lotus-pond ./lotuspond
|
||||
(cd lotuspond/front && npm i && CI=false npm run build)
|
||||
.PHONY: pond
|
||||
BINS+=pond
|
||||
.PHONY: lotus-pond
|
||||
BINS+=lotus-pond
|
||||
|
||||
townhall:
|
||||
rm -f townhall
|
||||
go build -o townhall ./cmd/lotus-townhall
|
||||
lotus-townhall:
|
||||
rm -f lotus-townhall
|
||||
go build -o lotus-townhall ./cmd/lotus-townhall
|
||||
(cd ./cmd/lotus-townhall/townhall && npm i && npm run build)
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec townhall -i ./cmd/lotus-townhall -i ./build
|
||||
.PHONY: townhall
|
||||
BINS+=townhall
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-townhall -i ./cmd/lotus-townhall -i ./build
|
||||
.PHONY: lotus-townhall
|
||||
BINS+=lotus-townhall
|
||||
|
||||
fountain:
|
||||
rm -f fountain
|
||||
go build -o fountain ./cmd/lotus-fountain
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec fountain -i ./cmd/lotus-fountain
|
||||
.PHONY: fountain
|
||||
BINS+=fountain
|
||||
lotus-fountain:
|
||||
rm -f lotus-fountain
|
||||
go build -o lotus-fountain ./cmd/lotus-fountain
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-fountain -i ./cmd/lotus-fountain -i ./build
|
||||
.PHONY: lotus-fountain
|
||||
BINS+=lotus-fountain
|
||||
|
||||
chainwatch:
|
||||
rm -f chainwatch
|
||||
go build -o chainwatch ./cmd/lotus-chainwatch
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec chainwatch -i ./cmd/lotus-chainwatch
|
||||
.PHONY: chainwatch
|
||||
BINS+=chainwatch
|
||||
lotus-chainwatch:
|
||||
rm -f lotus-chainwatch
|
||||
go build $(GOFLAGS) -o lotus-chainwatch ./cmd/lotus-chainwatch
|
||||
.PHONY: lotus-chainwatch
|
||||
BINS+=lotus-chainwatch
|
||||
|
||||
bench:
|
||||
rm -f bench
|
||||
go build -o bench ./cmd/lotus-bench
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec bench -i ./build
|
||||
.PHONY: bench
|
||||
BINS+=bench
|
||||
lotus-bench:
|
||||
rm -f lotus-bench
|
||||
go build -o lotus-bench ./cmd/lotus-bench
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-bench -i ./build
|
||||
.PHONY: lotus-bench
|
||||
BINS+=lotus-bench
|
||||
|
||||
stats:
|
||||
rm -f stats
|
||||
go build -o stats ./tools/stats
|
||||
.PHONY: stats
|
||||
BINS+=stats
|
||||
lotus-stats:
|
||||
rm -f lotus-stats
|
||||
go build -o lotus-stats ./cmd/lotus-stats
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-stats -i ./build
|
||||
.PHONY: lotus-stats
|
||||
BINS+=lotus-stats
|
||||
|
||||
lotus-pcr:
|
||||
rm -f lotus-pcr
|
||||
go build $(GOFLAGS) -o lotus-pcr ./cmd/lotus-pcr
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-pcr -i ./build
|
||||
.PHONY: lotus-pcr
|
||||
BINS+=lotus-pcr
|
||||
|
||||
lotus-health:
|
||||
rm -f lotus-health
|
||||
go build -o lotus-health ./cmd/lotus-health
|
||||
go run github.com/GeertJohan/go.rice/rice append --exec lotus-health -i ./build
|
||||
.PHONY: lotus-health
|
||||
BINS+=lotus-health
|
||||
|
||||
testground:
|
||||
go build -tags testground -o /dev/null ./cmd/lotus
|
||||
.PHONY: testground
|
||||
BINS+=testground
|
||||
|
||||
install-chainwatch: lotus-chainwatch
|
||||
install -C ./lotus-chainwatch /usr/local/bin/lotus-chainwatch
|
||||
|
||||
# SYSTEMD
|
||||
|
||||
install-daemon-service: install-daemon
|
||||
mkdir -p /etc/systemd/system
|
||||
mkdir -p /var/log/lotus
|
||||
install -C -m 0644 ./scripts/lotus-daemon.service /etc/systemd/system/lotus-daemon.service
|
||||
systemctl daemon-reload
|
||||
@echo
|
||||
@echo "lotus-daemon service installed. Don't forget to run 'sudo systemctl start lotus-daemon' to start it and 'sudo systemctl enable lotus-daemon' for it to be enabled on startup."
|
||||
|
||||
install-miner-service: install-miner install-daemon-service
|
||||
mkdir -p /etc/systemd/system
|
||||
mkdir -p /var/log/lotus
|
||||
install -C -m 0644 ./scripts/lotus-miner.service /etc/systemd/system/lotus-miner.service
|
||||
systemctl daemon-reload
|
||||
@echo
|
||||
@echo "lotus-miner service installed. Don't forget to run 'sudo systemctl start lotus-miner' to start it and 'sudo systemctl enable lotus-miner' for it to be enabled on startup."
|
||||
|
||||
install-chainwatch-service: install-chainwatch install-daemon-service
|
||||
mkdir -p /etc/systemd/system
|
||||
mkdir -p /var/log/lotus
|
||||
install -C -m 0644 ./scripts/lotus-chainwatch.service /etc/systemd/system/lotus-chainwatch.service
|
||||
systemctl daemon-reload
|
||||
@echo
|
||||
@echo "chainwatch service installed. Don't forget to run 'sudo systemctl start lotus-chainwatch' to start it and 'sudo systemctl enable lotus-chainwatch' for it to be enabled on startup."
|
||||
|
||||
install-main-services: install-miner-service
|
||||
|
||||
install-all-services: install-main-services install-chainwatch-service
|
||||
|
||||
install-services: install-main-services
|
||||
|
||||
clean-daemon-service: clean-miner-service clean-chainwatch-service
|
||||
-systemctl stop lotus-daemon
|
||||
-systemctl disable lotus-daemon
|
||||
rm -f /etc/systemd/system/lotus-daemon.service
|
||||
systemctl daemon-reload
|
||||
|
||||
clean-miner-service:
|
||||
-systemctl stop lotus-miner
|
||||
-systemctl disable lotus-miner
|
||||
rm -f /etc/systemd/system/lotus-miner.service
|
||||
systemctl daemon-reload
|
||||
|
||||
clean-chainwatch-service:
|
||||
-systemctl stop lotus-chainwatch
|
||||
-systemctl disable lotus-chainwatch
|
||||
rm -f /etc/systemd/system/lotus-chainwatch.service
|
||||
systemctl daemon-reload
|
||||
|
||||
clean-main-services: clean-daemon-service
|
||||
|
||||
clean-all-services: clean-main-services
|
||||
|
||||
clean-services: clean-all-services
|
||||
|
||||
# MISC
|
||||
|
||||
buildall: $(BINS)
|
||||
|
||||
completions:
|
||||
./scripts/make-completions.sh lotus
|
||||
./scripts/make-completions.sh lotus-miner
|
||||
.PHONY: completions
|
||||
|
||||
install-completions:
|
||||
mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/
|
||||
install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus
|
||||
install -C ./scripts/bash-completion/lotus-miner /usr/share/bash-completion/completions/lotus-miner
|
||||
install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus
|
||||
install -C ./scripts/zsh-completion/lotus-miner /usr/local/share/zsh/site-functions/_lotus-miner
|
||||
|
||||
clean:
|
||||
rm -rf $(CLEAN) $(BINS)
|
||||
-$(MAKE) -C $(FFI_PATH) clean
|
||||
@ -163,6 +278,15 @@ dist-clean:
|
||||
|
||||
type-gen:
|
||||
go run ./gen/main.go
|
||||
go generate ./...
|
||||
|
||||
method-gen:
|
||||
(cd ./lotuspond/front/src/chain && go run ./methodgen.go)
|
||||
|
||||
gen: type-gen method-gen
|
||||
|
||||
docsgen:
|
||||
go run ./api/docgen > documentation/en/api-methods.md
|
||||
|
||||
print-%:
|
||||
@echo $*=$($*)
|
||||
|
||||
45
README.md
@ -1,16 +1,47 @@
|
||||

|
||||
<p align="center">
|
||||
<a href="https://lotu.sh/" title="Lotus Docs">
|
||||
<img src="documentation/images/lotus_logo_h.png" alt="Project Lotus Logo" width="244" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
# Project Lotus - 莲
|
||||
<h1 align="center">Project Lotus - 莲</h1>
|
||||
|
||||
Lotus is an experimental implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, check out the [Filecoin Spec](https://github.com/filecoin-project/specs).
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/filecoin-project/lotus"><img src="https://circleci.com/gh/filecoin-project/lotus.svg?style=svg"></a>
|
||||
<a href="https://codecov.io/gh/filecoin-project/lotus"><img src="https://codecov.io/gh/filecoin-project/lotus/branch/master/graph/badge.svg"></a>
|
||||
<a href="https://goreportcard.com/report/github.com/filecoin-project/lotus"><img src="https://goreportcard.com/badge/github.com/filecoin-project/lotus" /></a>
|
||||
<a href=""><img src="https://img.shields.io/badge/golang-%3E%3D1.14.7-blue.svg" /></a>
|
||||
<br>
|
||||
</p>
|
||||
|
||||
## Development
|
||||
|
||||
All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work is in the [lotus testnet github project board](https://github.com/filecoin-project/lotus/projects/1).
|
||||
Lotus is an implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, check out the [Filecoin Spec](https://spec.filecoin.io).
|
||||
|
||||
## Building & Documentation
|
||||
|
||||
For instructions on how to build lotus from source, please visit [https://docs.lotu.sh](https://docs.lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).
|
||||
For instructions on how to build lotus from source, please visit [Lotus build and setup instruction](https://docs.filecoin.io/get-started/lotus/installation/#minimal-requirements) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation).
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details.
|
||||
|
||||
## Development
|
||||
|
||||
The main branches under development at the moment are:
|
||||
* [`master`](https://github.com/filecoin-project/lotus): current testnet.
|
||||
* [`next`](https://github.com/filecoin-project/lotus/tree/next): working branch with chain-breaking changes.
|
||||
* [`ntwk-calibration`](https://github.com/filecoin-project/lotus/tree/ntwk-calibration): devnet running one of `next` commits.
|
||||
|
||||
### Tracker
|
||||
|
||||
All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work towards Mainnet launch can be seen at the [lotus github project board](https://github.com/orgs/filecoin-project/projects/8). The issues labeled with `incentives` are there to identify the issues needed for Space Race launch.
|
||||
|
||||
### Packages
|
||||
|
||||
The lotus Filecoin implementation unfolds into the following packages:
|
||||
|
||||
- [This repo](https://github.com/filecoin-project/lotus)
|
||||
- [go-fil-markets](https://github.com/filecoin-project/go-fil-markets) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/markets-shared-components-5daa144a7046a60001c6e253/board)
|
||||
- [spec-actors](https://github.com/filecoin-project/specs-actors) which has its own [kanban work tracker available here](https://app.zenhub.com/workspaces/actors-5ee6f3aa87591f0016c05685/board)
|
||||
|
||||
## License
|
||||
|
||||
|
||||
29
SECURITY.md
Normal file
@ -0,0 +1,29 @@
|
||||
# Security Policy
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
For *critical* bugs, please send an email to security@filecoin.org.
|
||||
|
||||
The bug reporting process differs between bugs that are critical and may crash the network, and others that are unlikely to cause problems if malicious parties know about them. For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md).
|
||||
|
||||
Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report.
|
||||
|
||||
Here are some examples of bugs we would consider 'critical':
|
||||
|
||||
* If you can spend from a `multisig` wallet you do not control the keys for.
|
||||
* If you can cause a miner to be slashed without them actually misbehaving.
|
||||
* If you can maintain power without submitting windowed posts regularly.
|
||||
* If you can craft a message that causes lotus nodes to panic.
|
||||
* If you can cause your miner to win significantly more blocks than it should.
|
||||
* If you can craft a message that causes a persistent fork in the network.
|
||||
* If you can cause the total amount of Filecoin in the network to no longer be 2 billion.
|
||||
|
||||
This is not an exhaustive list, but should provide some idea of what we consider 'critical'.
|
||||
|
||||
## Supported Versions
|
||||
|
||||
* TODO: This should be defined and set up by Mainnet launch.
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| Testnet | :white_check_mark: |
|
||||
@ -4,31 +4,61 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
metrics "github.com/libp2p/go-libp2p-core/metrics"
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
protocol "github.com/libp2p/go-libp2p-core/protocol"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
)
|
||||
|
||||
type Permission = string
|
||||
|
||||
type Common interface {
|
||||
// Auth
|
||||
AuthVerify(ctx context.Context, token string) ([]Permission, error)
|
||||
AuthNew(ctx context.Context, perms []Permission) ([]byte, error)
|
||||
|
||||
// network
|
||||
// MethodGroup: Auth
|
||||
|
||||
AuthVerify(ctx context.Context, token string) ([]auth.Permission, error)
|
||||
AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error)
|
||||
|
||||
// MethodGroup: Net
|
||||
|
||||
NetConnectedness(context.Context, peer.ID) (network.Connectedness, error)
|
||||
NetPeers(context.Context) ([]peer.AddrInfo, error)
|
||||
NetConnect(context.Context, peer.AddrInfo) error
|
||||
NetAddrsListen(context.Context) (peer.AddrInfo, error)
|
||||
NetDisconnect(context.Context, peer.ID) error
|
||||
NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error)
|
||||
NetPubsubScores(context.Context) ([]PubsubScore, error)
|
||||
NetAutoNatStatus(context.Context) (NatInfo, error)
|
||||
NetAgentVersion(ctx context.Context, p peer.ID) (string, error)
|
||||
|
||||
// NetBandwidthStats returns statistics about the node's total bandwidth
|
||||
// usage and current rate across all peers and protocols.
|
||||
NetBandwidthStats(ctx context.Context) (metrics.Stats, error)
|
||||
|
||||
// NetBandwidthStatsByPeer returns statistics about the node's bandwidth
|
||||
// usage and current rate per peer
|
||||
NetBandwidthStatsByPeer(ctx context.Context) (map[string]metrics.Stats, error)
|
||||
|
||||
// NetBandwidthStatsByProtocol returns statistics about the node's bandwidth
|
||||
// usage and current rate per protocol
|
||||
NetBandwidthStatsByProtocol(ctx context.Context) (map[protocol.ID]metrics.Stats, error)
|
||||
|
||||
// MethodGroup: Common
|
||||
|
||||
// ID returns peerID of libp2p node backing this API
|
||||
ID(context.Context) (peer.ID, error)
|
||||
|
||||
// Version provides information about API provider
|
||||
Version(context.Context) (Version, error)
|
||||
|
||||
LogList(context.Context) ([]string, error)
|
||||
LogSetLevel(context.Context, string, string) error
|
||||
|
||||
// trigger graceful shutdown
|
||||
Shutdown(context.Context) error
|
||||
|
||||
Closing(context.Context) (<-chan struct{}, error)
|
||||
}
|
||||
|
||||
// Version provides various build-time information
|
||||
@ -50,3 +80,8 @@ type Version struct {
|
||||
func (v Version) String() string {
|
||||
return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String())
|
||||
}
|
||||
|
||||
type NatInfo struct {
|
||||
Reachability network.Reachability
|
||||
PublicAddr string
|
||||
}
|
||||
|
||||
776
api/api_full.go
@ -2,87 +2,292 @@ package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-filestore"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/market"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/miner"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/paych"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin"
|
||||
"github.com/filecoin-project/lotus/chain/actors/builtin/power"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
marketevents "github.com/filecoin-project/lotus/markets/loggers"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
|
||||
// FullNode API is a low-level interface to the Filecoin network full node
|
||||
type FullNode interface {
|
||||
Common
|
||||
|
||||
// chain
|
||||
// MethodGroup: Chain
|
||||
// The Chain method group contains methods for interacting with the
|
||||
// blockchain, but that do not require any form of state computation.
|
||||
|
||||
// ChainNotify returns channel with chain head updates
|
||||
// First message is guaranteed to be of len == 1, and type == 'current'
|
||||
ChainNotify(context.Context) (<-chan []*store.HeadChange, error)
|
||||
// ChainNotify returns channel with chain head updates.
|
||||
// First message is guaranteed to be of len == 1, and type == 'current'.
|
||||
ChainNotify(context.Context) (<-chan []*HeadChange, error)
|
||||
|
||||
// ChainHead returns the current head of the chain.
|
||||
ChainHead(context.Context) (*types.TipSet, error)
|
||||
ChainGetRandomness(context.Context, types.TipSetKey, int64) ([]byte, error)
|
||||
|
||||
// ChainGetRandomnessFromTickets is used to sample the chain for randomness.
|
||||
ChainGetRandomnessFromTickets(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
|
||||
|
||||
// ChainGetRandomnessFromBeacon is used to sample the beacon for randomness.
|
||||
ChainGetRandomnessFromBeacon(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error)
|
||||
|
||||
// ChainGetBlock returns the block specified by the given CID.
|
||||
ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error)
|
||||
// ChainGetTipSet returns the tipset specified by the given TipSetKey.
|
||||
ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error)
|
||||
ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error)
|
||||
ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error)
|
||||
ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error)
|
||||
ChainGetTipSetByHeight(context.Context, uint64, *types.TipSet) (*types.TipSet, error)
|
||||
|
||||
// ChainGetBlockMessages returns messages stored in the specified block.
|
||||
ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error)
|
||||
|
||||
// ChainGetParentReceipts returns receipts for messages in parent tipset of
|
||||
// the specified block.
|
||||
ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error)
|
||||
|
||||
// ChainGetParentMessages returns messages stored in parent tipset of the
|
||||
// specified block.
|
||||
ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error)
|
||||
|
||||
// ChainGetTipSetByHeight looks back for a tipset at the specified epoch.
|
||||
// If there are no blocks at the specified epoch, a tipset at an earlier epoch
|
||||
// will be returned.
|
||||
ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error)
|
||||
|
||||
// ChainReadObj reads ipld nodes referenced by the specified CID from chain
|
||||
// blockstore and returns raw bytes.
|
||||
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
|
||||
ChainSetHead(context.Context, *types.TipSet) error
|
||||
|
||||
// ChainDeleteObj deletes node referenced by the given CID
|
||||
ChainDeleteObj(context.Context, cid.Cid) error
|
||||
|
||||
// ChainHasObj checks if a given CID exists in the chain blockstore.
|
||||
ChainHasObj(context.Context, cid.Cid) (bool, error)
|
||||
|
||||
// ChainStatObj returns statistics about the graph referenced by 'obj'.
|
||||
// If 'base' is also specified, then the returned stat will be a diff
|
||||
// between the two objects.
|
||||
ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (ObjStat, error)
|
||||
|
||||
// ChainSetHead forcefully sets current chain head. Use with caution.
|
||||
ChainSetHead(context.Context, types.TipSetKey) error
|
||||
|
||||
// ChainGetGenesis returns the genesis tipset.
|
||||
ChainGetGenesis(context.Context) (*types.TipSet, error)
|
||||
ChainTipSetWeight(context.Context, *types.TipSet) (types.BigInt, error)
|
||||
ChainGetNode(ctx context.Context, p string) (interface{}, error)
|
||||
|
||||
// ChainTipSetWeight computes weight for the specified tipset.
|
||||
ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error)
|
||||
ChainGetNode(ctx context.Context, p string) (*IpldObject, error)
|
||||
|
||||
// ChainGetMessage reads a message referenced by the specified CID from the
|
||||
// chain blockstore.
|
||||
ChainGetMessage(context.Context, cid.Cid) (*types.Message, error)
|
||||
|
||||
// syncer
|
||||
// ChainGetPath returns a set of revert/apply operations needed to get from
|
||||
// one tipset to another, for example:
|
||||
//```
|
||||
// to
|
||||
// ^
|
||||
// from tAA
|
||||
// ^ ^
|
||||
// tBA tAB
|
||||
// ^---*--^
|
||||
// ^
|
||||
// tRR
|
||||
//```
|
||||
// Would return `[revert(tBA), apply(tAB), apply(tAA)]`
|
||||
ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error)
|
||||
|
||||
// ChainExport returns a stream of bytes with CAR dump of chain data.
|
||||
// The exported chain data includes the header chain from the given tipset
|
||||
// back to genesis, the entire genesis state, and the most recent 'nroots'
|
||||
// state trees.
|
||||
// If oldmsgskip is set, messages from before the requested roots are also not included.
|
||||
ChainExport(ctx context.Context, nroots abi.ChainEpoch, oldmsgskip bool, tsk types.TipSetKey) (<-chan []byte, error)
|
||||
|
||||
// MethodGroup: Beacon
|
||||
// The Beacon method group contains methods for interacting with the random beacon (DRAND)
|
||||
|
||||
// BeaconGetEntry returns the beacon entry for the given filecoin epoch. If
|
||||
// the entry has not yet been produced, the call will block until the entry
|
||||
// becomes available
|
||||
BeaconGetEntry(ctx context.Context, epoch abi.ChainEpoch) (*types.BeaconEntry, error)
|
||||
|
||||
// GasEstimateFeeCap estimates gas fee cap
|
||||
GasEstimateFeeCap(context.Context, *types.Message, int64, types.TipSetKey) (types.BigInt, error)
|
||||
|
||||
// GasEstimateGasLimit estimates gas used by the message and returns it.
|
||||
// It fails if message fails to execute.
|
||||
GasEstimateGasLimit(context.Context, *types.Message, types.TipSetKey) (int64, error)
|
||||
|
||||
// GasEstimateGasPremium estimates what gas price should be used for a
|
||||
// message to have high likelihood of inclusion in `nblocksincl` epochs.
|
||||
|
||||
GasEstimateGasPremium(_ context.Context, nblocksincl uint64,
|
||||
sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error)
|
||||
|
||||
// GasEstimateMessageGas estimates gas values for unset message gas fields
|
||||
GasEstimateMessageGas(context.Context, *types.Message, *MessageSendSpec, types.TipSetKey) (*types.Message, error)
|
||||
|
||||
// MethodGroup: Sync
|
||||
// The Sync method group contains methods for interacting with and
|
||||
// observing the lotus sync service.
|
||||
|
||||
// SyncState returns the current status of the lotus sync system.
|
||||
SyncState(context.Context) (*SyncState, error)
|
||||
|
||||
// SyncSubmitBlock can be used to submit a newly created block to the
|
||||
// network through this node.
|
||||
SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error
|
||||
|
||||
// SyncIncomingBlocks returns a channel streaming incoming, potentially not
|
||||
// yet synced block headers.
|
||||
SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error)
|
||||
|
||||
// SyncCheckpoint marks a block as checkpointed, meaning that it won't ever fork away from it.
|
||||
SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) error
|
||||
|
||||
// SyncMarkBad marks a block as bad, meaning that it won't ever be synced.
|
||||
// Use with extreme caution.
|
||||
SyncMarkBad(ctx context.Context, bcid cid.Cid) error
|
||||
|
||||
// messages
|
||||
MpoolPending(context.Context, *types.TipSet) ([]*types.SignedMessage, error)
|
||||
// SyncUnmarkBad unmarks a block as bad, making it possible to be validated and synced again.
|
||||
SyncUnmarkBad(ctx context.Context, bcid cid.Cid) error
|
||||
|
||||
// SyncCheckBad checks if a block was marked as bad, and if it was, returns
|
||||
// the reason.
|
||||
SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error)
|
||||
|
||||
// MethodGroup: Mpool
|
||||
// The Mpool methods are for interacting with the message pool. The message pool
|
||||
// manages all incoming and outgoing 'messages' going over the network.
|
||||
|
||||
// MpoolPending returns pending mempool messages.
|
||||
MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error)
|
||||
|
||||
// MpoolSelect returns a list of pending messages for inclusion in the next block
|
||||
MpoolSelect(context.Context, types.TipSetKey, float64) ([]*types.SignedMessage, error)
|
||||
|
||||
// MpoolPush pushes a signed message to mempool.
|
||||
MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error)
|
||||
MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error) // get nonce, sign, push
|
||||
|
||||
// MpoolPushMessage atomically assigns a nonce, signs, and pushes a message
|
||||
// to mempool.
|
||||
// maxFee is only used when GasFeeCap/GasPremium fields aren't specified
|
||||
//
|
||||
// When maxFee is set to 0, MpoolPushMessage will guess appropriate fee
|
||||
// based on current chain conditions
|
||||
MpoolPushMessage(ctx context.Context, msg *types.Message, spec *MessageSendSpec) (*types.SignedMessage, error)
|
||||
|
||||
// MpoolGetNonce gets next nonce for the specified sender.
|
||||
// Note that this method may not be atomic. Use MpoolPushMessage instead.
|
||||
MpoolGetNonce(context.Context, address.Address) (uint64, error)
|
||||
MpoolSub(context.Context) (<-chan MpoolUpdate, error)
|
||||
|
||||
// FullNodeStruct
|
||||
// MpoolClear clears pending messages from the mpool
|
||||
MpoolClear(context.Context, bool) error
|
||||
|
||||
// miner
|
||||
// MpoolGetConfig returns (a copy of) the current mpool config
|
||||
MpoolGetConfig(context.Context) (*types.MpoolConfig, error)
|
||||
// MpoolSetConfig sets the mpool config to (a copy of) the supplied config
|
||||
MpoolSetConfig(context.Context, *types.MpoolConfig) error
|
||||
|
||||
MinerCreateBlock(context.Context, address.Address, *types.TipSet, *types.Ticket, *types.EPostProof, []*types.SignedMessage, uint64, uint64) (*types.BlockMsg, error)
|
||||
// MethodGroup: Miner
|
||||
|
||||
MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error)
|
||||
MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error)
|
||||
|
||||
// // UX ?
|
||||
|
||||
// wallet
|
||||
// MethodGroup: Wallet
|
||||
|
||||
WalletNew(context.Context, string) (address.Address, error)
|
||||
// WalletNew creates a new address in the wallet with the given sigType.
|
||||
WalletNew(context.Context, crypto.SigType) (address.Address, error)
|
||||
// WalletHas indicates whether the given address is in the wallet.
|
||||
WalletHas(context.Context, address.Address) (bool, error)
|
||||
// WalletList lists all the addresses in the wallet.
|
||||
WalletList(context.Context) ([]address.Address, error)
|
||||
// WalletBalance returns the balance of the given address at the current head of the chain.
|
||||
WalletBalance(context.Context, address.Address) (types.BigInt, error)
|
||||
WalletSign(context.Context, address.Address, []byte) (*types.Signature, error)
|
||||
// WalletSign signs the given bytes using the given address.
|
||||
WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error)
|
||||
// WalletSignMessage signs the given message using the given address.
|
||||
WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error)
|
||||
// WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid.
|
||||
// The address does not have to be in the wallet.
|
||||
WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error)
|
||||
// WalletDefaultAddress returns the address marked as default in the wallet.
|
||||
WalletDefaultAddress(context.Context) (address.Address, error)
|
||||
// WalletSetDefault marks the given address as the default one.
|
||||
WalletSetDefault(context.Context, address.Address) error
|
||||
// WalletExport returns the private key of an address in the wallet.
|
||||
WalletExport(context.Context, address.Address) (*types.KeyInfo, error)
|
||||
// WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet.
|
||||
WalletImport(context.Context, *types.KeyInfo) (address.Address, error)
|
||||
// WalletDelete deletes an address from the wallet.
|
||||
WalletDelete(context.Context, address.Address) error
|
||||
|
||||
// Other
|
||||
|
||||
// ClientImport imports file under the specified path into filestore
|
||||
ClientImport(ctx context.Context, path string) (cid.Cid, error)
|
||||
ClientStartDeal(ctx context.Context, data cid.Cid, addr address.Address, miner address.Address, epochPrice types.BigInt, blocksDuration uint64) (*cid.Cid, error)
|
||||
// MethodGroup: Client
|
||||
// The Client methods all have to do with interacting with the storage and
|
||||
// retrieval markets as a client
|
||||
|
||||
// ClientImport imports file under the specified path into filestore.
|
||||
ClientImport(ctx context.Context, ref FileRef) (*ImportRes, error)
|
||||
// ClientRemoveImport removes file import
|
||||
ClientRemoveImport(ctx context.Context, importID multistore.StoreID) error
|
||||
// ClientStartDeal proposes a deal with a miner.
|
||||
ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error)
|
||||
// ClientGetDealInfo returns the latest information about a given deal.
|
||||
ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error)
|
||||
// ClientListDeals returns information about the deals made by the local client.
|
||||
ClientListDeals(ctx context.Context) ([]DealInfo, error)
|
||||
// ClientGetDealUpdates returns the status of updated deals
|
||||
ClientGetDealUpdates(ctx context.Context) (<-chan DealInfo, error)
|
||||
// ClientHasLocal indicates whether a certain CID is locally stored.
|
||||
ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error)
|
||||
ClientFindData(ctx context.Context, root cid.Cid) ([]QueryOffer, error)
|
||||
ClientRetrieve(ctx context.Context, order RetrievalOrder, path string) error
|
||||
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*types.SignedStorageAsk, error)
|
||||
// ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer).
|
||||
ClientFindData(ctx context.Context, root cid.Cid, piece *cid.Cid) ([]QueryOffer, error)
|
||||
// ClientMinerQueryOffer returns a QueryOffer for the specific miner and file.
|
||||
ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error)
|
||||
// ClientRetrieve initiates the retrieval of a file, as specified in the order.
|
||||
ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error
|
||||
// ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel
|
||||
// of status updates.
|
||||
ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error)
|
||||
// ClientQueryAsk returns a signed StorageAsk from the specified miner.
|
||||
ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error)
|
||||
// ClientCalcCommP calculates the CommP for a specified file
|
||||
ClientCalcCommP(ctx context.Context, inpath string) (*CommPRet, error)
|
||||
// ClientGenCar generates a CAR file for the specified file.
|
||||
ClientGenCar(ctx context.Context, ref FileRef, outpath string) error
|
||||
// ClientDealSize calculates real deal data size
|
||||
ClientDealSize(ctx context.Context, root cid.Cid) (DataSize, error)
|
||||
// ClientListTransfers returns the status of all ongoing transfers of data
|
||||
ClientListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
|
||||
ClientDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
|
||||
// ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel
|
||||
// which are stuck due to insufficient funds
|
||||
ClientRetrieveTryRestartInsufficientFunds(ctx context.Context, paymentChannel address.Address) error
|
||||
|
||||
// ClientUnimport removes references to the specified file from filestore
|
||||
//ClientUnimport(path string)
|
||||
@ -92,77 +297,244 @@ type FullNode interface {
|
||||
|
||||
//ClientListAsks() []Ask
|
||||
|
||||
// if tipset is nil, we'll use heaviest
|
||||
StateCall(context.Context, *types.Message, *types.TipSet) (*types.MessageReceipt, error)
|
||||
StateReplay(context.Context, *types.TipSet, cid.Cid) (*ReplayResults, error)
|
||||
StateGetActor(ctx context.Context, actor address.Address, ts *types.TipSet) (*types.Actor, error)
|
||||
StateReadState(ctx context.Context, act *types.Actor, ts *types.TipSet) (*ActorState, error)
|
||||
StateListMessages(ctx context.Context, match *types.Message, ts *types.TipSet, toht uint64) ([]cid.Cid, error)
|
||||
// MethodGroup: State
|
||||
// The State methods are used to query, inspect, and interact with chain state.
|
||||
// All methods take a TipSetKey as a parameter. The state looked up is the state at that tipset.
|
||||
// A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used.
|
||||
|
||||
StateMinerSectors(context.Context, address.Address, *types.TipSet) ([]*ChainSectorInfo, error)
|
||||
StateMinerProvingSet(context.Context, address.Address, *types.TipSet) ([]*ChainSectorInfo, error)
|
||||
StateMinerPower(context.Context, address.Address, *types.TipSet) (MinerPower, error)
|
||||
StateMinerWorker(context.Context, address.Address, *types.TipSet) (address.Address, error)
|
||||
StateMinerPeerID(ctx context.Context, m address.Address, ts *types.TipSet) (peer.ID, error)
|
||||
StateMinerElectionPeriodStart(ctx context.Context, actor address.Address, ts *types.TipSet) (uint64, error)
|
||||
StateMinerSectorSize(context.Context, address.Address, *types.TipSet) (uint64, error)
|
||||
StatePledgeCollateral(context.Context, *types.TipSet) (types.BigInt, error)
|
||||
StateWaitMsg(context.Context, cid.Cid) (*MsgWait, error)
|
||||
StateListMiners(context.Context, *types.TipSet) ([]address.Address, error)
|
||||
StateListActors(context.Context, *types.TipSet) ([]address.Address, error)
|
||||
StateMarketBalance(context.Context, address.Address, *types.TipSet) (actors.StorageParticipantBalance, error)
|
||||
StateMarketParticipants(context.Context, *types.TipSet) (map[string]actors.StorageParticipantBalance, error)
|
||||
StateMarketDeals(context.Context, *types.TipSet) (map[string]actors.OnChainDeal, error)
|
||||
StateMarketStorageDeal(context.Context, uint64, *types.TipSet) (*actors.OnChainDeal, error)
|
||||
StateLookupID(context.Context, address.Address, *types.TipSet) (address.Address, error)
|
||||
// StateCall runs the given message and returns its result without any persisted changes.
|
||||
StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error)
|
||||
// StateReplay returns the result of executing the indicated message, assuming it was executed in the indicated tipset.
|
||||
StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error)
|
||||
// StateGetActor returns the indicated actor's nonce and balance.
|
||||
StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error)
|
||||
// StateReadState returns the indicated actor's state.
|
||||
StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error)
|
||||
// StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height.
|
||||
StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error)
|
||||
|
||||
// StateNetworkName returns the name of the network the node is synced to
|
||||
StateNetworkName(context.Context) (dtypes.NetworkName, error)
|
||||
// StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included.
|
||||
StateMinerSectors(context.Context, address.Address, *bitfield.BitField, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
|
||||
// StateMinerActiveSectors returns info about sectors that a given miner is actively proving.
|
||||
StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error)
|
||||
// StateMinerProvingDeadline calculates the deadline at some epoch for a proving period
|
||||
// and returns the deadline-related calculations.
|
||||
StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error)
|
||||
// StateMinerPower returns the power of the indicated miner
|
||||
StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error)
|
||||
// StateMinerInfo returns info about the indicated miner
|
||||
StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error)
|
||||
// StateMinerDeadlines returns all the proving deadlines for the given miner
|
||||
StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]Deadline, error)
|
||||
// StateMinerPartitions returns all partitions in the specified deadline
|
||||
StateMinerPartitions(ctx context.Context, m address.Address, dlIdx uint64, tsk types.TipSetKey) ([]Partition, error)
|
||||
// StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner
|
||||
StateMinerFaults(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
|
||||
// StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset
|
||||
StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error)
|
||||
// StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner
|
||||
StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (bitfield.BitField, error)
|
||||
// StateMinerPreCommitDepositForPower returns the precommit deposit for the specified miner's sector
|
||||
StateMinerPreCommitDepositForPower(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
|
||||
// StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector
|
||||
StateMinerInitialPledgeCollateral(context.Context, address.Address, miner.SectorPreCommitInfo, types.TipSetKey) (types.BigInt, error)
|
||||
// StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent
|
||||
StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
|
||||
// StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector
|
||||
StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error)
|
||||
// StateSectorGetInfo returns the on-chain info for the specified miner's sector. Returns null in case the sector info isn't found
|
||||
// NOTE: returned info.Expiration may not be accurate in some cases, use StateSectorExpiration to get accurate
|
||||
// expiration epoch
|
||||
StateSectorGetInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error)
|
||||
// StateSectorExpiration returns epoch at which given sector will expire
|
||||
StateSectorExpiration(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorExpiration, error)
|
||||
// StateSectorPartition finds deadline/partition with the specified sector
|
||||
StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error)
|
||||
// StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed
|
||||
StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error)
|
||||
// StateMsgGasCost searches for a message in the chain, and returns details of the message's gas costs, including the penalty and miner tip
|
||||
StateMsgGasCost(context.Context, cid.Cid, types.TipSetKey) (*MsgGasCost, error)
|
||||
// StateWaitMsg looks back in the chain for a message. If not found, it blocks until the
|
||||
// message arrives on chain, and gets to the indicated confidence depth.
|
||||
StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error)
|
||||
// StateListMiners returns the addresses of every miner that has claimed power in the Power Actor
|
||||
StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error)
|
||||
// StateListActors returns the addresses of every actor in the state
|
||||
StateListActors(context.Context, types.TipSetKey) ([]address.Address, error)
|
||||
// StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market
|
||||
StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error)
|
||||
// StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market
|
||||
StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error)
|
||||
// StateMarketDeals returns information about every deal in the Storage Market
|
||||
StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error)
|
||||
// StateMarketStorageDeal returns information about the indicated deal
|
||||
StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error)
|
||||
// StateLookupID retrieves the ID address of the given address
|
||||
StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error)
|
||||
// StateAccountKey returns the public key address of the given ID address
|
||||
StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error)
|
||||
// StateChangedActors returns all the actors whose states change between the two given state CIDs
|
||||
// TODO: Should this take tipset keys instead?
|
||||
StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error)
|
||||
StateGetReceipt(context.Context, cid.Cid, *types.TipSet) (*types.MessageReceipt, error)
|
||||
StateMinerSectorCount(context.Context, address.Address, *types.TipSet) (MinerSectors, error)
|
||||
// StateGetReceipt returns the message receipt for the given message
|
||||
StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error)
|
||||
// StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set
|
||||
StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error)
|
||||
// StateCompute is a flexible command that applies the given messages on the given tipset.
|
||||
// The messages are run as though the VM were at the provided height.
|
||||
StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error)
|
||||
// StateVerifiedClientStatus returns the data cap for the given address.
|
||||
// Returns nil if there is no entry in the data cap table for the
|
||||
// address.
|
||||
StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error)
|
||||
// StateDealProviderCollateralBounds returns the min and max collateral a storage provider
|
||||
// can issue. It takes the deal size and verified status as parameters.
|
||||
StateDealProviderCollateralBounds(context.Context, abi.PaddedPieceSize, bool, types.TipSetKey) (DealCollateralBounds, error)
|
||||
|
||||
MarketEnsureAvailable(context.Context, address.Address, types.BigInt) error
|
||||
// StateCirculatingSupply returns the circulating supply of Filecoin at the given tipset
|
||||
StateCirculatingSupply(context.Context, types.TipSetKey) (CirculatingSupply, error)
|
||||
// StateNetworkVersion returns the network version at the given tipset
|
||||
StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error)
|
||||
|
||||
// MethodGroup: Msig
|
||||
// The Msig methods are used to interact with multisig wallets on the
|
||||
// filecoin network
|
||||
|
||||
// MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent
|
||||
MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error)
|
||||
// MsigGetVested returns the amount of FIL that vested in a multisig in a certain period.
|
||||
// It takes the following params: <multisig address>, <start epoch>, <end epoch>
|
||||
MsigGetVested(context.Context, address.Address, types.TipSetKey, types.TipSetKey) (types.BigInt, error)
|
||||
// MsigCreate creates a multisig wallet
|
||||
// It takes the following params: <required number of senders>, <approving addresses>, <unlock duration>
|
||||
//<initial balance>, <sender address of the create msg>, <gas price>
|
||||
MsigCreate(context.Context, uint64, []address.Address, abi.ChainEpoch, types.BigInt, address.Address, types.BigInt) (cid.Cid, error)
|
||||
// MsigPropose proposes a multisig message
|
||||
// It takes the following params: <multisig address>, <recipient address>, <value to transfer>,
|
||||
// <sender address of the propose msg>, <method to call in the proposed message>, <params to include in the proposed message>
|
||||
MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
|
||||
// MsigApprove approves a previously-proposed multisig message
|
||||
// It takes the following params: <multisig address>, <proposed message ID>, <proposer address>, <recipient address>, <value to transfer>,
|
||||
// <sender address of the approve msg>, <method to call in the proposed message>, <params to include in the proposed message>
|
||||
MsigApprove(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
|
||||
// MsigCancel cancels a previously-proposed multisig message
|
||||
// It takes the following params: <multisig address>, <proposed message ID>, <recipient address>, <value to transfer>,
|
||||
// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
|
||||
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
|
||||
// MsigAddPropose proposes adding a signer in the multisig
|
||||
// It takes the following params: <multisig address>, <sender address of the propose msg>,
|
||||
// <new signer>, <whether the number of required signers should be increased>
|
||||
MsigAddPropose(context.Context, address.Address, address.Address, address.Address, bool) (cid.Cid, error)
|
||||
// MsigAddApprove approves a previously proposed AddSigner message
|
||||
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
|
||||
// <proposer address>, <new signer>, <whether the number of required signers should be increased>
|
||||
MsigAddApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, bool) (cid.Cid, error)
|
||||
// MsigAddCancel cancels a previously proposed AddSigner message
|
||||
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
|
||||
// <new signer>, <whether the number of required signers should be increased>
|
||||
MsigAddCancel(context.Context, address.Address, address.Address, uint64, address.Address, bool) (cid.Cid, error)
|
||||
// MsigSwapPropose proposes swapping 2 signers in the multisig
|
||||
// It takes the following params: <multisig address>, <sender address of the propose msg>,
|
||||
// <old signer>, <new signer>
|
||||
MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error)
|
||||
// MsigSwapApprove approves a previously proposed SwapSigner
|
||||
// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
|
||||
// <proposer address>, <old signer>, <new signer>
|
||||
MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error)
|
||||
// MsigSwapCancel cancels a previously proposed SwapSigner message
|
||||
// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
|
||||
// <old signer>, <new signer>
|
||||
MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error)
|
||||
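A hypothetical sketch of the propose/approve flow the Msig comments above describe, assuming a connected FullNode client and pre-existing addresses; the proposed message ID of 0 is a placeholder for the TxnID returned by the propose message.

package example

import (
	"context"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// proposeAndApprove walks the two-step multisig flow: one signer proposes a
// plain transfer (method 0 is Send, so params are nil), another approves it.
func proposeAndApprove(ctx context.Context, full api.FullNode,
	msig, proposer, approver, recipient address.Address) error {
	// MsigPropose: <multisig>, <recipient>, <value>, <sender of the propose
	// msg>, <method to call>, <params>.
	if _, err := full.MsigPropose(ctx, msig, recipient, types.NewInt(1), proposer, 0, nil); err != nil {
		return err
	}
	// MsigApprove: <multisig>, <proposed message ID>, <proposer>, <recipient>,
	// <value>, <sender of the approve msg>, <method>, <params>.
	// The message ID here is a placeholder; the real TxnID comes from the
	// propose message's on-chain return value.
	_, err := full.MsigApprove(ctx, msig, 0, proposer, recipient, types.NewInt(1), approver, 0, nil)
	return err
}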
|
||||
MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error)
|
||||
// MarketFreeBalance
|
||||
|
||||
PaychGet(ctx context.Context, from, to address.Address, ensureFunds types.BigInt) (*ChannelInfo, error)
|
||||
// MethodGroup: Paych
|
||||
// The Paych methods are for interacting with and managing payment channels
|
||||
|
||||
PaychGet(ctx context.Context, from, to address.Address, amt types.BigInt) (*ChannelInfo, error)
|
||||
PaychGetWaitReady(context.Context, cid.Cid) (address.Address, error)
|
||||
PaychAvailableFunds(ctx context.Context, ch address.Address) (*ChannelAvailableFunds, error)
|
||||
PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*ChannelAvailableFunds, error)
|
||||
PaychList(context.Context) ([]address.Address, error)
|
||||
PaychStatus(context.Context, address.Address) (*PaychStatus, error)
|
||||
PaychClose(context.Context, address.Address) (cid.Cid, error)
|
||||
PaychSettle(context.Context, address.Address) (cid.Cid, error)
|
||||
PaychCollect(context.Context, address.Address) (cid.Cid, error)
|
||||
PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error)
|
||||
PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error)
|
||||
PaychVoucherCheckValid(context.Context, address.Address, *types.SignedVoucher) error
|
||||
PaychVoucherCheckSpendable(context.Context, address.Address, *types.SignedVoucher, []byte, []byte) (bool, error)
|
||||
PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*types.SignedVoucher, error)
|
||||
PaychVoucherAdd(context.Context, address.Address, *types.SignedVoucher, []byte, types.BigInt) (types.BigInt, error)
|
||||
PaychVoucherList(context.Context, address.Address) ([]*types.SignedVoucher, error)
|
||||
PaychVoucherSubmit(context.Context, address.Address, *types.SignedVoucher) (cid.Cid, error)
|
||||
PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error
|
||||
PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error)
|
||||
PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*VoucherCreateResult, error)
|
||||
PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error)
|
||||
PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error)
|
||||
PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (cid.Cid, error)
|
||||
}
|
||||
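A hedged sketch of the payment-channel flow the Paych methods above expose: ensure a channel with enough funds exists, wait for the pending message to land, allocate a lane, and create a voucher. The amounts and the FullNode client are assumptions.

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-address"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/types"
)

// openChannelAndVoucher ensures a channel from -> to with at least 10000
// attoFIL available, waits for the channel to be usable, allocates a lane,
// and creates a voucher for part of the funds.
func openChannelAndVoucher(ctx context.Context, full api.FullNode, from, to address.Address) error {
	info, err := full.PaychGet(ctx, from, to, types.NewInt(10000))
	if err != nil {
		return err
	}

	// WaitSentinel identifies the pending create/add-funds message;
	// PaychGetWaitReady blocks until it lands and returns the channel address.
	ch, err := full.PaychGetWaitReady(ctx, info.WaitSentinel)
	if err != nil {
		return err
	}

	lane, err := full.PaychAllocateLane(ctx, ch)
	if err != nil {
		return err
	}

	res, err := full.PaychVoucherCreate(ctx, ch, types.NewInt(1000), lane)
	if err != nil {
		return err
	}
	if res.Voucher == nil {
		// Not enough confirmed funds; Shortfall reports how much is missing.
		return fmt.Errorf("insufficient channel funds, short by %s", res.Shortfall)
	}
	return nil
}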
|
||||
type FileRef struct {
|
||||
Path string
|
||||
IsCAR bool
|
||||
}
|
||||
|
||||
type MinerSectors struct {
|
||||
Pset uint64
|
||||
Sset uint64
|
||||
// Live sectors that should be proven.
|
||||
Live uint64
|
||||
// Sectors actively contributing to power.
|
||||
Active uint64
|
||||
// Sectors with failed proofs.
|
||||
Faulty uint64
|
||||
}
|
||||
|
||||
type ImportRes struct {
|
||||
Root cid.Cid
|
||||
ImportID multistore.StoreID
|
||||
}
|
||||
|
||||
type Import struct {
|
||||
Status filestore.Status
|
||||
Key cid.Cid
|
||||
Key multistore.StoreID
|
||||
Err string
|
||||
|
||||
Root *cid.Cid
|
||||
Source string
|
||||
FilePath string
|
||||
Size uint64
|
||||
}
|
||||
|
||||
type DealInfo struct {
|
||||
ProposalCid cid.Cid
|
||||
State DealState
|
||||
State storagemarket.StorageDealStatus
|
||||
Message string // more information about deal state, particularly errors
|
||||
Provider address.Address
|
||||
|
||||
PieceRef []byte // cid bytes
|
||||
DataRef *storagemarket.DataRef
|
||||
PieceCID cid.Cid
|
||||
Size uint64
|
||||
|
||||
PricePerEpoch types.BigInt
|
||||
Duration uint64
|
||||
|
||||
DealID abi.DealID
|
||||
|
||||
CreationTime time.Time
|
||||
}
|
||||
|
||||
type MsgWait struct {
|
||||
Receipt types.MessageReceipt
|
||||
TipSet *types.TipSet
|
||||
type MsgLookup struct {
|
||||
Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed
|
||||
Receipt types.MessageReceipt
|
||||
ReturnDec interface{}
|
||||
TipSet types.TipSetKey
|
||||
Height abi.ChainEpoch
|
||||
}
|
||||
|
||||
type MsgGasCost struct {
|
||||
Message cid.Cid // Can be different than requested, in case it was replaced, but only gas values changed
|
||||
GasUsed abi.TokenAmount
|
||||
BaseFeeBurn abi.TokenAmount
|
||||
OverEstimationBurn abi.TokenAmount
|
||||
MinerPenalty abi.TokenAmount
|
||||
MinerTip abi.TokenAmount
|
||||
Refund abi.TokenAmount
|
||||
TotalCost abi.TokenAmount
|
||||
}
|
||||
|
||||
type BlockMessages struct {
|
||||
@ -177,12 +549,6 @@ type Message struct {
|
||||
Message *types.Message
|
||||
}
|
||||
|
||||
type ChainSectorInfo struct {
|
||||
SectorID uint64
|
||||
CommD []byte
|
||||
CommR []byte
|
||||
}
|
||||
|
||||
type ActorState struct {
|
||||
Balance types.BigInt
|
||||
State interface{}
|
||||
@ -202,70 +568,147 @@ type PaychStatus struct {
|
||||
}
|
||||
|
||||
type ChannelInfo struct {
|
||||
Channel address.Address
|
||||
ChannelMessage cid.Cid
|
||||
Channel address.Address
|
||||
WaitSentinel cid.Cid
|
||||
}
|
||||
|
||||
type ChannelAvailableFunds struct {
|
||||
// Channel is the address of the channel
|
||||
Channel *address.Address
|
||||
// From is the from address of the channel (channel creator)
|
||||
From address.Address
|
||||
// To is the to address of the channel
|
||||
To address.Address
|
||||
// ConfirmedAmt is the amount of funds that have been confirmed on-chain
|
||||
// for the channel
|
||||
ConfirmedAmt types.BigInt
|
||||
// PendingAmt is the amount of funds that are pending confirmation on-chain
|
||||
PendingAmt types.BigInt
|
||||
// PendingWaitSentinel can be used with PaychGetWaitReady to wait for
|
||||
// confirmation of pending funds
|
||||
PendingWaitSentinel *cid.Cid
|
||||
// QueuedAmt is the amount that is queued up behind a pending request
|
||||
QueuedAmt types.BigInt
|
||||
// VoucherRedeemedAmt is the amount that is redeemed by vouchers on-chain
|
||||
// and in the local datastore
|
||||
VoucherReedeemedAmt types.BigInt
|
||||
}
|
||||
|
||||
type PaymentInfo struct {
|
||||
Channel address.Address
|
||||
ChannelMessage *cid.Cid
|
||||
Vouchers []*types.SignedVoucher
|
||||
Channel address.Address
|
||||
WaitSentinel cid.Cid
|
||||
Vouchers []*paych.SignedVoucher
|
||||
}
|
||||
|
||||
type VoucherSpec struct {
|
||||
Amount types.BigInt
|
||||
TimeLock uint64
|
||||
MinClose uint64
|
||||
Amount types.BigInt
|
||||
TimeLockMin abi.ChainEpoch
|
||||
TimeLockMax abi.ChainEpoch
|
||||
MinSettle abi.ChainEpoch
|
||||
|
||||
Extra *types.ModVerifyParams
|
||||
Extra *paych.ModVerifyParams
|
||||
}
|
||||
|
||||
// VoucherCreateResult is the response to calling PaychVoucherCreate
|
||||
type VoucherCreateResult struct {
|
||||
// Voucher that was created, or nil if there was an error or if there
|
||||
// were insufficient funds in the channel
|
||||
Voucher *paych.SignedVoucher
|
||||
// Shortfall is the additional amount that would be needed in the channel
|
||||
// in order to be able to create the voucher
|
||||
Shortfall types.BigInt
|
||||
}
|
||||
|
||||
type MinerPower struct {
|
||||
MinerPower types.BigInt
|
||||
TotalPower types.BigInt
|
||||
MinerPower power.Claim
|
||||
TotalPower power.Claim
|
||||
HasMinPower bool
|
||||
}
|
||||
|
||||
type QueryOffer struct {
|
||||
Err string
|
||||
|
||||
Root cid.Cid
|
||||
Root cid.Cid
|
||||
Piece *cid.Cid
|
||||
|
||||
Size uint64
|
||||
MinPrice types.BigInt
|
||||
|
||||
Miner address.Address
|
||||
MinerPeerID peer.ID
|
||||
Size uint64
|
||||
MinPrice types.BigInt
|
||||
UnsealPrice types.BigInt
|
||||
PaymentInterval uint64
|
||||
PaymentIntervalIncrease uint64
|
||||
Miner address.Address
|
||||
MinerPeer retrievalmarket.RetrievalPeer
|
||||
}
|
||||
|
||||
func (o *QueryOffer) Order(client address.Address) RetrievalOrder {
|
||||
return RetrievalOrder{
|
||||
Root: o.Root,
|
||||
Size: o.Size,
|
||||
Total: o.MinPrice,
|
||||
Root: o.Root,
|
||||
Piece: o.Piece,
|
||||
Size: o.Size,
|
||||
Total: o.MinPrice,
|
||||
UnsealPrice: o.UnsealPrice,
|
||||
PaymentInterval: o.PaymentInterval,
|
||||
PaymentIntervalIncrease: o.PaymentIntervalIncrease,
|
||||
Client: client,
|
||||
|
||||
Client: client,
|
||||
|
||||
Miner: o.Miner,
|
||||
MinerPeerID: o.MinerPeerID,
|
||||
Miner: o.Miner,
|
||||
MinerPeer: o.MinerPeer,
|
||||
}
|
||||
}
|
||||
|
||||
type MarketBalance struct {
|
||||
Escrow big.Int
|
||||
Locked big.Int
|
||||
}
|
||||
|
||||
type MarketDeal struct {
|
||||
Proposal market.DealProposal
|
||||
State market.DealState
|
||||
}
|
||||
|
||||
type RetrievalOrder struct {
|
||||
// TODO: make this less unixfs specific
|
||||
Root cid.Cid
|
||||
Size uint64
|
||||
Root cid.Cid
|
||||
Piece *cid.Cid
|
||||
Size uint64
|
||||
// TODO: support offset
|
||||
Total types.BigInt
|
||||
|
||||
Client address.Address
|
||||
Miner address.Address
|
||||
MinerPeerID peer.ID
|
||||
Total types.BigInt
|
||||
UnsealPrice types.BigInt
|
||||
PaymentInterval uint64
|
||||
PaymentIntervalIncrease uint64
|
||||
Client address.Address
|
||||
Miner address.Address
|
||||
MinerPeer retrievalmarket.RetrievalPeer
|
||||
}
|
||||
|
||||
type ReplayResults struct {
|
||||
Msg *types.Message
|
||||
Receipt *types.MessageReceipt
|
||||
Error string
|
||||
type InvocResult struct {
|
||||
Msg *types.Message
|
||||
MsgRct *types.MessageReceipt
|
||||
ExecutionTrace types.ExecutionTrace
|
||||
Error string
|
||||
Duration time.Duration
|
||||
}
|
||||
|
||||
type MethodCall struct {
|
||||
types.MessageReceipt
|
||||
Error string
|
||||
}
|
||||
|
||||
type StartDealParams struct {
|
||||
Data *storagemarket.DataRef
|
||||
Wallet address.Address
|
||||
Miner address.Address
|
||||
EpochPrice types.BigInt
|
||||
MinBlocksDuration uint64
|
||||
ProviderCollateral big.Int
|
||||
DealStartEpoch abi.ChainEpoch
|
||||
FastRetrieval bool
|
||||
VerifiedDeal bool
|
||||
}
|
||||
|
||||
type IpldObject struct {
|
||||
Cid cid.Cid
|
||||
Obj interface{}
|
||||
}
|
||||
|
||||
type ActiveSync struct {
|
||||
@ -273,7 +716,7 @@ type ActiveSync struct {
|
||||
Target *types.TipSet
|
||||
|
||||
Stage SyncStateStage
|
||||
Height uint64
|
||||
Height abi.ChainEpoch
|
||||
|
||||
Start time.Time
|
||||
End time.Time
|
||||
@ -282,6 +725,8 @@ type ActiveSync struct {
|
||||
|
||||
type SyncState struct {
|
||||
ActiveSyncs []ActiveSync
|
||||
|
||||
VMApplied uint64
|
||||
}
|
||||
|
||||
type SyncStateStage int
|
||||
@ -293,8 +738,28 @@ const (
|
||||
StageMessages
|
||||
StageSyncComplete
|
||||
StageSyncErrored
|
||||
StageFetchingMessages
|
||||
)
|
||||
|
||||
func (v SyncStateStage) String() string {
|
||||
switch v {
|
||||
case StageHeaders:
|
||||
return "header sync"
|
||||
case StagePersistHeaders:
|
||||
return "persisting headers"
|
||||
case StageMessages:
|
||||
return "message sync"
|
||||
case StageSyncComplete:
|
||||
return "complete"
|
||||
case StageSyncErrored:
|
||||
return "error"
|
||||
case StageFetchingMessages:
|
||||
return "fetching messages"
|
||||
default:
|
||||
return fmt.Sprintf("<unknown: %d>", v)
|
||||
}
|
||||
}
|
||||
|
||||
type MpoolChange int
|
||||
|
||||
const (
|
||||
@ -306,3 +771,82 @@ type MpoolUpdate struct {
|
||||
Type MpoolChange
|
||||
Message *types.SignedMessage
|
||||
}
|
||||
|
||||
type ComputeStateOutput struct {
|
||||
Root cid.Cid
|
||||
Trace []*InvocResult
|
||||
}
|
||||
|
||||
type DealCollateralBounds struct {
|
||||
Min abi.TokenAmount
|
||||
Max abi.TokenAmount
|
||||
}
|
||||
|
||||
type CirculatingSupply struct {
|
||||
FilVested abi.TokenAmount
|
||||
FilMined abi.TokenAmount
|
||||
FilBurnt abi.TokenAmount
|
||||
FilLocked abi.TokenAmount
|
||||
FilCirculating abi.TokenAmount
|
||||
}
|
||||
|
||||
type MiningBaseInfo struct {
|
||||
MinerPower types.BigInt
|
||||
NetworkPower types.BigInt
|
||||
Sectors []builtin.SectorInfo
|
||||
WorkerKey address.Address
|
||||
SectorSize abi.SectorSize
|
||||
PrevBeaconEntry types.BeaconEntry
|
||||
BeaconEntries []types.BeaconEntry
|
||||
HasMinPower bool
|
||||
}
|
||||
|
||||
type BlockTemplate struct {
|
||||
Miner address.Address
|
||||
Parents types.TipSetKey
|
||||
Ticket *types.Ticket
|
||||
Eproof *types.ElectionProof
|
||||
BeaconValues []types.BeaconEntry
|
||||
Messages []*types.SignedMessage
|
||||
Epoch abi.ChainEpoch
|
||||
Timestamp uint64
|
||||
WinningPoStProof []builtin.PoStProof
|
||||
}
|
||||
|
||||
type DataSize struct {
|
||||
PayloadSize int64
|
||||
PieceSize abi.PaddedPieceSize
|
||||
}
|
||||
|
||||
type CommPRet struct {
|
||||
Root cid.Cid
|
||||
Size abi.UnpaddedPieceSize
|
||||
}
|
||||
type HeadChange struct {
|
||||
Type string
|
||||
Val *types.TipSet
|
||||
}
|
||||
|
||||
type MsigProposeResponse int
|
||||
|
||||
const (
|
||||
MsigApprove MsigProposeResponse = iota
|
||||
MsigCancel
|
||||
)
|
||||
|
||||
type Deadline struct {
|
||||
PostSubmissions bitfield.BitField
|
||||
}
|
||||
|
||||
type Partition struct {
|
||||
AllSectors bitfield.BitField
|
||||
FaultySectors bitfield.BitField
|
||||
RecoveringSectors bitfield.BitField
|
||||
LiveSectors bitfield.BitField
|
||||
ActiveSectors bitfield.BitField
|
||||
}
|
||||
|
||||
type Fault struct {
|
||||
Miner address.Address
|
||||
Epoch abi.ChainEpoch
|
||||
}
|
||||
|
||||
@ -1,112 +1,182 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-sectorbuilder"
|
||||
"github.com/filecoin-project/go-fil-markets/piecestore"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/fsutil"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
)
|
||||
|
||||
// alias because cbor-gen doesn't like non-alias types
|
||||
type SectorState = uint64
|
||||
|
||||
const (
|
||||
UndefinedSectorState SectorState = iota
|
||||
|
||||
Empty // TODO: Is this useful
|
||||
Packing // sector not in sealStore, and not on chain
|
||||
|
||||
Unsealed // sealing / queued
|
||||
PreCommitting // on chain pre-commit
|
||||
PreCommitted // waiting for seed
|
||||
Committing
|
||||
CommitWait // waiting for message to land on chain
|
||||
Proving
|
||||
|
||||
SealFailed
|
||||
PreCommitFailed
|
||||
SealCommitFailed
|
||||
CommitFailed
|
||||
|
||||
FailedUnrecoverable
|
||||
|
||||
Faulty // sector is corrupted or gone for some reason
|
||||
FaultReported // sector has been declared as a fault on chain
|
||||
FaultedFinal // fault declared on chain
|
||||
)
|
||||
|
||||
var SectorStates = []string{
|
||||
UndefinedSectorState: "UndefinedSectorState",
|
||||
Empty: "Empty",
|
||||
Packing: "Packing",
|
||||
Unsealed: "Unsealed",
|
||||
PreCommitting: "PreCommitting",
|
||||
PreCommitted: "PreCommitted",
|
||||
Committing: "Committing",
|
||||
CommitWait: "CommitWait",
|
||||
Proving: "Proving",
|
||||
|
||||
SealFailed: "SealFailed",
|
||||
PreCommitFailed: "PreCommitFailed",
|
||||
SealCommitFailed: "SealCommitFailed",
|
||||
CommitFailed: "CommitFailed",
|
||||
|
||||
FailedUnrecoverable: "FailedUnrecoverable",
|
||||
|
||||
Faulty: "Faulty",
|
||||
FaultReported: "FaultReported",
|
||||
FaultedFinal: "FaultedFinal",
|
||||
}
|
||||
|
||||
// StorageMiner is a low-level interface to the Filecoin network storage miner node
|
||||
type StorageMiner interface {
|
||||
Common
|
||||
|
||||
ActorAddress(context.Context) (address.Address, error)
|
||||
|
||||
ActorSectorSize(context.Context, address.Address) (uint64, error)
|
||||
ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error)
|
||||
|
||||
MiningBase(context.Context) (*types.TipSet, error)
|
||||
|
||||
// Temp api for testing
|
||||
PledgeSector(context.Context) error
|
||||
|
||||
// Get the status of a given sector by ID
|
||||
SectorsStatus(context.Context, uint64) (SectorInfo, error)
|
||||
SectorsStatus(ctx context.Context, sid abi.SectorNumber, showOnChainInfo bool) (SectorInfo, error)
|
||||
|
||||
// List all staged sectors
|
||||
SectorsList(context.Context) ([]uint64, error)
|
||||
SectorsList(context.Context) ([]abi.SectorNumber, error)
|
||||
|
||||
SectorsRefs(context.Context) (map[string][]SealedRef, error)
|
||||
|
||||
SectorsUpdate(context.Context, uint64, SectorState) error
|
||||
// SectorStartSealing can be called on sectors in Empty or WaitDeals states
|
||||
// to trigger sealing early
|
||||
SectorStartSealing(context.Context, abi.SectorNumber) error
|
||||
// SectorSetSealDelay sets the time that a newly-created sector
|
||||
// waits for more deals before it starts sealing
|
||||
SectorSetSealDelay(context.Context, time.Duration) error
|
||||
// SectorGetSealDelay gets the time that a newly-created sector
|
||||
// waits for more deals before it starts sealing
|
||||
SectorGetSealDelay(context.Context) (time.Duration, error)
|
||||
// SectorSetExpectedSealDuration sets the expected time for a sector to seal
|
||||
SectorSetExpectedSealDuration(context.Context, time.Duration) error
|
||||
// SectorGetExpectedSealDuration gets the expected time for a sector to seal
|
||||
SectorGetExpectedSealDuration(context.Context) (time.Duration, error)
|
||||
SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error
|
||||
SectorRemove(context.Context, abi.SectorNumber) error
|
||||
SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error
|
||||
|
||||
WorkerStats(context.Context) (sectorbuilder.WorkerStats, error)
|
||||
StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error)
|
||||
StorageLocal(ctx context.Context) (map[stores.ID]string, error)
|
||||
StorageStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error)
|
||||
|
||||
// WorkerQueue registers a remote worker
|
||||
WorkerQueue(context.Context, sectorbuilder.WorkerCfg) (<-chan sectorbuilder.WorkerTask, error)
|
||||
// WorkerConnect tells the node to connect to workers RPC
|
||||
WorkerConnect(context.Context, string) error
|
||||
WorkerStats(context.Context) (map[uint64]storiface.WorkerStats, error)
|
||||
WorkerJobs(context.Context) (map[uint64][]storiface.WorkerJob, error)
|
||||
|
||||
WorkerDone(ctx context.Context, task uint64, res sectorbuilder.SealRes) error
|
||||
// SealingSchedDiag dumps internal sealing scheduler state
|
||||
SealingSchedDiag(context.Context) (interface{}, error)
|
||||
|
||||
stores.SectorIndex
|
||||
|
||||
MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error
|
||||
MarketListDeals(ctx context.Context) ([]MarketDeal, error)
|
||||
MarketListRetrievalDeals(ctx context.Context) ([]retrievalmarket.ProviderDealState, error)
|
||||
MarketGetDealUpdates(ctx context.Context) (<-chan storagemarket.MinerDeal, error)
|
||||
MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error)
|
||||
MarketSetAsk(ctx context.Context, price types.BigInt, verifiedPrice types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error
|
||||
MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error)
|
||||
MarketSetRetrievalAsk(ctx context.Context, rask *retrievalmarket.Ask) error
|
||||
MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error)
|
||||
MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error)
|
||||
MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error)
|
||||
|
||||
DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error
|
||||
DealsList(ctx context.Context) ([]MarketDeal, error)
|
||||
DealsConsiderOnlineStorageDeals(context.Context) (bool, error)
|
||||
DealsSetConsiderOnlineStorageDeals(context.Context, bool) error
|
||||
DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error)
|
||||
DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error
|
||||
DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error)
|
||||
DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error
|
||||
DealsConsiderOfflineStorageDeals(context.Context) (bool, error)
|
||||
DealsSetConsiderOfflineStorageDeals(context.Context, bool) error
|
||||
DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error)
|
||||
DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error
|
||||
|
||||
StorageAddLocal(ctx context.Context, path string) error
|
||||
|
||||
PiecesListPieces(ctx context.Context) ([]cid.Cid, error)
|
||||
PiecesListCidInfos(ctx context.Context) ([]cid.Cid, error)
|
||||
PiecesGetPieceInfo(ctx context.Context, pieceCid cid.Cid) (*piecestore.PieceInfo, error)
|
||||
PiecesGetCIDInfo(ctx context.Context, payloadCid cid.Cid) (*piecestore.CIDInfo, error)
|
||||
}
|
||||
|
||||
type SealRes struct {
|
||||
Err string
|
||||
GoErr error `json:"-"`
|
||||
|
||||
Proof []byte
|
||||
}
|
||||
|
||||
type SectorLog struct {
|
||||
Kind string
|
||||
Timestamp uint64
|
||||
|
||||
Trace string
|
||||
|
||||
Message string
|
||||
}
|
||||
|
||||
type SectorInfo struct {
|
||||
SectorID uint64
|
||||
State SectorState
|
||||
CommD []byte
|
||||
CommR []byte
|
||||
Proof []byte
|
||||
Deals []uint64
|
||||
Ticket sectorbuilder.SealTicket
|
||||
Seed sectorbuilder.SealSeed
|
||||
Retries uint64
|
||||
SectorID abi.SectorNumber
|
||||
State SectorState
|
||||
CommD *cid.Cid
|
||||
CommR *cid.Cid
|
||||
Proof []byte
|
||||
Deals []abi.DealID
|
||||
Ticket SealTicket
|
||||
Seed SealSeed
|
||||
PreCommitMsg *cid.Cid
|
||||
CommitMsg *cid.Cid
|
||||
Retries uint64
|
||||
ToUpgrade bool
|
||||
|
||||
LastErr string
|
||||
|
||||
Log []SectorLog
|
||||
|
||||
// On Chain Info
|
||||
SealProof abi.RegisteredSealProof // The seal proof type implies the PoSt proof/s
|
||||
Activation abi.ChainEpoch // Epoch during which the sector proof was accepted
|
||||
Expiration abi.ChainEpoch // Epoch during which the sector expires
|
||||
DealWeight abi.DealWeight // Integral of active deals over sector lifetime
|
||||
VerifiedDealWeight abi.DealWeight // Integral of active verified deals over sector lifetime
|
||||
InitialPledge abi.TokenAmount // Pledge collected to commit this sector
|
||||
// Expiration Info
|
||||
OnTime abi.ChainEpoch
|
||||
// non-zero if sector is faulty, epoch at which it will be permanently
|
||||
// removed if it doesn't recover
|
||||
Early abi.ChainEpoch
|
||||
}
|
||||
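For illustration, a hedged sketch of reading the struct above through the miner API: SectorsStatus with showOnChainInfo set fills in the on-chain fields. The miner client and sector number are assumptions.

package example

import (
	"context"
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/lotus/api"
)

// printSectorStatus fetches a sector's status, including its on-chain info,
// and prints a few of the fields defined in SectorInfo.
func printSectorStatus(ctx context.Context, miner api.StorageMiner, num abi.SectorNumber) error {
	info, err := miner.SectorsStatus(ctx, num, true) // true: include on-chain info
	if err != nil {
		return err
	}
	fmt.Printf("sector %d: state=%s activation=%d expiration=%d deals=%d\n",
		info.SectorID, info.State, info.Activation, info.Expiration, len(info.Deals))
	return nil
}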
|
||||
type SealedRef struct {
|
||||
SectorID uint64
|
||||
Offset uint64
|
||||
Size uint64
|
||||
SectorID abi.SectorNumber
|
||||
Offset abi.PaddedPieceSize
|
||||
Size abi.UnpaddedPieceSize
|
||||
}
|
||||
|
||||
type SealedRefs struct {
|
||||
Refs []SealedRef
|
||||
}
|
||||
|
||||
type SealTicket struct {
|
||||
Value abi.SealRandomness
|
||||
Epoch abi.ChainEpoch
|
||||
}
|
||||
|
||||
type SealSeed struct {
|
||||
Value abi.InteractiveSealRandomness
|
||||
Epoch abi.ChainEpoch
|
||||
}
|
||||
|
||||
func (st *SealTicket) Equals(ost *SealTicket) bool {
|
||||
return bytes.Equal(st.Value, ost.Value) && st.Epoch == ost.Epoch
|
||||
}
|
||||
|
||||
func (st *SealSeed) Equals(ost *SealSeed) bool {
|
||||
return bytes.Equal(st.Value, ost.Value) && st.Epoch == ost.Epoch
|
||||
}
|
||||
|
||||
type SectorState string
|
||||
|
||||
api/api_test.go (new file, 103 lines)
@ -0,0 +1,103 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func goCmd() string {
|
||||
var exeSuffix string
|
||||
if runtime.GOOS == "windows" {
|
||||
exeSuffix = ".exe"
|
||||
}
|
||||
path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix)
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return path
|
||||
}
|
||||
return "go"
|
||||
}
|
||||
|
||||
func TestDoesntDependOnFFI(t *testing.T) {
|
||||
deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, pkg := range strings.Fields(string(deps)) {
|
||||
if pkg == "github.com/filecoin-project/filecoin-ffi" {
|
||||
t.Fatal("api depends on filecoin-ffi")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReturnTypes(t *testing.T) {
|
||||
errType := reflect.TypeOf(new(error)).Elem()
|
||||
bareIface := reflect.TypeOf(new(interface{})).Elem()
|
||||
jmarsh := reflect.TypeOf(new(json.Marshaler)).Elem()
|
||||
|
||||
tst := func(api interface{}) func(t *testing.T) {
|
||||
return func(t *testing.T) {
|
||||
ra := reflect.TypeOf(api).Elem()
|
||||
for i := 0; i < ra.NumMethod(); i++ {
|
||||
m := ra.Method(i)
|
||||
switch m.Type.NumOut() {
|
||||
case 1: // if 1 return value, it must be an error
|
||||
require.Equal(t, errType, m.Type.Out(0), m.Name)
|
||||
|
||||
case 2: // if 2 return values, first cant be an interface/function, second must be an error
|
||||
seen := map[reflect.Type]struct{}{}
|
||||
todo := []reflect.Type{m.Type.Out(0)}
|
||||
for len(todo) > 0 {
|
||||
typ := todo[len(todo)-1]
|
||||
todo = todo[:len(todo)-1]
|
||||
|
||||
if _, ok := seen[typ]; ok {
|
||||
continue
|
||||
}
|
||||
seen[typ] = struct{}{}
|
||||
|
||||
if typ.Kind() == reflect.Interface && typ != bareIface && !typ.Implements(jmarsh) {
|
||||
t.Error("methods can't return interfaces", m.Name)
|
||||
}
|
||||
|
||||
switch typ.Kind() {
|
||||
case reflect.Ptr:
|
||||
fallthrough
|
||||
case reflect.Array:
|
||||
fallthrough
|
||||
case reflect.Slice:
|
||||
fallthrough
|
||||
case reflect.Chan:
|
||||
todo = append(todo, typ.Elem())
|
||||
case reflect.Map:
|
||||
todo = append(todo, typ.Elem())
|
||||
todo = append(todo, typ.Key())
|
||||
case reflect.Struct:
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
todo = append(todo, typ.Field(i).Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
require.NotEqual(t, reflect.Func.String(), m.Type.Out(0).Kind().String(), m.Name)
|
||||
require.Equal(t, errType, m.Type.Out(1), m.Name)
|
||||
|
||||
default:
|
||||
t.Error("methods can only have 1 or 2 return values", m.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("common", tst(new(Common)))
|
||||
t.Run("full", tst(new(FullNode)))
|
||||
t.Run("miner", tst(new(StorageMiner)))
|
||||
t.Run("worker", tst(new(WorkerAPI)))
|
||||
}
|
||||
api/api_worker.go (new file, 40 lines)
@ -0,0 +1,40 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/stores"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/storiface"
|
||||
"github.com/filecoin-project/specs-storage/storage"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
)
|
||||
|
||||
type WorkerAPI interface {
|
||||
Version(context.Context) (build.Version, error)
|
||||
// TODO: Info() (name, ...) ?
|
||||
|
||||
TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) // TaskType -> Weight
|
||||
Paths(context.Context) ([]stores.StoragePath, error)
|
||||
Info(context.Context) (storiface.WorkerInfo, error)
|
||||
|
||||
AddPiece(ctx context.Context, sector abi.SectorID, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (abi.PieceInfo, error)
|
||||
|
||||
storage.Sealer
|
||||
|
||||
MoveStorage(ctx context.Context, sector abi.SectorID, types stores.SectorFileType) error
|
||||
|
||||
UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error
|
||||
ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) (bool, error)
|
||||
|
||||
StorageAddLocal(ctx context.Context, path string) error
|
||||
|
||||
Fetch(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error
|
||||
|
||||
Closing(context.Context) (<-chan struct{}, error)
|
||||
}
|
||||
api/apibstore/apibstore.go (new file, 68 lines)
@ -0,0 +1,68 @@
|
||||
package apibstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
blocks "github.com/ipfs/go-block-format"
|
||||
"github.com/ipfs/go-cid"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/lotus/lib/blockstore"
|
||||
)
|
||||
|
||||
type ChainIO interface {
|
||||
ChainReadObj(context.Context, cid.Cid) ([]byte, error)
|
||||
ChainHasObj(context.Context, cid.Cid) (bool, error)
|
||||
}
|
||||
|
||||
type apiBStore struct {
|
||||
api ChainIO
|
||||
}
|
||||
|
||||
func NewAPIBlockstore(cio ChainIO) blockstore.Blockstore {
|
||||
return &apiBStore{
|
||||
api: cio,
|
||||
}
|
||||
}
|
||||
|
||||
func (a *apiBStore) DeleteBlock(cid.Cid) error {
|
||||
return xerrors.New("not supported")
|
||||
}
|
||||
|
||||
func (a *apiBStore) Has(c cid.Cid) (bool, error) {
|
||||
return a.api.ChainHasObj(context.TODO(), c)
|
||||
}
|
||||
|
||||
func (a *apiBStore) Get(c cid.Cid) (blocks.Block, error) {
|
||||
bb, err := a.api.ChainReadObj(context.TODO(), c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blocks.NewBlockWithCid(bb, c)
|
||||
}
|
||||
|
||||
func (a *apiBStore) GetSize(c cid.Cid) (int, error) {
|
||||
bb, err := a.api.ChainReadObj(context.TODO(), c)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(bb), nil
|
||||
}
|
||||
|
||||
func (a *apiBStore) Put(blocks.Block) error {
|
||||
return xerrors.New("not supported")
|
||||
}
|
||||
|
||||
func (a *apiBStore) PutMany([]blocks.Block) error {
|
||||
return xerrors.New("not supported")
|
||||
}
|
||||
|
||||
func (a *apiBStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
|
||||
return nil, xerrors.New("not supported")
|
||||
}
|
||||
|
||||
func (a *apiBStore) HashOnRead(enabled bool) {
|
||||
return
|
||||
}
|
||||
|
||||
var _ blockstore.Blockstore = &apiBStore{}
|
||||
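A small usage sketch, assuming a FullNode client: because FullNode exposes ChainReadObj and ChainHasObj it satisfies ChainIO, so it can back this read-only blockstore directly.

package example

import (
	"fmt"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/api/apibstore"
)

// readObject fetches a single chain object through the API-backed blockstore.
// Writes are intentionally unsupported by this blockstore.
func readObject(full api.FullNode, c cid.Cid) error {
	bs := apibstore.NewAPIBlockstore(full)
	blk, err := bs.Get(c)
	if err != nil {
		return err
	}
	fmt.Println("got block of", len(blk.RawData()), "bytes")
	return nil
}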
@ -1,105 +1,38 @@
|
||||
package apistruct
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
)
|
||||
|
||||
type permKey int
|
||||
|
||||
var permCtxKey permKey
|
||||
|
||||
const (
|
||||
// When changing these, update docs/API.md too
|
||||
|
||||
PermRead api.Permission = "read" // default
|
||||
PermWrite api.Permission = "write"
|
||||
PermSign api.Permission = "sign" // Use wallet keys for signing
|
||||
PermAdmin api.Permission = "admin" // Manage permissions
|
||||
PermRead auth.Permission = "read" // default
|
||||
PermWrite auth.Permission = "write"
|
||||
PermSign auth.Permission = "sign" // Use wallet keys for signing
|
||||
PermAdmin auth.Permission = "admin" // Manage permissions
|
||||
)
|
||||
|
||||
var AllPermissions = []api.Permission{PermRead, PermWrite, PermSign, PermAdmin}
|
||||
var defaultPerms = []api.Permission{PermRead}
|
||||
|
||||
func WithPerm(ctx context.Context, perms []api.Permission) context.Context {
|
||||
return context.WithValue(ctx, permCtxKey, perms)
|
||||
}
|
||||
var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin}
|
||||
var DefaultPerms = []auth.Permission{PermRead}
|
||||
|
||||
func PermissionedStorMinerAPI(a api.StorageMiner) api.StorageMiner {
|
||||
var out StorageMinerStruct
|
||||
permissionedAny(a, &out.Internal)
|
||||
permissionedAny(a, &out.CommonStruct.Internal)
|
||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
|
||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
|
||||
return &out
|
||||
}
|
||||
|
||||
func PermissionedFullAPI(a api.FullNode) api.FullNode {
|
||||
var out FullNodeStruct
|
||||
permissionedAny(a, &out.Internal)
|
||||
permissionedAny(a, &out.CommonStruct.Internal)
|
||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
|
||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal)
|
||||
return &out
|
||||
}
|
||||
|
||||
func HasPerm(ctx context.Context, perm api.Permission) bool {
|
||||
callerPerms, ok := ctx.Value(permCtxKey).([]api.Permission)
|
||||
if !ok {
|
||||
callerPerms = defaultPerms
|
||||
}
|
||||
|
||||
for _, callerPerm := range callerPerms {
|
||||
if callerPerm == perm {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func permissionedAny(in interface{}, out interface{}) {
|
||||
rint := reflect.ValueOf(out).Elem()
|
||||
ra := reflect.ValueOf(in)
|
||||
|
||||
for f := 0; f < rint.NumField(); f++ {
|
||||
field := rint.Type().Field(f)
|
||||
requiredPerm := api.Permission(field.Tag.Get("perm"))
|
||||
if requiredPerm == "" {
|
||||
panic("missing 'perm' tag on " + field.Name) // ok
|
||||
}
|
||||
|
||||
// Validate perm tag
|
||||
ok := false
|
||||
for _, perm := range AllPermissions {
|
||||
if requiredPerm == perm {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
panic("unknown 'perm' tag on " + field.Name) // ok
|
||||
}
|
||||
|
||||
fn := ra.MethodByName(field.Name)
|
||||
|
||||
rint.Field(f).Set(reflect.MakeFunc(field.Type, func(args []reflect.Value) (results []reflect.Value) {
|
||||
ctx := args[0].Interface().(context.Context)
|
||||
if HasPerm(ctx, requiredPerm) {
|
||||
return fn.Call(args)
|
||||
}
|
||||
|
||||
err := xerrors.Errorf("missing permission to invoke '%s' (need '%s')", field.Name, requiredPerm)
|
||||
rerr := reflect.ValueOf(&err).Elem()
|
||||
|
||||
if field.Type.NumOut() == 2 {
|
||||
return []reflect.Value{
|
||||
reflect.Zero(field.Type.Out(0)),
|
||||
rerr,
|
||||
}
|
||||
} else {
|
||||
return []reflect.Value{rerr}
|
||||
}
|
||||
}))
|
||||
|
||||
}
|
||||
func PermissionedWorkerAPI(a api.WorkerAPI) api.WorkerAPI {
|
||||
var out WorkerStruct
|
||||
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
|
||||
return &out
|
||||
}
|
||||
|
||||
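For context, an illustrative sketch (not the real struct contents) of the shape both the old permissionedAny and the new auth.PermissionedProxy operate on: an Internal struct of function-typed fields, each tagged with one of the permissions defined above.

package example

import "context"

// ExampleAPIStruct mirrors the layout the permission wrappers expect; the
// methods shown here are placeholders, not the actual FullNodeStruct fields.
type ExampleAPIStruct struct {
	Internal struct {
		Version  func(context.Context) (string, error) `perm:"read"`
		Shutdown func(context.Context) error           `perm:"admin"`
	}
}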
(File diff suppressed because it is too large)
api/apistruct/struct_test.go (new file, 9 lines)
@ -0,0 +1,9 @@
|
||||
package apistruct
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestPermTags(t *testing.T) {
|
||||
_ = PermissionedFullAPI(&FullNodeStruct{})
|
||||
_ = PermissionedStorMinerAPI(&StorageMinerStruct{})
|
||||
_ = PermissionedWorkerAPI(&WorkerStruct{})
|
||||
}
|
||||
api/cbor_gen.go (770 lines changed)
@ -1,16 +1,17 @@
|
||||
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
abi "github.com/filecoin-project/go-state-types/abi"
|
||||
paych "github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
xerrors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
|
||||
|
||||
var _ = xerrors.Errorf
|
||||
|
||||
func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
|
||||
@ -22,11 +23,17 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
scratch := make([]byte, 9)
|
||||
|
||||
// t.Channel (address.Address) (struct)
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Channel")))); err != nil {
|
||||
if len("Channel") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Channel\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Channel"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte("Channel")); err != nil {
|
||||
if _, err := io.WriteString(w, string("Channel")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -34,33 +41,39 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.ChannelMessage (cid.Cid) (struct)
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("ChannelMessage")))); err != nil {
|
||||
// t.WaitSentinel (cid.Cid) (struct)
|
||||
if len("WaitSentinel") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"WaitSentinel\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("WaitSentinel"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte("ChannelMessage")); err != nil {
|
||||
if _, err := io.WriteString(w, string("WaitSentinel")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.ChannelMessage == nil {
|
||||
if _, err := w.Write(cbg.CborNull); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteCid(w, *t.ChannelMessage); err != nil {
|
||||
return xerrors.Errorf("failed to write cid field t.ChannelMessage: %w", err)
|
||||
}
|
||||
if err := cbg.WriteCidBuf(scratch, w, t.WaitSentinel); err != nil {
|
||||
return xerrors.Errorf("failed to write cid field t.WaitSentinel: %w", err)
|
||||
}
|
||||
|
||||
// t.Vouchers ([]*types.SignedVoucher) (slice)
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Vouchers")))); err != nil {
|
||||
// t.Vouchers ([]*paych.SignedVoucher) (slice)
|
||||
if len("Vouchers") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Vouchers\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Vouchers"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte("Vouchers")); err != nil {
|
||||
if _, err := io.WriteString(w, string("Vouchers")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Vouchers)))); err != nil {
|
||||
if len(t.Vouchers) > cbg.MaxLength {
|
||||
return xerrors.Errorf("Slice value in field t.Vouchers was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Vouchers))); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, v := range t.Vouchers {
|
||||
@ -72,9 +85,12 @@ func (t *PaymentInfo) MarshalCBOR(w io.Writer) error {
|
||||
}
|
||||
|
||||
func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
|
||||
br := cbg.GetPeeker(r)
|
||||
*t = PaymentInfo{}
|
||||
|
||||
maj, extra, err := cbg.CborReadHeader(br)
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -82,114 +98,85 @@ func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error {
|
||||
return fmt.Errorf("cbor input should be of type map")
|
||||
}
|
||||
|
||||
if extra != 3 {
|
||||
return fmt.Errorf("cbor input had wrong number of fields")
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("PaymentInfo: map struct too large (%d)", extra)
|
||||
}
|
||||
|
||||
var name string
|
||||
n := extra
|
||||
|
||||
// t.Channel (address.Address) (struct)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadString(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
if name != "Channel" {
|
||||
return fmt.Errorf("expected struct map entry %s to be Channel", name)
|
||||
}
|
||||
|
||||
{
|
||||
|
||||
if err := t.Channel.UnmarshalCBOR(br); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
// t.ChannelMessage (cid.Cid) (struct)
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadString(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
if name != "ChannelMessage" {
|
||||
return fmt.Errorf("expected struct map entry %s to be ChannelMessage", name)
|
||||
}
|
||||
|
||||
{
|
||||
|
||||
pb, err := br.PeekByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pb == cbg.CborNull[0] {
|
||||
var nbuf [1]byte
|
||||
if _, err := br.Read(nbuf[:]); err != nil {
|
||||
{
|
||||
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
|
||||
c, err := cbg.ReadCid(br)
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.Channel (address.Address) (struct)
|
||||
case "Channel":
|
||||
|
||||
{
|
||||
|
||||
if err := t.Channel.UnmarshalCBOR(br); err != nil {
|
||||
return xerrors.Errorf("unmarshaling t.Channel: %w", err)
|
||||
}
|
||||
|
||||
}
|
||||
// t.WaitSentinel (cid.Cid) (struct)
|
||||
case "WaitSentinel":
|
||||
|
||||
{
|
||||
|
||||
c, err := cbg.ReadCid(br)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read cid field t.WaitSentinel: %w", err)
|
||||
}
|
||||
|
||||
t.WaitSentinel = c
|
||||
|
||||
}
|
||||
// t.Vouchers ([]*paych.SignedVoucher) (slice)
|
||||
case "Vouchers":
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("failed to read cid field t.ChannelMessage: %w", err)
|
||||
return err
|
||||
}
|
||||
|
||||
t.ChannelMessage = &c
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("t.Vouchers: array too large (%d)", extra)
|
||||
}
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("expected cbor array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Vouchers = make([]*paych.SignedVoucher, extra)
|
||||
}
|
||||
|
||||
for i := 0; i < int(extra); i++ {
|
||||
|
||||
var v paych.SignedVoucher
|
||||
if err := v.UnmarshalCBOR(br); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.Vouchers[i] = &v
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
||||
}
|
||||
|
||||
}
|
||||
// t.Vouchers ([]*types.SignedVoucher) (slice)
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadString(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
if name != "Vouchers" {
|
||||
return fmt.Errorf("expected struct map entry %s to be Vouchers", name)
|
||||
}
|
||||
|
||||
maj, extra, err = cbg.CborReadHeader(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("t.Vouchers: array too large (%d)", extra)
|
||||
}
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("expected cbor array")
|
||||
}
|
||||
if extra > 0 {
|
||||
t.Vouchers = make([]*types.SignedVoucher, extra)
|
||||
}
|
||||
for i := 0; i < int(extra); i++ {
|
||||
|
||||
var v types.SignedVoucher
|
||||
if err := v.UnmarshalCBOR(br); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.Vouchers[i] = &v
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *SealedRef) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
@ -199,48 +186,66 @@ func (t *SealedRef) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.SectorID (uint64) (uint64)
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("SectorID")))); err != nil {
|
||||
scratch := make([]byte, 9)
|
||||
|
||||
// t.SectorID (abi.SectorNumber) (uint64)
|
||||
if len("SectorID") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"SectorID\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SectorID"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte("SectorID")); err != nil {
|
||||
if _, err := io.WriteString(w, string("SectorID")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorID))); err != nil {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Offset (uint64) (uint64)
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Offset")))); err != nil {
|
||||
// t.Offset (abi.PaddedPieceSize) (uint64)
|
||||
if len("Offset") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Offset\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Offset"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte("Offset")); err != nil {
|
||||
if _, err := io.WriteString(w, string("Offset")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Offset))); err != nil {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Offset)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Size (uint64) (uint64)
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Size")))); err != nil {
|
||||
// t.Size (abi.UnpaddedPieceSize) (uint64)
|
||||
if len("Size") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Size\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Size"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte("Size")); err != nil {
|
||||
if _, err := io.WriteString(w, string("Size")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Size))); err != nil {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
|
||||
br := cbg.GetPeeker(r)
|
||||
*t = SealedRef{}
|
||||
|
||||
maj, extra, err := cbg.CborReadHeader(br)
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -248,84 +253,78 @@ func (t *SealedRef) UnmarshalCBOR(r io.Reader) error {
|
||||
return fmt.Errorf("cbor input should be of type map")
|
||||
}
|
||||
|
||||
if extra != 3 {
|
||||
return fmt.Errorf("cbor input had wrong number of fields")
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("SealedRef: map struct too large (%d)", extra)
|
||||
}
|
||||
|
||||
var name string
|
||||
n := extra
|
||||
|
||||
// t.SectorID (uint64) (uint64)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadString(br)
|
||||
if err != nil {
|
||||
return err
|
||||
{
|
||||
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
switch name {
|
||||
// t.SectorID (abi.SectorNumber) (uint64)
|
||||
case "SectorID":
|
||||
|
||||
if name != "SectorID" {
|
||||
return fmt.Errorf("expected struct map entry %s to be SectorID", name)
|
||||
}
|
||||
{
|
||||
|
||||
maj, extra, err = cbg.CborReadHeader(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.SectorID = uint64(extra)
|
||||
// t.Offset (uint64) (uint64)
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.SectorID = abi.SectorNumber(extra)
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadString(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// t.Offset (abi.PaddedPieceSize) (uint64)
|
||||
case "Offset":
|
||||
|
||||
{
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.Offset = abi.PaddedPieceSize(extra)
|
||||
|
||||
}
|
||||
// t.Size (abi.UnpaddedPieceSize) (uint64)
|
||||
case "Size":
|
||||
|
||||
{
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.Size = abi.UnpaddedPieceSize(extra)
|
||||
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
if name != "Offset" {
|
||||
return fmt.Errorf("expected struct map entry %s to be Offset", name)
|
||||
}
|
||||
|
||||
maj, extra, err = cbg.CborReadHeader(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.Offset = uint64(extra)
|
||||
// t.Size (uint64) (uint64)
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadString(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
if name != "Size" {
|
||||
return fmt.Errorf("expected struct map entry %s to be Size", name)
|
||||
}
|
||||
|
||||
maj, extra, err = cbg.CborReadHeader(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajUnsignedInt {
|
||||
return fmt.Errorf("wrong type for uint64 field")
|
||||
}
|
||||
t.Size = uint64(extra)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *SealedRefs) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
@ -335,15 +334,25 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
scratch := make([]byte, 9)
|
||||
|
||||
// t.Refs ([]api.SealedRef) (slice)
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len("Refs")))); err != nil {
|
||||
if len("Refs") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Refs\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Refs"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte("Refs")); err != nil {
|
||||
if _, err := io.WriteString(w, string("Refs")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Refs)))); err != nil {
|
||||
if len(t.Refs) > cbg.MaxLength {
|
||||
return xerrors.Errorf("Slice value in field t.Refs was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Refs))); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, v := range t.Refs {
|
||||
@ -355,9 +364,12 @@ func (t *SealedRefs) MarshalCBOR(w io.Writer) error {
|
||||
}
|
||||
|
||||
func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error {
|
||||
br := cbg.GetPeeker(r)
|
||||
*t = SealedRefs{}
|
||||
|
||||
maj, extra, err := cbg.CborReadHeader(br)
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -365,50 +377,354 @@ func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error {
|
||||
return fmt.Errorf("cbor input should be of type map")
|
||||
}
|
||||
|
||||
if extra != 1 {
|
||||
return fmt.Errorf("cbor input had wrong number of fields")
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("SealedRefs: map struct too large (%d)", extra)
|
||||
}
|
||||
|
||||
var name string
|
||||
n := extra
|
||||
|
||||
// t.Refs ([]api.SealedRef) (slice)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadString(br)
|
||||
if err != nil {
|
||||
return err
|
||||
{
|
||||
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
switch name {
|
||||
// t.Refs ([]api.SealedRef) (slice)
|
||||
case "Refs":
|
||||
|
||||
if name != "Refs" {
|
||||
return fmt.Errorf("expected struct map entry %s to be Refs", name)
|
||||
}
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
maj, extra, err = cbg.CborReadHeader(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("t.Refs: array too large (%d)", extra)
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("t.Refs: array too large (%d)", extra)
|
||||
}
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("expected cbor array")
|
||||
}
|
||||
|
||||
if maj != cbg.MajArray {
|
||||
return fmt.Errorf("expected cbor array")
|
||||
}
|
||||
if extra > 0 {
|
||||
t.Refs = make([]SealedRef, extra)
|
||||
}
|
||||
for i := 0; i < int(extra); i++ {
|
||||
if extra > 0 {
|
||||
t.Refs = make([]SealedRef, extra)
|
||||
}
|
||||
|
||||
var v SealedRef
|
||||
if err := v.UnmarshalCBOR(br); err != nil {
|
||||
return err
|
||||
for i := 0; i < int(extra); i++ {
|
||||
|
||||
var v SealedRef
|
||||
if err := v.UnmarshalCBOR(br); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.Refs[i] = v
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
func (t *SealTicket) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte{162}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scratch := make([]byte, 9)
|
||||
|
||||
// t.Value (abi.SealRandomness) (slice)
|
||||
if len("Value") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Value\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Value"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("Value")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(t.Value) > cbg.ByteArrayMaxLen {
|
||||
return xerrors.Errorf("Byte array in field t.Value was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Value))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(t.Value[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
if len("Epoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Epoch\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Epoch"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("Epoch")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.Epoch >= 0 {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
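// A standalone sketch (not part of this commit) of the CBOR integer
// convention used for t.Epoch above: a negative value v is encoded as major
// type 1 with argument -v-1, and decoded back as -1-argument.
package main

import "fmt"

func main() {
	epoch := int64(-5)

	// encode: major type 1 (negative int) carries -epoch-1
	arg := uint64(-epoch - 1) // 4

	// decode: recover the original value as -1 - argument
	decoded := -1 - int64(arg) // -5

	fmt.Println(arg, decoded)
}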
func (t *SealTicket) UnmarshalCBOR(r io.Reader) error {
|
||||
*t = SealTicket{}
|
||||
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajMap {
|
||||
return fmt.Errorf("cbor input should be of type map")
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("SealTicket: map struct too large (%d)", extra)
|
||||
}
|
||||
|
||||
var name string
|
||||
n := extra
|
||||
|
||||
for i := uint64(0); i < n; i++ {
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.Value (abi.SealRandomness) (slice)
|
||||
case "Value":
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.ByteArrayMaxLen {
|
||||
return fmt.Errorf("t.Value: byte array too large (%d)", extra)
|
||||
}
|
||||
if maj != cbg.MajByteString {
|
||||
return fmt.Errorf("expected byte array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Value = make([]uint8, extra)
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(br, t.Value[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
case "Epoch":
|
||||
{
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative oveflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.Epoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
func (t *SealSeed) MarshalCBOR(w io.Writer) error {
|
||||
if t == nil {
|
||||
_, err := w.Write(cbg.CborNull)
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte{162}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
scratch := make([]byte, 9)
|
||||
|
||||
// t.Value (abi.InteractiveSealRandomness) (slice)
|
||||
if len("Value") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Value\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Value"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("Value")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(t.Value) > cbg.ByteArrayMaxLen {
|
||||
return xerrors.Errorf("Byte array in field t.Value was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Value))); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(t.Value[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
if len("Epoch") > cbg.MaxLength {
|
||||
return xerrors.Errorf("Value in field \"Epoch\" was too long")
|
||||
}
|
||||
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Epoch"))); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.WriteString(w, string("Epoch")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if t.Epoch >= 0 {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *SealSeed) UnmarshalCBOR(r io.Reader) error {
|
||||
*t = SealSeed{}
|
||||
|
||||
br := cbg.GetPeeker(r)
|
||||
scratch := make([]byte, 8)
|
||||
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maj != cbg.MajMap {
|
||||
return fmt.Errorf("cbor input should be of type map")
|
||||
}
|
||||
|
||||
if extra > cbg.MaxLength {
|
||||
return fmt.Errorf("SealSeed: map struct too large (%d)", extra)
|
||||
}
|
||||
|
||||
var name string
|
||||
n := extra
|
||||
|
||||
for i := uint64(0); i < n; i++ {
|
||||
|
||||
{
|
||||
sval, err := cbg.ReadStringBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
name = string(sval)
|
||||
}
|
||||
|
||||
switch name {
|
||||
// t.Value (abi.InteractiveSealRandomness) (slice)
|
||||
case "Value":
|
||||
|
||||
maj, extra, err = cbg.CborReadHeaderBuf(br, scratch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if extra > cbg.ByteArrayMaxLen {
|
||||
return fmt.Errorf("t.Value: byte array too large (%d)", extra)
|
||||
}
|
||||
if maj != cbg.MajByteString {
|
||||
return fmt.Errorf("expected byte array")
|
||||
}
|
||||
|
||||
if extra > 0 {
|
||||
t.Value = make([]uint8, extra)
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(br, t.Value[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
// t.Epoch (abi.ChainEpoch) (int64)
|
||||
case "Epoch":
|
||||
{
|
||||
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
|
||||
var extraI int64
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch maj {
|
||||
case cbg.MajUnsignedInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 positive overflow")
|
||||
}
|
||||
case cbg.MajNegativeInt:
|
||||
extraI = int64(extra)
|
||||
if extraI < 0 {
|
||||
return fmt.Errorf("int64 negative oveflow")
|
||||
}
|
||||
extraI = -1 - extraI
|
||||
default:
|
||||
return fmt.Errorf("wrong type for int64 field: %d", maj)
|
||||
}
|
||||
|
||||
t.Epoch = abi.ChainEpoch(extraI)
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown struct field %d: '%s'", i, name)
|
||||
}
|
||||
|
||||
t.Refs[i] = v
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@ -1,28 +1,36 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/lotus/api/apistruct"
|
||||
"context"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-jsonrpc"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/lib/jsonrpc"
|
||||
"github.com/filecoin-project/lotus/api/apistruct"
|
||||
"github.com/filecoin-project/lotus/lib/rpcenc"
|
||||
)
|
||||
|
||||
// NewCommonRPC creates a new http jsonrpc client.
|
||||
func NewCommonRPC(addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
|
||||
func NewCommonRPC(ctx context.Context, addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) {
|
||||
var res apistruct.CommonStruct
|
||||
closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
[]interface{}{
|
||||
&res.Internal,
|
||||
}, requestHeader)
|
||||
},
|
||||
requestHeader,
|
||||
)
|
||||
|
||||
return &res, closer, err
|
||||
}
|
||||
|
||||
// NewFullNodeRPC creates a new http jsonrpc client.
|
||||
func NewFullNodeRPC(addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
func NewFullNodeRPC(ctx context.Context, addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) {
|
||||
var res apistruct.FullNodeStruct
|
||||
closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
[]interface{}{
|
||||
&res.CommonStruct.Internal,
|
||||
&res.Internal,
|
||||
@ -31,14 +39,46 @@ func NewFullNodeRPC(addr string, requestHeader http.Header) (api.FullNode, jsonr
|
||||
return &res, closer, err
|
||||
}
|
||||
|
||||
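// A minimal usage sketch (not part of this commit) for the context-aware
// constructor above. The endpoint address, auth header, and the
// "api/client" import path are illustrative assumptions.
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// hypothetical node endpoint and API token
	addr := "ws://127.0.0.1:1234/rpc/v0"
	headers := http.Header{"Authorization": []string{"Bearer <token>"}}

	full, closer, err := client.NewFullNodeRPC(ctx, addr, headers)
	if err != nil {
		panic(err)
	}
	defer closer()

	head, err := full.ChainHead(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("head height:", head.Height())
}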
// NewStorageMinerRPC creates a new http jsonrpc client for storage miner
|
||||
func NewStorageMinerRPC(addr string, requestHeader http.Header) (api.StorageMiner, jsonrpc.ClientCloser, error) {
|
||||
// NewStorageMinerRPC creates a new http jsonrpc client for miner
|
||||
func NewStorageMinerRPC(ctx context.Context, addr string, requestHeader http.Header, opts ...jsonrpc.Option) (api.StorageMiner, jsonrpc.ClientCloser, error) {
|
||||
var res apistruct.StorageMinerStruct
|
||||
closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
[]interface{}{
|
||||
&res.CommonStruct.Internal,
|
||||
&res.Internal,
|
||||
}, requestHeader)
|
||||
},
|
||||
requestHeader,
|
||||
opts...,
|
||||
)
|
||||
|
||||
return &res, closer, err
|
||||
}
|
||||
|
||||
func NewWorkerRPC(ctx context.Context, addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
|
||||
u, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "ws":
|
||||
u.Scheme = "http"
|
||||
case "wss":
|
||||
u.Scheme = "https"
|
||||
}
|
||||
///rpc/v0 -> /rpc/streams/v0/push
|
||||
|
||||
u.Path = path.Join(u.Path, "../streams/v0/push")
|
||||
|
||||
var res apistruct.WorkerStruct
|
||||
closer, err := jsonrpc.NewMergeClient(ctx, addr, "Filecoin",
|
||||
[]interface{}{
|
||||
&res.Internal,
|
||||
},
|
||||
requestHeader,
|
||||
rpcenc.ReaderParamEncoder(u.String()),
|
||||
jsonrpc.WithNoReconnect(),
|
||||
jsonrpc.WithTimeout(30*time.Second),
|
||||
)
|
||||
|
||||
return &res, closer, err
|
||||
}
|
||||
|
||||
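// A standalone sketch (not part of this commit) showing how the
// "../streams/v0/push" rewrite in NewWorkerRPC above resolves; the endpoint
// address is an illustrative assumption.
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	u, _ := url.Parse("ws://127.0.0.1:3456/rpc/v0")
	u.Scheme = "http"

	// path.Join cleans the ".." segment: /rpc/v0 -> /rpc/streams/v0/push
	u.Path = path.Join(u.Path, "../streams/v0/push")

	fmt.Println(u.String()) // http://127.0.0.1:3456/rpc/streams/v0/push
}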
420 api/docgen/docgen.go Normal file
@ -0,0 +1,420 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-filestore"
|
||||
metrics "github.com/libp2p/go-libp2p-core/metrics"
|
||||
"github.com/libp2p/go-libp2p-core/network"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
protocol "github.com/libp2p/go-libp2p-core/protocol"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/multiformats/go-multiaddr"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-fil-markets/retrievalmarket"
|
||||
"github.com/filecoin-project/go-jsonrpc/auth"
|
||||
"github.com/filecoin-project/go-multistore"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/crypto"
|
||||
"github.com/filecoin-project/go-state-types/exitcode"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/api/apistruct"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
|
||||
var ExampleValues = map[reflect.Type]interface{}{
|
||||
reflect.TypeOf(auth.Permission("")): auth.Permission("write"),
|
||||
reflect.TypeOf(""): "string value",
|
||||
reflect.TypeOf(uint64(42)): uint64(42),
|
||||
reflect.TypeOf(byte(7)): byte(7),
|
||||
reflect.TypeOf([]byte{}): []byte("byte array"),
|
||||
}
|
||||
|
||||
func addExample(v interface{}) {
|
||||
ExampleValues[reflect.TypeOf(v)] = v
|
||||
}
|
||||
|
||||
func init() {
|
||||
c, err := cid.Decode("bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
ExampleValues[reflect.TypeOf(c)] = c
|
||||
|
||||
c2, err := cid.Decode("bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
tsk := types.NewTipSetKey(c, c2)
|
||||
|
||||
ExampleValues[reflect.TypeOf(tsk)] = tsk
|
||||
|
||||
addr, err := address.NewIDAddress(1234)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
ExampleValues[reflect.TypeOf(addr)] = addr
|
||||
|
||||
pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
addExample(pid)
|
||||
addExample(&pid)
|
||||
|
||||
addExample(bitfield.NewFromSet([]uint64{5}))
|
||||
addExample(abi.RegisteredSealProof_StackedDrg32GiBV1)
|
||||
addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1)
|
||||
addExample(abi.ChainEpoch(10101))
|
||||
addExample(crypto.SigTypeBLS)
|
||||
addExample(int64(9))
|
||||
addExample(12.3)
|
||||
addExample(123)
|
||||
addExample(uintptr(0))
|
||||
addExample(abi.MethodNum(1))
|
||||
addExample(exitcode.ExitCode(0))
|
||||
addExample(crypto.DomainSeparationTag_ElectionProofProduction)
|
||||
addExample(true)
|
||||
addExample(abi.UnpaddedPieceSize(1024))
|
||||
addExample(abi.UnpaddedPieceSize(1024).Padded())
|
||||
addExample(abi.DealID(5432))
|
||||
addExample(filestore.StatusFileChanged)
|
||||
addExample(abi.SectorNumber(9))
|
||||
addExample(abi.SectorSize(32 * 1024 * 1024 * 1024))
|
||||
addExample(api.MpoolChange(0))
|
||||
addExample(network.Connected)
|
||||
addExample(dtypes.NetworkName("lotus"))
|
||||
addExample(api.SyncStateStage(1))
|
||||
addExample(build.FullAPIVersion)
|
||||
addExample(api.PCHInbound)
|
||||
addExample(time.Minute)
|
||||
addExample(datatransfer.TransferID(3))
|
||||
addExample(datatransfer.Ongoing)
|
||||
addExample(multistore.StoreID(50))
|
||||
addExample(retrievalmarket.ClientEventDealAccepted)
|
||||
addExample(retrievalmarket.DealStatusNew)
|
||||
addExample(network.ReachabilityPublic)
|
||||
addExample(build.NewestNetworkVersion)
|
||||
addExample(&types.ExecutionTrace{
|
||||
Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message),
|
||||
MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt),
|
||||
})
|
||||
addExample(map[string]types.Actor{
|
||||
"t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor),
|
||||
})
|
||||
addExample(map[string]api.MarketDeal{
|
||||
"t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal),
|
||||
})
|
||||
addExample(map[string]api.MarketBalance{
|
||||
"t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance),
|
||||
})
|
||||
addExample(map[string]*pubsub.TopicScoreSnapshot{
|
||||
"/blocks": {
|
||||
TimeInMesh: time.Minute,
|
||||
FirstMessageDeliveries: 122,
|
||||
MeshMessageDeliveries: 1234,
|
||||
InvalidMessageDeliveries: 3,
|
||||
},
|
||||
})
|
||||
addExample(map[string]metrics.Stats{
|
||||
"12D3KooWSXmXLJmBR1M7i9RW9GQPNUhZSzXKzxDHWtAgNuJAbyEJ": {
|
||||
RateIn: 100,
|
||||
RateOut: 50,
|
||||
TotalIn: 174000,
|
||||
TotalOut: 12500,
|
||||
},
|
||||
})
|
||||
addExample(map[protocol.ID]metrics.Stats{
|
||||
"/fil/hello/1.0.0": {
|
||||
RateIn: 100,
|
||||
RateOut: 50,
|
||||
TotalIn: 174000,
|
||||
TotalOut: 12500,
|
||||
},
|
||||
})
|
||||
|
||||
maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// because reflect.TypeOf(maddr) returns the concrete type...
|
||||
ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr
|
||||
|
||||
}
|
||||
|
||||
func exampleValue(t, parent reflect.Type) interface{} {
|
||||
v, ok := ExampleValues[t]
|
||||
if ok {
|
||||
return v
|
||||
}
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Slice:
|
||||
out := reflect.New(t).Elem()
|
||||
reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t)))
|
||||
return out.Interface()
|
||||
case reflect.Chan:
|
||||
return exampleValue(t.Elem(), nil)
|
||||
case reflect.Struct:
|
||||
es := exampleStruct(t, parent)
|
||||
v := reflect.ValueOf(es).Elem().Interface()
|
||||
ExampleValues[t] = v
|
||||
return v
|
||||
case reflect.Array:
|
||||
out := reflect.New(t).Elem()
|
||||
for i := 0; i < t.Len(); i++ {
|
||||
out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t)))
|
||||
}
|
||||
return out.Interface()
|
||||
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() == reflect.Struct {
|
||||
es := exampleStruct(t.Elem(), t)
|
||||
//ExampleValues[t] = es
|
||||
return es
|
||||
}
|
||||
case reflect.Interface:
|
||||
return struct{}{}
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("No example value for type: %s", t))
|
||||
}
|
||||
|
||||
func exampleStruct(t, parent reflect.Type) interface{} {
|
||||
ns := reflect.New(t)
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Type == parent {
|
||||
continue
|
||||
}
|
||||
if strings.Title(f.Name) == f.Name {
|
||||
ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t)))
|
||||
}
|
||||
}
|
||||
|
||||
return ns.Interface()
|
||||
}
|
||||
|
||||
type Visitor struct {
|
||||
Methods map[string]ast.Node
|
||||
}
|
||||
|
||||
func (v *Visitor) Visit(node ast.Node) ast.Visitor {
|
||||
st, ok := node.(*ast.TypeSpec)
|
||||
if !ok {
|
||||
return v
|
||||
}
|
||||
|
||||
if st.Name.Name != "FullNode" {
|
||||
return nil
|
||||
}
|
||||
|
||||
iface := st.Type.(*ast.InterfaceType)
|
||||
for _, m := range iface.Methods.List {
|
||||
if len(m.Names) > 0 {
|
||||
v.Methods[m.Names[0].Name] = m
|
||||
}
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
const noComment = "There are not yet any comments for this method."
|
||||
|
||||
func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint
|
||||
fset := token.NewFileSet()
|
||||
pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments)
|
||||
if err != nil {
|
||||
fmt.Println("parse error: ", err)
|
||||
}
|
||||
|
||||
ap := pkgs["api"]
|
||||
|
||||
f := ap.Files["api/api_full.go"]
|
||||
|
||||
cmap := ast.NewCommentMap(fset, f, f.Comments)
|
||||
|
||||
v := &Visitor{make(map[string]ast.Node)}
|
||||
ast.Walk(v, pkgs["api"])
|
||||
|
||||
groupDocs := make(map[string]string)
|
||||
out := make(map[string]string)
|
||||
for mn, node := range v.Methods {
|
||||
cs := cmap.Filter(node).Comments()
|
||||
if len(cs) == 0 {
|
||||
out[mn] = noComment
|
||||
} else {
|
||||
for _, c := range cs {
|
||||
if strings.HasPrefix(c.Text(), "MethodGroup:") {
|
||||
parts := strings.Split(c.Text(), "\n")
|
||||
groupName := strings.TrimSpace(parts[0][12:])
|
||||
comment := strings.Join(parts[1:], "\n")
|
||||
groupDocs[groupName] = comment
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
last := cs[len(cs)-1].Text()
|
||||
if !strings.HasPrefix(last, "MethodGroup:") {
|
||||
out[mn] = last
|
||||
} else {
|
||||
out[mn] = noComment
|
||||
}
|
||||
}
|
||||
}
|
||||
return out, groupDocs
|
||||
}
|
||||
|
||||
type MethodGroup struct {
|
||||
GroupName string
|
||||
Header string
|
||||
Methods []*Method
|
||||
}
|
||||
|
||||
type Method struct {
|
||||
Comment string
|
||||
Name string
|
||||
InputExample string
|
||||
ResponseExample string
|
||||
}
|
||||
|
||||
func methodGroupFromName(mn string) string {
|
||||
i := strings.IndexFunc(mn[1:], func(r rune) bool {
|
||||
return unicode.IsUpper(r)
|
||||
})
|
||||
if i < 0 {
|
||||
return ""
|
||||
}
|
||||
return mn[:i+1]
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
comments, groupComments := parseApiASTInfo()
|
||||
|
||||
groups := make(map[string]*MethodGroup)
|
||||
|
||||
var api struct{ api.FullNode }
|
||||
t := reflect.TypeOf(api)
|
||||
for i := 0; i < t.NumMethod(); i++ {
|
||||
m := t.Method(i)
|
||||
|
||||
groupName := methodGroupFromName(m.Name)
|
||||
|
||||
g, ok := groups[groupName]
|
||||
if !ok {
|
||||
g = new(MethodGroup)
|
||||
g.Header = groupComments[groupName]
|
||||
g.GroupName = groupName
|
||||
groups[groupName] = g
|
||||
}
|
||||
|
||||
var args []interface{}
|
||||
ft := m.Func.Type()
|
||||
for j := 2; j < ft.NumIn(); j++ {
|
||||
inp := ft.In(j)
|
||||
args = append(args, exampleValue(inp, nil))
|
||||
}
|
||||
|
||||
v, err := json.MarshalIndent(args, "", " ")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
outv := exampleValue(ft.Out(0), nil)
|
||||
|
||||
ov, err := json.MarshalIndent(outv, "", " ")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
g.Methods = append(g.Methods, &Method{
|
||||
Name: m.Name,
|
||||
Comment: comments[m.Name],
|
||||
InputExample: string(v),
|
||||
ResponseExample: string(ov),
|
||||
})
|
||||
}
|
||||
|
||||
var groupslice []*MethodGroup
|
||||
for _, g := range groups {
|
||||
groupslice = append(groupslice, g)
|
||||
}
|
||||
|
||||
sort.Slice(groupslice, func(i, j int) bool {
|
||||
return groupslice[i].GroupName < groupslice[j].GroupName
|
||||
})
|
||||
|
||||
fmt.Printf("# Groups\n")
|
||||
|
||||
for _, g := range groupslice {
|
||||
fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName)
|
||||
for _, method := range g.Methods {
|
||||
fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name)
|
||||
}
|
||||
}
|
||||
|
||||
permStruct := reflect.TypeOf(apistruct.FullNodeStruct{}.Internal)
|
||||
commonPermStruct := reflect.TypeOf(apistruct.CommonStruct{}.Internal)
|
||||
|
||||
for _, g := range groupslice {
|
||||
g := g
|
||||
fmt.Printf("## %s\n", g.GroupName)
|
||||
fmt.Printf("%s\n\n", g.Header)
|
||||
|
||||
sort.Slice(g.Methods, func(i, j int) bool {
|
||||
return g.Methods[i].Name < g.Methods[j].Name
|
||||
})
|
||||
|
||||
for _, m := range g.Methods {
|
||||
fmt.Printf("### %s\n", m.Name)
|
||||
fmt.Printf("%s\n\n", m.Comment)
|
||||
|
||||
meth, ok := permStruct.FieldByName(m.Name)
|
||||
if !ok {
|
||||
meth, ok = commonPermStruct.FieldByName(m.Name)
|
||||
if !ok {
|
||||
panic("no perms for method: " + m.Name)
|
||||
}
|
||||
}
|
||||
|
||||
perms := meth.Tag.Get("perm")
|
||||
|
||||
fmt.Printf("Perms: %s\n\n", perms)
|
||||
|
||||
if strings.Count(m.InputExample, "\n") > 0 {
|
||||
fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample)
|
||||
} else {
|
||||
fmt.Printf("Inputs: `%s`\n\n", m.InputExample)
|
||||
}
|
||||
|
||||
if strings.Count(m.ResponseExample, "\n") > 0 {
|
||||
fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample)
|
||||
} else {
|
||||
fmt.Printf("Response: `%s`\n\n", m.ResponseExample)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
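// A standalone sketch (not part of this commit) showing how the
// methodGroupFromName helper above splits API method names; the method names
// are illustrative.
package main

import (
	"fmt"
	"strings"
	"unicode"
)

func methodGroupFromName(mn string) string {
	i := strings.IndexFunc(mn[1:], func(r rune) bool {
		return unicode.IsUpper(r)
	})
	if i < 0 {
		return ""
	}
	return mn[:i+1]
}

func main() {
	fmt.Println(methodGroupFromName("ChainHead"))    // "Chain"
	fmt.Println(methodGroupFromName("StateWaitMsg")) // "State"
	fmt.Println(methodGroupFromName("Version"))      // "" (no second uppercase rune)
}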
56 api/test/blockminer.go Normal file
@ -0,0 +1,56 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/miner"
|
||||
)
|
||||
|
||||
type BlockMiner struct {
|
||||
ctx context.Context
|
||||
t *testing.T
|
||||
miner TestStorageNode
|
||||
blocktime time.Duration
|
||||
mine int64
|
||||
nulls int64
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
func NewBlockMiner(ctx context.Context, t *testing.T, miner TestStorageNode, blocktime time.Duration) *BlockMiner {
|
||||
return &BlockMiner{
|
||||
ctx: ctx,
|
||||
t: t,
|
||||
miner: miner,
|
||||
blocktime: blocktime,
|
||||
mine: int64(1),
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (bm *BlockMiner) MineBlocks() {
|
||||
time.Sleep(time.Second)
|
||||
go func() {
|
||||
defer close(bm.done)
|
||||
for atomic.LoadInt64(&bm.mine) == 1 {
|
||||
time.Sleep(bm.blocktime)
|
||||
nulls := atomic.SwapInt64(&bm.nulls, 0)
|
||||
if err := bm.miner.MineOne(bm.ctx, miner.MineReq{
|
||||
InjectNulls: abi.ChainEpoch(nulls),
|
||||
Done: func(bool, abi.ChainEpoch, error) {},
|
||||
}); err != nil {
|
||||
bm.t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (bm *BlockMiner) Stop() {
|
||||
atomic.AddInt64(&bm.mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-bm.done
|
||||
}
|
||||
116 api/test/ccupgrade.go Normal file
@ -0,0 +1,116 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
)
|
||||
|
||||
func TestCCUpgrade(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, 1, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
CC := abi.SectorNumber(GenesisPreseals + 1)
|
||||
Upgraded := CC + 1
|
||||
|
||||
pledgeSectors(t, ctx, miner, 1, 0, nil)
|
||||
|
||||
sl, err := miner.SectorsList(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(sl) != 1 {
|
||||
t.Fatal("expected 1 sector")
|
||||
}
|
||||
|
||||
if sl[0] != CC {
|
||||
t.Fatal("bad")
|
||||
}
|
||||
|
||||
{
|
||||
si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Less(t, 50000, int(si.Expiration))
|
||||
}
|
||||
|
||||
if err := miner.SectorMarkForUpgrade(ctx, sl[0]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
makeDeal(t, ctx, 6, client, miner, false, false)
|
||||
|
||||
// Validate upgrade
|
||||
|
||||
{
|
||||
exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, exp)
|
||||
require.Greater(t, 50000, int(exp.OnTime))
|
||||
}
|
||||
{
|
||||
exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Less(t, 50000, int(exp.OnTime))
|
||||
}
|
||||
|
||||
dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Sector should expire.
|
||||
for {
|
||||
// Wait for the sector to expire.
|
||||
status, err := miner.SectorsStatus(ctx, CC, true)
|
||||
require.NoError(t, err)
|
||||
if status.OnTime == 0 && status.Early == 0 {
|
||||
break
|
||||
}
|
||||
t.Log("waiting for sector to expire")
|
||||
// wait one deadline per loop.
|
||||
time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blocktime)
|
||||
}
|
||||
|
||||
fmt.Println("shutting down mining")
|
||||
atomic.AddInt64(&mine, -1)
|
||||
<-done
|
||||
}
|
||||
@ -8,27 +8,47 @@ import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
logging "github.com/ipfs/go-log"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/ipfs/go-cid"
|
||||
files "github.com/ipfs/go-ipfs-files"
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
"github.com/ipld/go-car"
|
||||
|
||||
"github.com/filecoin-project/go-fil-markets/storagemarket"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
"github.com/filecoin-project/lotus/miner"
|
||||
dag "github.com/ipfs/go-merkledag"
|
||||
dstest "github.com/ipfs/go-merkledag/test"
|
||||
unixfile "github.com/ipfs/go-unixfs/file"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
ipld "github.com/ipfs/go-ipld-format"
|
||||
)
|
||||
|
||||
var MineNext = miner.MineReq{
|
||||
InjectNulls: 0,
|
||||
Done: func(bool, abi.ChainEpoch, error) {},
|
||||
}
|
||||
|
||||
func init() {
|
||||
logging.SetAllLoggers(logging.LevelInfo)
|
||||
build.InsecurePoStValidation = true
|
||||
}
|
||||
|
||||
func TestDealFlow(t *testing.T, b APIBuilder) {
|
||||
os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport, fastRet bool) {
|
||||
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, 1, []int{0})
|
||||
n, sn := b(t, 1, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
@ -42,8 +62,67 @@ func TestDealFlow(t *testing.T, b APIBuilder) {
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
data := make([]byte, 1000)
|
||||
rand.New(rand.NewSource(5)).Read(data)
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
makeDeal(t, ctx, 6, client, miner, carExport, fastRet)
|
||||
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
}
|
||||
|
||||
func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, 1, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
makeDeal(t, ctx, 6, client, miner, false, false)
|
||||
makeDeal(t, ctx, 7, client, miner, false, false)
|
||||
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
}
|
||||
|
||||
func makeDeal(t *testing.T, ctx context.Context, rseed int, client *impl.FullNodeAPI, miner TestStorageNode, carExport, fastRet bool) {
|
||||
data := make([]byte, 1600)
|
||||
rand.New(rand.NewSource(int64(rseed))).Read(data)
|
||||
|
||||
r := bytes.NewReader(data)
|
||||
fcid, err := client.ClientImportLocal(ctx, r)
|
||||
@ -51,37 +130,180 @@ func TestDealFlow(t *testing.T, b APIBuilder) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
fmt.Println("FILE CID: ", fcid)
|
||||
|
||||
deal := startDeal(t, ctx, miner, client, fcid, fastRet)
|
||||
|
||||
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
|
||||
time.Sleep(time.Second)
|
||||
waitDealSealed(t, ctx, miner, client, deal, false)
|
||||
|
||||
// Retrieval
|
||||
info, err := client.ClientGetDealInfo(ctx, *deal)
|
||||
require.NoError(t, err)
|
||||
|
||||
testRetrieval(t, ctx, client, fcid, &info.PieceCID, carExport, data)
|
||||
}
|
||||
|
||||
func TestFastRetrievalDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, 1, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
data := make([]byte, 1600)
|
||||
rand.New(rand.NewSource(int64(8))).Read(data)
|
||||
|
||||
r := bytes.NewReader(data)
|
||||
fcid, err := client.ClientImportLocal(ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("FILE CID: ", fcid)
|
||||
|
||||
mine := true
|
||||
deal := startDeal(t, ctx, miner, client, fcid, true)
|
||||
|
||||
waitDealPublished(t, ctx, miner, deal)
|
||||
fmt.Println("deal published, retrieving")
|
||||
// Retrieval
|
||||
info, err := client.ClientGetDealInfo(ctx, *deal)
|
||||
require.NoError(t, err)
|
||||
|
||||
testRetrieval(t, ctx, client, fcid, &info.PieceCID, false, data)
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
}
|
||||
|
||||
func TestSenondDealRetrieval(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, 1, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
|
||||
mine := int64(1)
|
||||
done := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
for mine {
|
||||
time.Sleep(time.Second)
|
||||
fmt.Println("mining a block now")
|
||||
if err := sn[0].MineOne(ctx); err != nil {
|
||||
for atomic.LoadInt64(&mine) == 1 {
|
||||
time.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
addr, err := client.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
{
|
||||
data1 := make([]byte, 800)
|
||||
rand.New(rand.NewSource(int64(3))).Read(data1)
|
||||
r := bytes.NewReader(data1)
|
||||
|
||||
fcid1, err := client.ClientImportLocal(ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
data2 := make([]byte, 800)
|
||||
rand.New(rand.NewSource(int64(9))).Read(data2)
|
||||
r2 := bytes.NewReader(data2)
|
||||
|
||||
fcid2, err := client.ClientImportLocal(ctx, r2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
deal1 := startDeal(t, ctx, miner, client, fcid1, true)
|
||||
|
||||
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
|
||||
time.Sleep(time.Second)
|
||||
waitDealSealed(t, ctx, miner, client, deal1, true)
|
||||
|
||||
deal2 := startDeal(t, ctx, miner, client, fcid2, true)
|
||||
|
||||
time.Sleep(time.Second)
|
||||
waitDealSealed(t, ctx, miner, client, deal2, false)
|
||||
|
||||
// Retrieval
|
||||
info, err := client.ClientGetDealInfo(ctx, *deal2)
|
||||
require.NoError(t, err)
|
||||
|
||||
rf, _ := miner.SectorsRefs(ctx)
|
||||
fmt.Printf("refs: %+v\n", rf)
|
||||
|
||||
testRetrieval(t, ctx, client, fcid2, &info.PieceCID, false, data2)
|
||||
}
|
||||
deal, err := client.ClientStartDeal(ctx, fcid, addr, maddr, types.NewInt(40000000), 100)
|
||||
|
||||
atomic.AddInt64(&mine, -1)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
}
|
||||
|
||||
func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client *impl.FullNodeAPI, fcid cid.Cid, fastRet bool) *cid.Cid {
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
|
||||
time.Sleep(time.Second)
|
||||
addr, err := client.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{
|
||||
Data: &storagemarket.DataRef{
|
||||
TransferType: storagemarket.TTGraphsync,
|
||||
Root: fcid,
|
||||
},
|
||||
Wallet: addr,
|
||||
Miner: maddr,
|
||||
EpochPrice: types.NewInt(1000000),
|
||||
MinBlocksDuration: uint64(build.MinDealDuration),
|
||||
FastRetrieval: fastRet,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("%+v", err)
|
||||
}
|
||||
return deal
|
||||
}
|
||||
|
||||
func waitDealSealed(t *testing.T, ctx context.Context, miner TestStorageNode, client *impl.FullNodeAPI, deal *cid.Cid, noseal bool) {
|
||||
loop:
|
||||
for {
|
||||
di, err := client.ClientGetDealInfo(ctx, *deal)
|
||||
@ -89,23 +311,73 @@ loop:
|
||||
t.Fatal(err)
|
||||
}
|
||||
switch di.State {
|
||||
case api.DealRejected:
|
||||
case storagemarket.StorageDealSealing:
|
||||
if noseal {
|
||||
return
|
||||
}
|
||||
startSealingWaiting(t, ctx, miner)
|
||||
case storagemarket.StorageDealProposalRejected:
|
||||
t.Fatal("deal rejected")
|
||||
case api.DealFailed:
|
||||
case storagemarket.StorageDealFailing:
|
||||
t.Fatal("deal failed")
|
||||
case api.DealError:
|
||||
t.Fatal("deal errored")
|
||||
case api.DealComplete:
|
||||
case storagemarket.StorageDealError:
|
||||
t.Fatal("deal errored", di.Message)
|
||||
case storagemarket.StorageDealActive:
|
||||
fmt.Println("COMPLETE", di)
|
||||
break loop
|
||||
}
|
||||
fmt.Println("Deal state: ", api.DealStates[di.State])
|
||||
fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
|
||||
time.Sleep(time.Second / 2)
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieval
|
||||
func waitDealPublished(t *testing.T, ctx context.Context, miner TestStorageNode, deal *cid.Cid) {
|
||||
subCtx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
updates, err := miner.MarketGetDealUpdates(subCtx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Fatal("context timeout")
|
||||
case di := <-updates:
|
||||
if deal.Equals(di.ProposalCid) {
|
||||
switch di.State {
|
||||
case storagemarket.StorageDealProposalRejected:
|
||||
t.Fatal("deal rejected")
|
||||
case storagemarket.StorageDealFailing:
|
||||
t.Fatal("deal failed")
|
||||
case storagemarket.StorageDealError:
|
||||
t.Fatal("deal errored", di.Message)
|
||||
case storagemarket.StorageDealFinalizing, storagemarket.StorageDealSealing, storagemarket.StorageDealActive:
|
||||
fmt.Println("COMPLETE", di)
|
||||
return
|
||||
}
|
||||
fmt.Println("Deal state: ", storagemarket.DealStates[di.State])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
offers, err := client.ClientFindData(ctx, fcid)
|
||||
func startSealingWaiting(t *testing.T, ctx context.Context, miner TestStorageNode) {
|
||||
snums, err := miner.SectorsList(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, snum := range snums {
|
||||
si, err := miner.SectorsStatus(ctx, snum, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Logf("Sector state: %s", si.State)
|
||||
if si.State == api.SectorState(sealing.WaitDeals) {
|
||||
require.NoError(t, miner.SectorStartSealing(ctx, snum))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testRetrieval(t *testing.T, ctx context.Context, client *impl.FullNodeAPI, fcid cid.Cid, piece *cid.Cid, carExport bool, data []byte) {
|
||||
offers, err := client.ClientFindData(ctx, fcid, piece)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@ -118,16 +390,22 @@ loop:
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(rpath)
|
||||
defer os.RemoveAll(rpath) //nolint:errcheck
|
||||
|
||||
caddr, err := client.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = client.ClientRetrieve(ctx, offers[0].Order(caddr), filepath.Join(rpath, "ret"))
|
||||
if err != nil {
|
||||
t.Fatalf("%+v", err)
|
||||
ref := &api.FileRef{
|
||||
Path: filepath.Join(rpath, "ret"),
|
||||
IsCAR: carExport,
|
||||
}
|
||||
updates, err := client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref)
|
||||
for update := range updates {
|
||||
if update.Err != "" {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret"))
|
||||
@ -135,11 +413,41 @@ loop:
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if carExport {
|
||||
rdata = extractCarData(t, ctx, rdata, rpath)
|
||||
}
|
||||
|
||||
if !bytes.Equal(rdata, data) {
|
||||
t.Fatal("wrong data retrieved")
|
||||
}
|
||||
|
||||
mine = false
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
}
|
||||
|
||||
func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte {
|
||||
bserv := dstest.Bserv()
|
||||
ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b, err := bserv.GetBlock(ctx, ch.Roots[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
nd, err := ipld.Decode(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
dserv := dag.NewDAGService(bserv)
|
||||
fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
outPath := filepath.Join(rpath, "retLoadedCAR")
|
||||
if err := files.WriteTo(fil, outPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
rdata, err = ioutil.ReadFile(outPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return rdata
|
||||
}
|
||||
|
||||
@ -1,31 +1,207 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
logging "github.com/ipfs/go-log/v2"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/miner"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
)
|
||||
|
||||
//nolint:deadcode,varcheck
|
||||
var log = logging.Logger("apitest")
|
||||
|
||||
func (ts *testSuite) testMining(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
apis, sn := ts.makeNodes(t, 1, []int{0})
|
||||
apis, sn := ts.makeNodes(t, 1, OneMiner)
|
||||
api := apis[0]
|
||||
|
||||
h1, err := api.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), h1.Height())
|
||||
|
||||
newHeads, err := api.ChainNotify(ctx)
|
||||
require.NoError(t, err)
|
||||
<-newHeads
|
||||
initHead := (<-newHeads)[0]
|
||||
baseHeight := initHead.Val.Height()
|
||||
|
||||
err = sn[0].MineOne(ctx)
|
||||
h1, err := api.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(h1.Height()), int64(baseHeight))
|
||||
|
||||
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
<-newHeads
|
||||
|
||||
h2, err := api.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), h2.Height())
|
||||
require.Greater(t, int64(h2.Height()), int64(h1.Height()))
|
||||
}
|
||||
|
||||
func (ts *testSuite) testMiningReal(t *testing.T) {
|
||||
build.InsecurePoStValidation = false
|
||||
defer func() {
|
||||
build.InsecurePoStValidation = true
|
||||
}()
|
||||
|
||||
ctx := context.Background()
|
||||
apis, sn := ts.makeNodes(t, 1, OneMiner)
|
||||
api := apis[0]
|
||||
|
||||
newHeads, err := api.ChainNotify(ctx)
|
||||
require.NoError(t, err)
|
||||
initHead := (<-newHeads)[0]
|
||||
if initHead.Val.Height() != 2 {
|
||||
<-newHeads
|
||||
}
|
||||
|
||||
h1, err := api.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, abi.ChainEpoch(2), h1.Height())
|
||||
|
||||
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
<-newHeads
|
||||
|
||||
h2, err := api.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, abi.ChainEpoch(3), h2.Height())
|
||||
|
||||
MineUntilBlock(ctx, t, apis[0], sn[0], nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
<-newHeads
|
||||
|
||||
h2, err = api.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, abi.ChainEpoch(4), h2.Height())
|
||||
}
|
||||
|
||||
func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) {
|
||||
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
|
||||
// test making a deal with a fresh miner, and see if it starts to mine
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, 1, []StorageMiner{
|
||||
{Full: 0, Preseal: PresealGenesis},
|
||||
{Full: 0, Preseal: 0}, // TODO: Add support for miners on non-first full node
|
||||
})
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
provider := sn[1]
|
||||
genesisMiner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := provider.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
time.Sleep(time.Second)
|
||||
|
||||
data := make([]byte, 600)
|
||||
rand.New(rand.NewSource(5)).Read(data)
|
||||
|
||||
r := bytes.NewReader(data)
|
||||
fcid, err := client.ClientImportLocal(ctx, r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("FILE CID: ", fcid)
|
||||
|
||||
var mine int32 = 1
|
||||
done := make(chan struct{})
|
||||
minedTwo := make(chan struct{})
|
||||
|
||||
m2addr, err := sn[1].ActorAddress(context.TODO())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(done)
|
||||
|
||||
complChan := minedTwo
|
||||
for atomic.LoadInt32(&mine) != 0 {
|
||||
wait := make(chan int)
|
||||
mdone := func(mined bool, _ abi.ChainEpoch, err error) {
|
||||
n := 0
|
||||
if mined {
|
||||
n = 1
|
||||
}
|
||||
wait <- n
|
||||
}
|
||||
|
||||
if err := sn[0].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if err := sn[1].MineOne(ctx, miner.MineReq{Done: mdone}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
expect := <-wait
|
||||
expect += <-wait
|
||||
|
||||
time.Sleep(blocktime)
|
||||
if expect == 0 {
|
||||
// null block
|
||||
continue
|
||||
}
|
||||
|
||||
var nodeOneMined bool
|
||||
for _, node := range sn {
|
||||
mb, err := node.MiningBase(ctx)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, b := range mb.Blocks() {
|
||||
if b.Miner == m2addr {
|
||||
nodeOneMined = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if nodeOneMined && complChan != nil {
|
||||
close(complChan)
|
||||
complChan = nil
|
||||
}
|
||||
|
||||
}
|
||||
}()
|
||||
|
||||
deal := startDeal(t, ctx, provider, client, fcid, false)
|
||||
|
||||
// TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this
|
||||
time.Sleep(time.Second)
|
||||
|
||||
waitDealSealed(t, ctx, provider, client, deal, false)
|
||||
|
||||
<-minedTwo
|
||||
|
||||
atomic.StoreInt32(&mine, 0)
|
||||
fmt.Println("shutting down mining")
|
||||
<-done
|
||||
}
|
||||
|
||||
259 api/test/paych.go Normal file
@ -0,0 +1,259 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/big"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin/paych"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/events"
|
||||
"github.com/filecoin-project/lotus/chain/events/state"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/wallet"
|
||||
)
|
||||
|
||||
func TestPaymentChannels(t *testing.T, b APIBuilder, blocktime time.Duration) {
|
||||
_ = os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, 2, OneMiner)
|
||||
|
||||
paymentCreator := n[0]
|
||||
paymentReceiver := n[1]
|
||||
miner := sn[0]
|
||||
|
||||
// get everyone connected
|
||||
addrs, err := paymentCreator.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := paymentReceiver.NetConnect(ctx, addrs); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrs); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// start mining blocks
|
||||
bm := NewBlockMiner(ctx, t, miner, blocktime)
|
||||
bm.MineBlocks()
|
||||
|
||||
// send some funds to register the receiver
|
||||
receiverAddr, err := paymentReceiver.WalletNew(ctx, wallet.ActSigType("secp256k1"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
SendFunds(ctx, t, paymentCreator, receiverAddr, abi.NewTokenAmount(1e18))
|
||||
|
||||
// setup the payment channel
|
||||
createrAddr, err := paymentCreator.WalletDefaultAddress(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
channelAmt := int64(100000)
|
||||
channelInfo, err := paymentCreator.PaychGet(ctx, createrAddr, receiverAddr, abi.NewTokenAmount(channelAmt))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
channel, err := paymentCreator.PaychGetWaitReady(ctx, channelInfo.WaitSentinel)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// allocate three lanes
|
||||
var lanes []uint64
|
||||
for i := 0; i < 3; i++ {
|
||||
lane, err := paymentCreator.PaychAllocateLane(ctx, channel)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
lanes = append(lanes, lane)
|
||||
}
|
||||
|
||||
// Make two vouchers each for each lane, then save on the other side
|
||||
// Note that the voucher with a value of 2000 has a higher nonce, so it
|
||||
// supersedes the voucher with a value of 1000
|
||||
for _, lane := range lanes {
|
||||
vouch1, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(1000), lane)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if vouch1.Voucher == nil {
|
||||
t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch1.Shortfall))
|
||||
}
|
||||
vouch2, err := paymentCreator.PaychVoucherCreate(ctx, channel, abi.NewTokenAmount(2000), lane)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if vouch2.Voucher == nil {
|
||||
t.Fatal(fmt.Errorf("Not enough funds to create voucher: missing %d", vouch2.Shortfall))
|
||||
}
|
||||
delta1, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch1.Voucher, nil, abi.NewTokenAmount(1000))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !delta1.Equals(abi.NewTokenAmount(1000)) {
|
||||
t.Fatal("voucher didn't have the right amount")
|
||||
}
|
||||
delta2, err := paymentReceiver.PaychVoucherAdd(ctx, channel, vouch2.Voucher, nil, abi.NewTokenAmount(1000))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !delta2.Equals(abi.NewTokenAmount(1000)) {
|
||||
t.Fatal("voucher didn't have the right amount")
|
||||
}
|
||||
}
|
||||
|
||||
// settle the payment channel
|
||||
settleMsgCid, err := paymentCreator.PaychSettle(ctx, channel)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
res := waitForMessage(ctx, t, paymentCreator, settleMsgCid, time.Second*10, "settle")
|
||||
if res.Receipt.ExitCode != 0 {
|
||||
t.Fatal("Unable to settle payment channel")
|
||||
}
|
||||
|
||||
// wait for the receiver to submit their vouchers
|
||||
ev := events.NewEvents(ctx, paymentCreator)
|
||||
preds := state.NewStatePredicates(paymentCreator)
|
||||
finished := make(chan struct{})
|
||||
err = ev.StateChanged(func(ts *types.TipSet) (done bool, more bool, err error) {
|
||||
act, err := paymentCreator.StateReadState(ctx, channel, ts.Key())
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
state := act.State.(paych.State)
|
||||
if state.ToSend.GreaterThanEqual(abi.NewTokenAmount(6000)) {
|
||||
return true, false, nil
|
||||
}
|
||||
return false, true, nil
|
||||
}, func(oldTs, newTs *types.TipSet, states events.StateChange, curH abi.ChainEpoch) (more bool, err error) {
|
||||
toSendChange := states.(*state.PayChToSendChange)
|
||||
if toSendChange.NewToSend.GreaterThanEqual(abi.NewTokenAmount(6000)) {
|
||||
close(finished)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}, func(ctx context.Context, ts *types.TipSet) error {
|
||||
return nil
|
||||
}, int(build.MessageConfidence)+1, build.SealRandomnessLookbackLimit, func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) {
|
||||
return preds.OnPaymentChannelActorChanged(channel, preds.OnToSendAmountChanges())(ctx, oldTs.Key(), newTs.Key())
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-finished:
|
||||
case <-time.After(time.Second):
|
||||
t.Fatal("Timed out waiting for receiver to submit vouchers")
|
||||
}
|
||||
|
||||
// wait for the settlement period to pass before collecting
|
||||
waitForBlocks(ctx, t, bm, paymentReceiver, receiverAddr, paych.SettleDelay)
|
||||
|
||||
creatorPreCollectBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// collect funds (from receiver, though either party can do it)
|
||||
collectMsg, err := paymentReceiver.PaychCollect(ctx, channel)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.Receipt.ExitCode != 0 {
|
||||
t.Fatal("unable to collect on payment channel")
|
||||
}
|
||||
|
||||
// Finally, check the balance for the creator
|
||||
currentCreatorBalance, err := paymentCreator.WalletBalance(ctx, createrAddr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// The highest nonce voucher that the creator sent on each lane is 2000
|
||||
totalVouchers := int64(len(lanes) * 2000)
|
||||
|
||||
// When receiver submits the tokens to the chain, creator should get a
|
||||
// refund on the remaining balance, which is
|
||||
// channel amount - total voucher value
|
||||
expectedRefund := channelAmt - totalVouchers
|
||||
delta := big.Sub(currentCreatorBalance, creatorPreCollectBalance)
|
||||
if !delta.Equals(abi.NewTokenAmount(expectedRefund)) {
|
||||
t.Fatalf("did not send correct funds from creator: expected %d, got %d", expectedRefund, delta)
|
||||
}
|
||||
|
||||
// shut down mining
|
||||
bm.Stop()
|
||||
}
|
||||
|
||||
func waitForBlocks(ctx context.Context, t *testing.T, bm *BlockMiner, paymentReceiver TestNode, receiverAddr address.Address, count int) {
|
||||
// We need to add null blocks in batches, if we add too many the chain can't sync
|
||||
batchSize := 60
|
||||
for i := 0; i < count; i += batchSize {
|
||||
size := batchSize
|
||||
if i > count {
|
||||
size = count - i
|
||||
}
|
||||
|
||||
// Add a batch of null blocks
|
||||
atomic.StoreInt64(&bm.nulls, int64(size-1))
|
||||
|
||||
// Add a real block
|
||||
m, err := paymentReceiver.MpoolPushMessage(ctx, &types.Message{
|
||||
To: builtin.BurntFundsActorAddr,
|
||||
From: receiverAddr,
|
||||
Value: types.NewInt(0),
|
||||
}, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = paymentReceiver.StateWaitMsg(ctx, m.Cid(), 1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func waitForMessage(ctx context.Context, t *testing.T, paymentCreator TestNode, msgCid cid.Cid, duration time.Duration, desc string) *api.MsgLookup {
|
||||
ctx, cancel := context.WithTimeout(ctx, duration)
|
||||
defer cancel()
|
||||
|
||||
fmt.Println("Waiting for", desc)
|
||||
res, err := paymentCreator.StateWaitMsg(ctx, msgCid, 1)
|
||||
if err != nil {
|
||||
fmt.Println("Error waiting for", desc, err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
if res.Receipt.ExitCode != 0 {
|
||||
t.Fatalf("did not successfully send %s", desc)
|
||||
}
|
||||
fmt.Println("Confirmed", desc)
|
||||
return res
|
||||
}
|
||||
@ -4,19 +4,39 @@ import (
	"context"
	"testing"

	"github.com/multiformats/go-multiaddr"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/build"
	"github.com/stretchr/testify/assert"
	"github.com/filecoin-project/lotus/miner"
)

type TestNode struct {
	api.FullNode
	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr
}

type TestStorageNode struct {
	api.StorageMiner
	// ListenAddr is the address on which an API server is listening, if an
	// API server is created for this Node
	ListenAddr multiaddr.Multiaddr

	MineOne func(context.Context) error
	MineOne func(context.Context, miner.MineReq) error
}

var PresealGenesis = -1

const GenesisPreseals = 2

type StorageMiner struct {
	Full    int
	Preseal int
}

// APIBuilder is a function which is invoked in test suite to provide
@ -24,7 +44,7 @@ type TestStorageNode struct {
//
// storage array defines storage nodes, numbers in the array specify full node
// index the storage node 'belongs' to
type APIBuilder func(t *testing.T, nFull int, storage []int) ([]TestNode, []TestStorageNode)
type APIBuilder func(t *testing.T, nFull int, storage []StorageMiner) ([]TestNode, []TestStorageNode)
type testSuite struct {
	makeNodes APIBuilder
}
@ -39,25 +59,28 @@ func TestApis(t *testing.T, b APIBuilder) {
	t.Run("id", ts.testID)
	t.Run("testConnectTwo", ts.testConnectTwo)
	t.Run("testMining", ts.testMining)
	t.Run("testMiningReal", ts.testMiningReal)
}

var OneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}}

func (ts *testSuite) testVersion(t *testing.T) {
	build.RunningNodeType = build.NodeFull

	ctx := context.Background()
	apis, _ := ts.makeNodes(t, 1, []int{0})
	apis, _ := ts.makeNodes(t, 1, OneMiner)
	api := apis[0]

	v, err := api.Version(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if v.Version != build.BuildVersion {
		t.Error("Version didn't work properly")
	}
	require.Equal(t, v.Version, build.BuildVersion)
}

func (ts *testSuite) testID(t *testing.T) {
	ctx := context.Background()
	apis, _ := ts.makeNodes(t, 1, []int{0})
	apis, _ := ts.makeNodes(t, 1, OneMiner)
	api := apis[0]

	id, err := api.ID(ctx)
@ -69,7 +92,7 @@ func (ts *testSuite) testID(t *testing.T) {

func (ts *testSuite) testConnectTwo(t *testing.T) {
	ctx := context.Background()
	apis, _ := ts.makeNodes(t, 2, []int{0})
	apis, _ := ts.makeNodes(t, 2, OneMiner)

	p, err := apis[0].NetPeers(ctx)
	if err != nil {

86
api/test/util.go
Normal file
@ -0,0 +1,86 @@
package test

import (
	"context"
	"testing"
	"time"

	"github.com/filecoin-project/go-state-types/abi"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/miner"
)

func SendFunds(ctx context.Context, t *testing.T, sender TestNode, addr address.Address, amount abi.TokenAmount) {
	senderAddr, err := sender.WalletDefaultAddress(ctx)
	if err != nil {
		t.Fatal(err)
	}

	msg := &types.Message{
		From:  senderAddr,
		To:    addr,
		Value: amount,
	}

	sm, err := sender.MpoolPushMessage(ctx, msg, nil)
	if err != nil {
		t.Fatal(err)
	}
	res, err := sender.StateWaitMsg(ctx, sm.Cid(), 1)
	if err != nil {
		t.Fatal(err)
	}
	if res.Receipt.ExitCode != 0 {
		t.Fatal("did not successfully send money")
	}
}

func MineUntilBlock(ctx context.Context, t *testing.T, fn TestNode, sn TestStorageNode, cb func(abi.ChainEpoch)) {
	for i := 0; i < 1000; i++ {
		var success bool
		var err error
		var epoch abi.ChainEpoch
		wait := make(chan struct{})
		mineErr := sn.MineOne(ctx, miner.MineReq{
			Done: func(win bool, ep abi.ChainEpoch, e error) {
				success = win
				err = e
				epoch = ep
				wait <- struct{}{}
			},
		})
		if mineErr != nil {
			t.Fatal(mineErr)
		}
		<-wait
		if err != nil {
			t.Fatal(err)
		}
		if success {
			// Wait until it shows up on the given full nodes ChainHead
			nloops := 50
			for i := 0; i < nloops; i++ {
				ts, err := fn.ChainHead(ctx)
				if err != nil {
					t.Fatal(err)
				}
				if ts.Height() == epoch {
					break
				}
				if i == nloops-1 {
					t.Fatal("block never managed to sync to node")
				}
				time.Sleep(time.Millisecond * 10)
			}

			if cb != nil {
				cb(epoch)
			}
			return
		}
		t.Log("did not mine block, trying again", i)
	}
	t.Fatal("failed to mine 1000 times in a row...")
}

324
api/test/window_post.go
Normal file
@ -0,0 +1,324 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/extern/sector-storage/mock"
|
||||
sealing "github.com/filecoin-project/lotus/extern/storage-sealing"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
bminer "github.com/filecoin-project/lotus/miner"
|
||||
"github.com/filecoin-project/lotus/node/impl"
|
||||
)
|
||||
|
||||
func init() {
|
||||
err := os.Setenv("BELLMAN_NO_GPU", "1")
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to set BELLMAN_NO_GPU env variable: %s", err))
|
||||
}
|
||||
}
|
||||
|
||||
func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, 1, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
build.Clock.Sleep(time.Second)
|
||||
|
||||
mine := true
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for mine {
|
||||
build.Clock.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, bminer.MineReq{Done: func(bool, abi.ChainEpoch, error) {
|
||||
|
||||
}}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
pledgeSectors(t, ctx, miner, nSectors, 0, nil)
|
||||
|
||||
mine = false
|
||||
<-done
|
||||
}
|
||||
|
||||
func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n, existing int, blockNotif <-chan struct{}) {
|
||||
for i := 0; i < n; i++ {
|
||||
err := miner.PledgeSector(ctx)
|
||||
require.NoError(t, err)
|
||||
if i%3 == 0 && blockNotif != nil {
|
||||
<-blockNotif
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM
|
||||
require.NoError(t, err)
|
||||
fmt.Printf("Sectors: %d\n", len(s))
|
||||
if len(s) >= n+existing {
|
||||
break
|
||||
}
|
||||
|
||||
build.Clock.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
fmt.Printf("All sectors is fsm\n")
|
||||
|
||||
s, err := miner.SectorsList(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
toCheck := map[abi.SectorNumber]struct{}{}
|
||||
for _, number := range s {
|
||||
toCheck[number] = struct{}{}
|
||||
}
|
||||
|
||||
for len(toCheck) > 0 {
|
||||
for n := range toCheck {
|
||||
st, err := miner.SectorsStatus(ctx, n, false)
|
||||
require.NoError(t, err)
|
||||
if st.State == api.SectorState(sealing.Proving) {
|
||||
delete(toCheck, n)
|
||||
}
|
||||
if strings.Contains(string(st.State), "Fail") {
|
||||
t.Fatal("sector in a failed state", st.State)
|
||||
}
|
||||
}
|
||||
|
||||
build.Clock.Sleep(100 * time.Millisecond)
|
||||
fmt.Printf("WaitSeal: %d\n", len(s))
|
||||
}
|
||||
}
|
||||
|
||||
func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) {
|
||||
ctx := context.Background()
|
||||
n, sn := b(t, 1, OneMiner)
|
||||
client := n[0].FullNode.(*impl.FullNodeAPI)
|
||||
miner := sn[0]
|
||||
|
||||
addrinfo, err := client.NetAddrsListen(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := miner.NetConnect(ctx, addrinfo); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
build.Clock.Sleep(time.Second)
|
||||
|
||||
mine := true
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
for mine {
|
||||
build.Clock.Sleep(blocktime)
|
||||
if err := sn[0].MineOne(ctx, MineNext); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
pledgeSectors(t, ctx, miner, nSectors, 0, nil)
|
||||
|
||||
maddr, err := miner.ActorAddress(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
mid, err := address.IDFromAddress(maddr)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Printf("Running one proving period\n")
|
||||
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 {
|
||||
break
|
||||
}
|
||||
|
||||
if head.Height()%100 == 0 {
|
||||
fmt.Printf("@%d\n", head.Height())
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
ssz, err := miner.ActorSectorSize(ctx, maddr)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+GenesisPreseals)))
|
||||
|
||||
fmt.Printf("Drop some sectors\n")
|
||||
|
||||
// Drop 2 sectors from deadline 2 partition 0 (full partition / deadline)
|
||||
{
|
||||
parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(parts), 0)
|
||||
|
||||
secs := parts[0].AllSectors
|
||||
require.NoError(t, err)
|
||||
n, err := secs.Count()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), n)
|
||||
|
||||
// Drop the partition
|
||||
err = secs.ForEach(func(sid uint64) error {
|
||||
return miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkCorrupted(abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: abi.SectorNumber(sid),
|
||||
}, true)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
var s abi.SectorID
|
||||
|
||||
// Drop 1 sectors from deadline 3 partition 0
|
||||
{
|
||||
parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(parts), 0)
|
||||
|
||||
secs := parts[0].AllSectors
|
||||
require.NoError(t, err)
|
||||
n, err := secs.Count()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), n)
|
||||
|
||||
// Drop the sector
|
||||
sn, err := secs.First()
|
||||
require.NoError(t, err)
|
||||
|
||||
all, err := secs.All(2)
|
||||
require.NoError(t, err)
|
||||
fmt.Println("the sectors", all)
|
||||
|
||||
s = abi.SectorID{
|
||||
Miner: abi.ActorID(mid),
|
||||
Number: abi.SectorNumber(sn),
|
||||
}
|
||||
|
||||
err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, true)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Printf("Go through another PP, wait for sectors to become faulty\n")
|
||||
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > di.PeriodStart+(di.WPoStProvingPeriod)+2 {
|
||||
break
|
||||
}
|
||||
|
||||
if head.Height()%100 == 0 {
|
||||
fmt.Printf("@%d\n", head.Height())
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
|
||||
sectors := p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
|
||||
require.Equal(t, nSectors+GenesisPreseals-3, int(sectors)) // -3 just removed sectors
|
||||
|
||||
fmt.Printf("Recover one sector\n")
|
||||
|
||||
err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false)
|
||||
require.NoError(t, err)
|
||||
|
||||
di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > di.PeriodStart+di.WPoStProvingPeriod+2 {
|
||||
break
|
||||
}
|
||||
|
||||
if head.Height()%100 == 0 {
|
||||
fmt.Printf("@%d\n", head.Height())
|
||||
}
|
||||
build.Clock.Sleep(blocktime)
|
||||
}
|
||||
|
||||
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
|
||||
sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
|
||||
require.Equal(t, nSectors+GenesisPreseals-2, int(sectors)) // -2 not recovered sectors
|
||||
|
||||
// pledge a sector after recovery
|
||||
|
||||
pledgeSectors(t, ctx, miner, 1, nSectors, nil)
|
||||
|
||||
{
|
||||
// wait a bit more
|
||||
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
waitUntil := head.Height() + 10
|
||||
|
||||
for {
|
||||
head, err := client.ChainHead(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
if head.Height() > waitUntil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, p.MinerPower, p.TotalPower)
|
||||
|
||||
sectors = p.MinerPower.RawBytePower.Uint64() / uint64(ssz)
|
||||
require.Equal(t, nSectors+GenesisPreseals-2+1, int(sectors)) // -2 not recovered sectors + 1 just pledged
|
||||
|
||||
mine = false
|
||||
<-done
|
||||
}
|
||||
110
api/types.go
@ -2,41 +2,20 @@ package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
datatransfer "github.com/filecoin-project/go-data-transfer"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/ipfs/go-cid"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
)
|
||||
|
||||
type DealState = uint64
|
||||
|
||||
const (
|
||||
DealUnknown = DealState(iota)
|
||||
DealRejected // Provider didn't like the proposal
|
||||
DealAccepted // Proposal accepted, data moved
|
||||
DealStaged // Data put into the sector
|
||||
DealSealing // Data in process of being sealed
|
||||
|
||||
DealFailed
|
||||
DealComplete
|
||||
|
||||
// Internal
|
||||
|
||||
DealError // deal failed with an unexpected error
|
||||
|
||||
DealNoUpdate = DealUnknown
|
||||
)
|
||||
|
||||
var DealStates = []string{
|
||||
"DealUnknown",
|
||||
"DealRejected",
|
||||
"DealAccepted",
|
||||
"DealStaged",
|
||||
"DealSealing",
|
||||
"DealFailed",
|
||||
"DealComplete",
|
||||
"DealError",
|
||||
}
|
||||
|
||||
// TODO: check if this exists anywhere else
|
||||
|
||||
type MultiaddrSlice []ma.Multiaddr
|
||||
|
||||
func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) {
|
||||
@ -57,3 +36,74 @@ func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) {
|
||||
}
|
||||
|
||||
var _ json.Unmarshaler = new(MultiaddrSlice)
|
||||
|
||||
type ObjStat struct {
|
||||
Size uint64
|
||||
Links uint64
|
||||
}
|
||||
|
||||
type PubsubScore struct {
|
||||
ID peer.ID
|
||||
Score *pubsub.PeerScoreSnapshot
|
||||
}
|
||||
|
||||
type MessageSendSpec struct {
|
||||
MaxFee abi.TokenAmount
|
||||
}
|
||||
|
||||
var DefaultMessageSendSpec = MessageSendSpec{
|
||||
// MaxFee of 0.1FIL
|
||||
MaxFee: abi.NewTokenAmount(int64(build.FilecoinPrecision) / 10),
|
||||
}
|
||||
|
||||
func (ms *MessageSendSpec) Get() MessageSendSpec {
|
||||
if ms == nil {
|
||||
return DefaultMessageSendSpec
|
||||
}
|
||||
|
||||
return *ms
|
||||
}
|
||||
|
||||
type DataTransferChannel struct {
|
||||
TransferID datatransfer.TransferID
|
||||
Status datatransfer.Status
|
||||
BaseCID cid.Cid
|
||||
IsInitiator bool
|
||||
IsSender bool
|
||||
Voucher string
|
||||
Message string
|
||||
OtherPeer peer.ID
|
||||
Transferred uint64
|
||||
}
|
||||
|
||||
// NewDataTransferChannel constructs an API DataTransferChannel type from full channel state snapshot and a host id
|
||||
func NewDataTransferChannel(hostID peer.ID, channelState datatransfer.ChannelState) DataTransferChannel {
|
||||
channel := DataTransferChannel{
|
||||
TransferID: channelState.TransferID(),
|
||||
Status: channelState.Status(),
|
||||
BaseCID: channelState.BaseCID(),
|
||||
IsSender: channelState.Sender() == hostID,
|
||||
Message: channelState.Message(),
|
||||
}
|
||||
stringer, ok := channelState.Voucher().(fmt.Stringer)
|
||||
if ok {
|
||||
channel.Voucher = stringer.String()
|
||||
} else {
|
||||
voucherJSON, err := json.Marshal(channelState.Voucher())
|
||||
if err != nil {
|
||||
channel.Voucher = fmt.Errorf("Voucher Serialization: %w", err).Error()
|
||||
} else {
|
||||
channel.Voucher = string(voucherJSON)
|
||||
}
|
||||
}
|
||||
if channel.IsSender {
|
||||
channel.IsInitiator = !channelState.IsPull()
|
||||
channel.Transferred = channelState.Sent()
|
||||
channel.OtherPeer = channelState.Recipient()
|
||||
} else {
|
||||
channel.IsInitiator = channelState.IsPull()
|
||||
channel.Transferred = channelState.Received()
|
||||
channel.OtherPeer = channelState.Sender()
|
||||
}
|
||||
return channel
|
||||
}
|
||||
|
||||
@ -4,12 +4,12 @@ import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/go-state-types/crypto"
)

type SignFunc = func(context.Context, []byte) (*types.Signature, error)
type SignFunc = func(context.Context, []byte) (*crypto.Signature, error)

type Signer func(context.Context, address.Address, []byte) (*types.Signature, error)
type Signer func(context.Context, address.Address, []byte) (*crypto.Signature, error)

type Signable interface {
	Sign(context.Context, SignFunc) error
@ -17,7 +17,7 @@ type Signable interface {

func SignWith(ctx context.Context, signer Signer, addr address.Address, signable ...Signable) error {
	for _, s := range signable {
		err := s.Sign(ctx, func(ctx context.Context, b []byte) (*types.Signature, error) {
		err := s.Sign(ctx, func(ctx context.Context, b []byte) (*crypto.Signature, error) {
			return signer(ctx, addr, b)
		})
		if err != nil {

177
bin/dist_get
@ -1,177 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
GOCC=${GOCC=go}
|
||||
|
||||
die() {
|
||||
echo "$@" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
have_binary() {
|
||||
type "$1" > /dev/null 2> /dev/null
|
||||
}
|
||||
|
||||
check_writable() {
|
||||
printf "" > "$1" && rm "$1"
|
||||
}
|
||||
|
||||
try_download() {
|
||||
url="$1"
|
||||
output="$2"
|
||||
command="$3"
|
||||
util_name="$(set -- $command; echo "$1")"
|
||||
|
||||
if ! have_binary "$util_name"; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
printf '==> Using %s to download "%s" to "%s"\n' "$util_name" "$url" "$output"
|
||||
if eval "$command"; then
|
||||
echo "==> Download complete!"
|
||||
return
|
||||
else
|
||||
echo "error: couldn't download with $util_name ($?)"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
download() {
|
||||
dl_url="$1"
|
||||
dl_output="$2"
|
||||
|
||||
test "$#" -eq "2" || die "download requires exactly two arguments, was given $@"
|
||||
|
||||
if ! check_writable "$dl_output"; then
|
||||
die "download error: cannot write to $dl_output"
|
||||
fi
|
||||
|
||||
try_download "$dl_url" "$dl_output" "wget '$dl_url' -O '$dl_output'" && return
|
||||
try_download "$dl_url" "$dl_output" "curl --silent --fail --output '$dl_output' '$dl_url'" && return
|
||||
try_download "$dl_url" "$dl_output" "fetch '$dl_url' -o '$dl_output'" && return
|
||||
try_download "$dl_url" "$dl_output" "http '$dl_url' > '$dl_output'" && return
|
||||
try_download "$dl_url" "$dl_output" "ftp -o '$dl_output' '$dl_url'" && return
|
||||
|
||||
die "Unable to download $dl_url. exiting."
|
||||
}
|
||||
|
||||
unarchive() {
|
||||
ua_archivetype="$1"
|
||||
ua_infile="$2"
|
||||
ua_outfile="$3"
|
||||
ua_distname="$4"
|
||||
ua_binpostfix=""
|
||||
ua_os=$(uname -o)
|
||||
|
||||
if [ "$ua_os" = "Msys" ] || [ "$ua_os" = "Cygwin" ] ; then
|
||||
ua_binpostfix=".exe"
|
||||
fi
|
||||
ua_outfile="$ua_outfile$ua_binpostfix"
|
||||
|
||||
if ! check_writable "$ua_outfile"; then
|
||||
die "unarchive error: cannot write to $ua_outfile"
|
||||
fi
|
||||
|
||||
case "$ua_archivetype" in
|
||||
tar.gz)
|
||||
if have_binary tar; then
|
||||
echo "==> using 'tar' to extract binary from archive"
|
||||
< "$ua_infile" tar -Ozxf - "$ua_distname/$ua_distname$ua_binpostfix" > "$ua_outfile" \
|
||||
|| die "tar has failed"
|
||||
else
|
||||
die "no binary on system for extracting tar files"
|
||||
fi
|
||||
;;
|
||||
zip)
|
||||
if have_binary unzip; then
|
||||
echo "==> using 'unzip' to extract binary from archive"
|
||||
unzip -p "$ua_infile" "$ua_distname/$ua_distname$ua_binpostfix" > "$ua_outfile" \
|
||||
|| die "unzip has failed"
|
||||
else
|
||||
die "no installed method for extracting .zip archives"
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
die "unrecognized archive type '$ua_archivetype'"
|
||||
esac
|
||||
|
||||
chmod +x "$ua_outfile" || die "chmod has failed"
|
||||
}
|
||||
|
||||
get_go_vars() {
|
||||
if [ ! -z "$GOOS" ] && [ ! -z "$GOARCH" ]; then
|
||||
printf "%s-%s" "$GOOS" "$GOARCH"
|
||||
elif have_binary go; then
|
||||
printf "%s-%s" "$($GOCC env GOOS)" "$($GOCC env GOARCH)"
|
||||
else
|
||||
die "no way of determining system GOOS and GOARCH\nPlease manually set GOOS and GOARCH then retry."
|
||||
fi
|
||||
}
|
||||
|
||||
mkurl() {
|
||||
m_root="$1"
|
||||
m_name="$2"
|
||||
m_vers="$3"
|
||||
m_archive="$4"
|
||||
m_govars=$(get_go_vars) || die "could not get go env vars"
|
||||
|
||||
echo "https://ipfs.io$m_root/$m_name/$m_vers/${m_name}_${m_vers}_$m_govars.$m_archive"
|
||||
}
|
||||
|
||||
distroot="$1"
|
||||
distname="$2"
|
||||
outpath="$3"
|
||||
version="$4"
|
||||
|
||||
if [ -z "$distroot" ] || [ -z "$distname" ] || [ -z "$outpath" ] || [ -z "$version" ]; then
|
||||
die "usage: dist_get <distroot> <distname> <outpath> <version>"
|
||||
fi
|
||||
|
||||
case $version in
|
||||
v*)
|
||||
# correct input
|
||||
;;
|
||||
*)
|
||||
echo "invalid version '$version'" >&2
|
||||
die "versions must begin with 'v', for example: v0.4.0"
|
||||
;;
|
||||
esac
|
||||
|
||||
# TODO: don't depend on the go tool being installed to detect this
|
||||
goenv=$(get_go_vars) || die "could not get go env vars"
|
||||
|
||||
case $goenv in
|
||||
linux-*)
|
||||
archive="tar.gz"
|
||||
;;
|
||||
darwin-*)
|
||||
archive="tar.gz"
|
||||
;;
|
||||
windows-*)
|
||||
archive="zip"
|
||||
;;
|
||||
freebsd-*)
|
||||
archive="tar.gz"
|
||||
;;
|
||||
openbsd-*)
|
||||
archive="tar.gz"
|
||||
;;
|
||||
*)
|
||||
echo "unrecognized system environment: $goenv" >&2
|
||||
die "currently only linux, darwin, windows and freebsd are supported by this script"
|
||||
esac
|
||||
|
||||
|
||||
mkdir -p bin/tmp
|
||||
|
||||
url=$(mkurl "$distroot" "$distname" "$version" "$archive")
|
||||
tmpfi="bin/tmp/$distname.$archive"
|
||||
|
||||
download "$url" "$tmpfi"
|
||||
if [ $? -ne 0 ]; then
|
||||
die "failed to download $url to $tmpfi"
|
||||
fi
|
||||
|
||||
unarchive "$archive" "$tmpfi" "$outpath" "$distname"
|
||||
if [ $? -ne 0 ]; then
|
||||
die "failed to extract archive $tmpfi"
|
||||
fi
|
||||
@ -13,6 +13,10 @@ import (
|
||||
)
|
||||
|
||||
func BuiltinBootstrap() ([]peer.AddrInfo, error) {
|
||||
if DisableBuiltinAssets {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var out []peer.AddrInfo
|
||||
|
||||
b := rice.MustFindBox("bootstrap")
|
||||
|
||||
@ -1,8 +1,9 @@
|
||||
/dns4/lotus-bootstrap-0.dfw.fil-test.net/tcp/1347/p2p/12D3KooWHwGBSiLR5ts7KW9MgH4BMzC2iXe18kwAQ8Ee3LUd1jeR
|
||||
/dns4/lotus-bootstrap-1.dfw.fil-test.net/tcp/1347/p2p/12D3KooWCLFaawdhLGcSpiqg43DtZ9QzPQ6HcB8Vvyu2Cnta8UWc
|
||||
/dns4/lotus-bootstrap-0.fra.fil-test.net/tcp/1347/p2p/12D3KooWMmaL7eaUCF6tVAghVmgozxz4uztbuFUQv6dyFpHRarHR
|
||||
/dns4/lotus-bootstrap-1.fra.fil-test.net/tcp/1347/p2p/12D3KooWLLpNYoKdf9NgcWudBhXLdTcXncqAsTzozw1scMMu6nS5
|
||||
/dns4/lotus-bootstrap-0.sin.fil-test.net/tcp/1347/p2p/12D3KooWCNL9vXaXwNs3Bu8uRAJK4pxpCyPeM7jZLSDpJma1wrV8
|
||||
/dns4/lotus-bootstrap-1.sin.fil-test.net/tcp/1347/p2p/12D3KooWNGGxFda1eC5U2YKAgs4ypoFHn3Z3xHCsjmFdrCcytoxm
|
||||
/dns4/bootstrap-0.testnet.fildev.network/tcp/1347/p2p/12D3KooWJTUBUjtzWJGWU1XSiY21CwmHaCNLNYn2E7jqHEHyZaP7
|
||||
/dns4/bootstrap-1.testnet.fildev.network/tcp/1347/p2p/12D3KooW9yeKXha4hdrJKq74zEo99T8DhriQdWNoojWnnQbsgB3v
|
||||
/dns4/bootstrap-2.testnet.fildev.network/tcp/1347/p2p/12D3KooWCrx8yVG9U9Kf7w8KLN3Edkj5ZKDhgCaeMqQbcQUoB6CT
|
||||
/dns4/bootstrap-4.testnet.fildev.network/tcp/1347/p2p/12D3KooWPkL9LrKRQgHtq7kn9ecNhGU9QaziG8R5tX8v9v7t3h34
|
||||
/dns4/bootstrap-3.testnet.fildev.network/tcp/1347/p2p/12D3KooWKYSsbpgZ3HAjax5M1BXCwXLa6gVkUARciz7uN3FNtr7T
|
||||
/dns4/bootstrap-5.testnet.fildev.network/tcp/1347/p2p/12D3KooWQYzqnLASJAabyMpPb1GcWZvNSe7JDcRuhdRqonFoiK9W
|
||||
/dns4/lotus-bootstrap.forceup.cn/tcp/41778/p2p/12D3KooWFQsv3nRMUevZNWWsY1Wu6NUzUbawnWU5NcRhgKuJA37C
|
||||
/dns4/bootstrap-0.starpool.in/tcp/12757/p2p/12D3KooWGHpBMeZbestVEWkfdnC9u7p6uFHXL1n7m1ZBqsEmiUzz
|
||||
/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u
|
||||
/dns4/bootstrap-1.starpool.in/tcp/12757/p2p/12D3KooWQZrGH1PxSNZPum99M1zNvjNFM33d1AAu5DcvdHptuU7u
|
||||
10
build/clock.go
Normal file
@ -0,0 +1,10 @@
package build

import "github.com/raulk/clock"

// Clock is the global clock for the system. In standard builds,
// we use a real-time clock, which maps to the `time` package.
//
// Tests that need control of time can replace this variable with
// clock.NewMock(). Always use real time for socket/stream deadlines.
var Clock = clock.New()
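
For illustration only (not part of the commit above): a test that needs deterministic time could swap in the mock clock the comment refers to, assuming the mock API (NewMock/Add) of the upstream github.com/raulk/clock package, roughly like this:

	mock := clock.NewMock()    // deterministic clock, starts frozen
	build.Clock = mock         // replace the global real-time clock
	mock.Add(30 * time.Second) // manually advance simulated time
	// restore the real clock once the test is done
	defer func() { build.Clock = clock.New() }()
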
83
build/drand.go
Normal file
@ -0,0 +1,83 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
|
||||
type DrandEnum int
|
||||
|
||||
func DrandConfigSchedule() dtypes.DrandSchedule {
|
||||
out := dtypes.DrandSchedule{}
|
||||
for start, config := range DrandSchedule {
|
||||
out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[config]})
|
||||
}
|
||||
|
||||
sort.Slice(out, func(i, j int) bool {
|
||||
return out[i].Start < out[j].Start
|
||||
})
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
const (
|
||||
DrandMainnet DrandEnum = iota + 1
|
||||
DrandTestnet
|
||||
DrandDevnet
|
||||
DrandLocalnet
|
||||
DrandIncentinet
|
||||
)
|
||||
|
||||
var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{
|
||||
DrandMainnet: {
|
||||
Servers: []string{
|
||||
"https://api.drand.sh",
|
||||
"https://api2.drand.sh",
|
||||
"https://api3.drand.sh",
|
||||
},
|
||||
Relays: []string{
|
||||
"/dnsaddr/api.drand.sh/",
|
||||
"/dnsaddr/api2.drand.sh/",
|
||||
"/dnsaddr/api3.drand.sh/",
|
||||
},
|
||||
ChainInfoJSON: `{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`,
|
||||
},
|
||||
DrandTestnet: {
|
||||
Servers: []string{
|
||||
"https://pl-eu.testnet.drand.sh",
|
||||
"https://pl-us.testnet.drand.sh",
|
||||
"https://pl-sin.testnet.drand.sh",
|
||||
},
|
||||
Relays: []string{
|
||||
"/dnsaddr/pl-eu.testnet.drand.sh/",
|
||||
"/dnsaddr/pl-us.testnet.drand.sh/",
|
||||
"/dnsaddr/pl-sin.testnet.drand.sh/",
|
||||
},
|
||||
ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`,
|
||||
},
|
||||
DrandDevnet: {
|
||||
Servers: []string{
|
||||
"https://dev1.drand.sh",
|
||||
"https://dev2.drand.sh",
|
||||
},
|
||||
Relays: []string{
|
||||
"/dnsaddr/dev1.drand.sh/",
|
||||
"/dnsaddr/dev2.drand.sh/",
|
||||
},
|
||||
ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`,
|
||||
},
|
||||
DrandIncentinet: {
|
||||
Servers: []string{
|
||||
"https://pl-eu.incentinet.drand.sh",
|
||||
"https://pl-us.incentinet.drand.sh",
|
||||
"https://pl-sin.incentinet.drand.sh",
|
||||
},
|
||||
Relays: []string{
|
||||
"/dnsaddr/pl-eu.incentinet.drand.sh/",
|
||||
"/dnsaddr/pl-us.incentinet.drand.sh/",
|
||||
"/dnsaddr/pl-sin.incentinet.drand.sh/",
|
||||
},
|
||||
ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`,
|
||||
},
|
||||
}
|
||||
15
build/flags.go
Normal file
@ -0,0 +1,15 @@
package build

// DisableBuiltinAssets disables the resolution of go.rice boxes that store
// built-in assets, such as proof parameters, bootstrap peers, genesis blocks,
// etc.
//
// When this value is set to true, it is expected that the user will
// provide any such configurations through the Lotus API itself.
//
// This is useful when you're using Lotus as a library, such as to orchestrate
// test scenarios, or for other purposes where you don't need to use the
// defaults shipped with the binary.
//
// For this flag to be effective, it must be enabled _before_ instantiating Lotus.
var DisableBuiltinAssets = false
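
A minimal usage sketch (illustrative, not part of this commit): an embedder shipping its own assets would flip the flag before wiring up the node, then supply genesis and proof parameters through the Lotus API, as the comment above describes.

	build.DisableBuiltinAssets = true // must be set before Lotus is instantiated
	// ... construct the Lotus node here and provide genesis/params via its API ...
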
@ -1,4 +1 @@
package build

const ForkCCM = 1750
const ForkNoPowerEPSUpdates = 16450

@ -2,7 +2,7 @@ package build

import (
	rice "github.com/GeertJohan/go.rice"
	logging "github.com/ipfs/go-log"
	logging "github.com/ipfs/go-log/v2"
)

// moved from now-defunct build/paramfetch.go
@ -11,12 +11,12 @@ var log = logging.Logger("build")
func MaybeGenesis() []byte {
	builtinGen, err := rice.FindBox("genesis")
	if err != nil {
		log.Warn("loading built-in genesis: %s", err)
		log.Warnf("loading built-in genesis: %s", err)
		return nil
	}
	genBytes, err := builtinGen.Bytes("devnet.car")
	if err != nil {
		log.Warn("loading built-in genesis: %s", err)
		log.Warnf("loading built-in genesis: %s", err)
	}

	return genBytes

Binary file not shown.
@ -2,4 +2,6 @@ package build

import rice "github.com/GeertJohan/go.rice"

var ParametersJson = rice.MustFindBox("proof-params").MustBytes("parameters.json")
func ParametersJSON() []byte {
	return rice.MustFindBox("proof-params").MustBytes("parameters.json")
}

41
build/params_2k.go
Normal file
@ -0,0 +1,41 @@
|
||||
// +build debug 2k
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
)
|
||||
|
||||
const UpgradeBreezeHeight = -1
|
||||
const BreezeGasTampingDuration = 0
|
||||
|
||||
const UpgradeSmokeHeight = -1
|
||||
const UpgradeIgnitionHeight = -2
|
||||
const UpgradeLiftoffHeight = -3
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
func init() {
|
||||
policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1)
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048))
|
||||
policy.SetMinVerifiedDealSize(abi.NewStoragePower(256))
|
||||
|
||||
BuildType |= Build2k
|
||||
}
|
||||
|
||||
const BlockDelaySecs = uint64(4)
|
||||
|
||||
const PropagationDelaySecs = uint64(1)
|
||||
|
||||
// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after
|
||||
// which the miner is slashed
|
||||
//
|
||||
// Epochs
|
||||
const SlashablePowerDelay = 20
|
||||
|
||||
// Epochs
|
||||
const InteractivePoRepConfidence = 6
|
||||
@ -2,36 +2,9 @@
|
||||
|
||||
package build
|
||||
|
||||
import "os"
|
||||
|
||||
var SectorSizes = []uint64{1024}
|
||||
|
||||
// Seconds
|
||||
const BlockDelay = 6
|
||||
|
||||
const PropagationDelay = 3
|
||||
|
||||
// FallbackPoStDelay is the number of epochs the miner needs to wait after
|
||||
// ElectionPeriodStart before starting fallback post computation
|
||||
//
|
||||
// Epochs
|
||||
const FallbackPoStDelay = 10
|
||||
|
||||
// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after
|
||||
// which the miner is slashed
|
||||
//
|
||||
// Epochs
|
||||
const SlashablePowerDelay = 20
|
||||
|
||||
// Epochs
|
||||
const InteractivePoRepDelay = 2
|
||||
|
||||
// Epochs
|
||||
const InteractivePoRepConfidence = 6
|
||||
|
||||
// Bytes
|
||||
var MinimumMinerPower uint64 = 2 << 10 // 2KiB
|
||||
|
||||
func init() {
|
||||
os.Setenv("TRUST_PARAMS", "1")
|
||||
InsecurePoStValidation = true
|
||||
BuildType |= BuildDebug
|
||||
}
|
||||
|
||||
// NOTE: Also includes settings from params_2k
|
||||
|
||||
@ -1,99 +0,0 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// Core network constants
|
||||
|
||||
// /////
|
||||
// Storage
|
||||
|
||||
const UnixfsChunkSize uint64 = 1 << 20
|
||||
const UnixfsLinksPerLevel = 1024
|
||||
|
||||
func SupportedSectorSize(ssize uint64) bool {
|
||||
for _, ss := range SectorSizes {
|
||||
if ssize == ss {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// /////
|
||||
// Payments
|
||||
|
||||
// Epochs
|
||||
const PaymentChannelClosingDelay = 6 * 60 * 60 / BlockDelay // six hours
|
||||
|
||||
// /////
|
||||
// Consensus / Network
|
||||
|
||||
// Seconds
|
||||
const AllowableClockDrift = 1
|
||||
|
||||
// Epochs
|
||||
const ForkLengthThreshold = Finality
|
||||
|
||||
// Blocks (e)
|
||||
const BlocksPerEpoch = 5
|
||||
|
||||
// Epochs
|
||||
const Finality = 500
|
||||
|
||||
// constants for Weight calculation
|
||||
// The ratio of weight contributed by short-term vs long-term factors in a given round
|
||||
const WRatioNum = int64(1)
|
||||
const WRatioDen = 2
|
||||
|
||||
// /////
|
||||
// Proofs
|
||||
|
||||
// Epochs
|
||||
const SealRandomnessLookback = Finality
|
||||
|
||||
// Epochs
|
||||
const SealRandomnessLookbackLimit = SealRandomnessLookback + 2000
|
||||
|
||||
// /////
|
||||
// Mining
|
||||
|
||||
// Epochs
|
||||
const EcRandomnessLookback = 300
|
||||
|
||||
const PowerCollateralProportion = 5
|
||||
const PerCapitaCollateralProportion = 1
|
||||
const CollateralPrecision = 1000
|
||||
|
||||
// /////
|
||||
// Devnet settings
|
||||
|
||||
const TotalFilecoin = 2_000_000_000
|
||||
const MiningRewardTotal = 1_400_000_000
|
||||
|
||||
const InitialRewardStr = "153856861913558700202"
|
||||
|
||||
var InitialReward *big.Int
|
||||
|
||||
const FilecoinPrecision = 1_000_000_000_000_000_000
|
||||
|
||||
// TODO: Move other important consts here
|
||||
|
||||
func init() {
|
||||
InitialReward = new(big.Int)
|
||||
|
||||
var ok bool
|
||||
InitialReward, ok = InitialReward.
|
||||
SetString(InitialRewardStr, 10)
|
||||
if !ok {
|
||||
panic("could not parse InitialRewardStr")
|
||||
}
|
||||
}
|
||||
|
||||
// Sync
|
||||
const BadBlockCacheSize = 1 << 15
|
||||
|
||||
// assuming 4000 messages per round, this lets us not lose any messages across a
|
||||
// 10 block reorg.
|
||||
const BlsSignatureCacheSize = 40000
|
||||
52
build/params_shared_funcs.go
Normal file
@ -0,0 +1,52 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/protocol"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
|
||||
"github.com/filecoin-project/lotus/node/modules/dtypes"
|
||||
)
|
||||
|
||||
func DefaultSectorSize() abi.SectorSize {
|
||||
szs := make([]abi.SectorSize, 0, len(miner0.SupportedProofTypes))
|
||||
for spt := range miner0.SupportedProofTypes {
|
||||
ss, err := spt.SectorSize()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
szs = append(szs, ss)
|
||||
}
|
||||
|
||||
sort.Slice(szs, func(i, j int) bool {
|
||||
return szs[i] < szs[j]
|
||||
})
|
||||
|
||||
return szs[0]
|
||||
}
|
||||
|
||||
// Core network constants
|
||||
|
||||
func BlocksTopic(netName dtypes.NetworkName) string { return "/fil/blocks/" + string(netName) }
|
||||
func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + string(netName) }
|
||||
func DhtProtocolName(netName dtypes.NetworkName) protocol.ID {
|
||||
return protocol.ID("/fil/kad/" + string(netName))
|
||||
}
|
||||
|
||||
func UseNewestNetwork() bool {
|
||||
// TODO: Put these in a container we can iterate over
|
||||
if UpgradeBreezeHeight <= 0 && UpgradeSmokeHeight <= 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func SetAddressNetwork(n address.Network) {
|
||||
address.CurrentNetwork = n
|
||||
}
|
||||
121
build/params_shared_vals.go
Normal file
@ -0,0 +1,121 @@
|
||||
// +build !testground
|
||||
|
||||
package build
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"os"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
)
|
||||
|
||||
// /////
|
||||
// Storage
|
||||
|
||||
const UnixfsChunkSize uint64 = 1 << 20
|
||||
const UnixfsLinksPerLevel = 1024
|
||||
|
||||
// /////
|
||||
// Consensus / Network
|
||||
|
||||
const AllowableClockDriftSecs = uint64(1)
|
||||
const NewestNetworkVersion = network.Version3
|
||||
const ActorUpgradeNetworkVersion = network.Version4
|
||||
|
||||
// Epochs
|
||||
const ForkLengthThreshold = Finality
|
||||
|
||||
// Blocks (e)
|
||||
var BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch)
|
||||
|
||||
// Epochs
|
||||
const Finality = miner0.ChainFinality
|
||||
const MessageConfidence = uint64(5)
|
||||
|
||||
// constants for Weight calculation
|
||||
// The ratio of weight contributed by short-term vs long-term factors in a given round
|
||||
const WRatioNum = int64(1)
|
||||
const WRatioDen = uint64(2)
|
||||
|
||||
// /////
|
||||
// Proofs
|
||||
|
||||
// Epochs
|
||||
const SealRandomnessLookback = Finality
|
||||
|
||||
// Epochs
|
||||
const SealRandomnessLookbackLimit = SealRandomnessLookback + 2000 // TODO: Get from spec specs-actors
|
||||
|
||||
// Maximum lookback that randomness can be sourced from for a seal proof submission
|
||||
const MaxSealLookback = SealRandomnessLookbackLimit + 2000 // TODO: Get from specs-actors
|
||||
|
||||
// /////
|
||||
// Mining
|
||||
|
||||
// Epochs
|
||||
const TicketRandomnessLookback = abi.ChainEpoch(1)
|
||||
|
||||
const WinningPoStSectorSetLookback = abi.ChainEpoch(10)
|
||||
|
||||
// /////
|
||||
// Address
|
||||
|
||||
const AddressMainnetEnvVar = "_mainnet_"
|
||||
|
||||
// /////
|
||||
// Devnet settings
|
||||
|
||||
var Devnet = true
|
||||
|
||||
const FilBase = uint64(2_000_000_000)
|
||||
const FilAllocStorageMining = uint64(1_100_000_000)
|
||||
|
||||
const FilecoinPrecision = uint64(1_000_000_000_000_000_000)
|
||||
|
||||
var InitialRewardBalance *big.Int
|
||||
|
||||
// TODO: Move other important consts here
|
||||
|
||||
func init() {
|
||||
InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining))
|
||||
InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision)))
|
||||
|
||||
if os.Getenv("LOTUS_ADDRESS_TYPE") == AddressMainnetEnvVar {
|
||||
SetAddressNetwork(address.Mainnet)
|
||||
}
|
||||
}
|
||||
|
||||
// Sync
|
||||
const BadBlockCacheSize = 1 << 15
|
||||
|
||||
// assuming 4000 messages per round, this lets us not lose any messages across a
|
||||
// 10 block reorg.
|
||||
const BlsSignatureCacheSize = 40000
|
||||
|
||||
// Size of signature verification cache
|
||||
// 32k keeps the cache around 10MB in size, max
|
||||
const VerifSigCacheSize = 32000
|
||||
|
||||
// ///////
|
||||
// Limits
|
||||
|
||||
// TODO: If this is gonna stay, it should move to specs-actors
|
||||
const BlockMessageLimit = 10000
|
||||
|
||||
const BlockGasLimit = 10_000_000_000
|
||||
const BlockGasTarget = BlockGasLimit / 2
|
||||
const BaseFeeMaxChangeDenom = 8 // 12.5%
|
||||
const InitialBaseFee = 100e6
|
||||
const MinimumBaseFee = 100
|
||||
const PackingEfficiencyNum = 4
|
||||
const PackingEfficiencyDenom = 5
|
||||
|
||||
// Actor consts
|
||||
// TODO: Pull from actors when its made not private
|
||||
var MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay)
|
||||
89
build/params_testground.go
Normal file
@ -0,0 +1,89 @@
|
||||
// +build testground
|
||||
|
||||
// This file makes hardcoded parameters (const) configurable as vars.
|
||||
//
|
||||
// Its purpose is to unlock various degrees of flexibility and parametrization
|
||||
// when writing Testground plans for Lotus.
|
||||
//
|
||||
package build
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/network"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
)
|
||||
|
||||
var (
|
||||
UnixfsChunkSize = uint64(1 << 20)
|
||||
UnixfsLinksPerLevel = 1024
|
||||
|
||||
BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch)
|
||||
BlockMessageLimit = 512
|
||||
BlockGasLimit = int64(100_000_000_000)
|
||||
BlockGasTarget = int64(BlockGasLimit / 2)
|
||||
BaseFeeMaxChangeDenom = int64(8) // 12.5%
|
||||
InitialBaseFee = int64(100e6)
|
||||
MinimumBaseFee = int64(100)
|
||||
BlockDelaySecs = uint64(builtin.EpochDurationSeconds)
|
||||
PropagationDelaySecs = uint64(6)
|
||||
|
||||
AllowableClockDriftSecs = uint64(1)
|
||||
|
||||
Finality = miner0.ChainFinality
|
||||
ForkLengthThreshold = Finality
|
||||
|
||||
SlashablePowerDelay = 20
|
||||
InteractivePoRepConfidence = 6
|
||||
|
||||
MessageConfidence uint64 = 5
|
||||
|
||||
WRatioNum = int64(1)
|
||||
WRatioDen = uint64(2)
|
||||
|
||||
BadBlockCacheSize = 1 << 15
|
||||
BlsSignatureCacheSize = 40000
|
||||
VerifSigCacheSize = 32000
|
||||
|
||||
SealRandomnessLookback = Finality
|
||||
SealRandomnessLookbackLimit = SealRandomnessLookback + 2000
|
||||
MaxSealLookback = SealRandomnessLookbackLimit + 2000
|
||||
|
||||
TicketRandomnessLookback = abi.ChainEpoch(1)
|
||||
WinningPoStSectorSetLookback = abi.ChainEpoch(10)
|
||||
|
||||
FilBase uint64 = 2_000_000_000
|
||||
FilAllocStorageMining uint64 = 1_400_000_000
|
||||
|
||||
FilecoinPrecision uint64 = 1_000_000_000_000_000_000
|
||||
|
||||
InitialRewardBalance = func() *big.Int {
|
||||
v := big.NewInt(int64(FilAllocStorageMining))
|
||||
v = v.Mul(v, big.NewInt(int64(FilecoinPrecision)))
|
||||
return v
|
||||
}()
|
||||
// Actor consts
|
||||
// TODO: Pull from actors when its made not private
|
||||
MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay)
|
||||
|
||||
PackingEfficiencyNum int64 = 4
|
||||
PackingEfficiencyDenom int64 = 5
|
||||
|
||||
UpgradeBreezeHeight abi.ChainEpoch = -1
|
||||
BreezeGasTampingDuration abi.ChainEpoch = 0
|
||||
|
||||
UpgradeSmokeHeight abi.ChainEpoch = -1
|
||||
UpgradeIgnitionHeight abi.ChainEpoch = -2
|
||||
UpgradeLiftoffHeight abi.ChainEpoch = -3
|
||||
|
||||
DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandMainnet,
|
||||
}
|
||||
|
||||
NewestNetworkVersion = network.Version2
|
||||
ActorUpgradeNetworkVersion = network.Version3
|
||||
|
||||
Devnet = true
|
||||
)
|
||||
@ -1,34 +1,42 @@
|
||||
// +build !debug
|
||||
// +build !2k
|
||||
// +build !testground
|
||||
|
||||
package build
|
||||
|
||||
var SectorSizes = []uint64{
|
||||
1 << 30,
|
||||
32 << 30,
|
||||
import (
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/lotus/chain/actors/policy"
|
||||
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
)
|
||||
|
||||
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
|
||||
0: DrandIncentinet,
|
||||
UpgradeSmokeHeight: DrandMainnet,
|
||||
}
|
||||
|
||||
// Seconds
|
||||
const BlockDelay = 45
|
||||
const UpgradeBreezeHeight = 41280
|
||||
const BreezeGasTampingDuration = 120
|
||||
|
||||
const PropagationDelay = 6
|
||||
const UpgradeSmokeHeight = 51000
|
||||
|
||||
// FallbackPoStDelay is the number of epochs the miner needs to wait after
|
||||
// ElectionPeriodStart before starting fallback post computation
|
||||
//
|
||||
// Epochs
|
||||
const FallbackPoStDelay = 30
|
||||
const UpgradeIgnitionHeight = 94000
|
||||
|
||||
// SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after
|
||||
// which the miner is slashed
|
||||
//
|
||||
// Epochs
|
||||
const SlashablePowerDelay = 200
|
||||
// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier.
|
||||
// Miners, clients, developers, custodians all need time to prepare.
|
||||
// We still have upgrades and state changes to do, but can happen after signaling timing here.
|
||||
const UpgradeLiftoffHeight = 148888
|
||||
|
||||
// Epochs
|
||||
const InteractivePoRepDelay = 8
|
||||
func init() {
|
||||
policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40))
|
||||
policy.SetSupportedProofTypes(
|
||||
abi.RegisteredSealProof_StackedDrg32GiBV1,
|
||||
abi.RegisteredSealProof_StackedDrg64GiBV1,
|
||||
)
|
||||
Devnet = false
|
||||
}
|
||||
|
||||
// Epochs
|
||||
const InteractivePoRepConfidence = 6
|
||||
const BlockDelaySecs = uint64(builtin0.EpochDurationSeconds)
|
||||
|
||||
// Bytes
|
||||
var MinimumMinerPower uint64 = 512 << 30 // 512GB
|
||||
const PropagationDelaySecs = uint64(6)
|
||||
|
||||
@ -1,103 +1,152 @@
|
||||
{
|
||||
"v20-proof-of-spacetime-election-5f585aca354eb68e411c8582ed0efd800792430e4e76d73468c4fc03f1a8d6d2.params": {
|
||||
"cid": "QmX7tYeNPWae2fjZ3Am6GB9dmHvLqvoz8dKo3PR98VYxH9",
|
||||
"digest": "39a9edec3355516674f0d12b926be493",
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": {
|
||||
"cid": "QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR",
|
||||
"digest": "7610b9f82bfc88405b7a832b651ce2f6",
|
||||
"sector_size": 2048
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": {
|
||||
"cid": "QmcS5JZs8X3TdtkEBpHAdUYjdNDqcL7fWQFtQz69mpnu2X",
|
||||
"digest": "0e0958009936b9d5e515ec97b8cb792d",
|
||||
"sector_size": 2048
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": {
|
||||
"cid": "QmUiRx71uxfmUE8V3H9sWAsAXoM88KR4eo1ByvvcFNeTLR",
|
||||
"digest": "1a7d4a9c8a502a497ed92a54366af33f",
|
||||
"sector_size": 536870912
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": {
|
||||
"cid": "QmfCeddjFpWtavzfEzZpJfzSajGNwfL4RjFXWAvA9TSnTV",
|
||||
"digest": "4dae975de4f011f101f5a2f86d1daaba",
|
||||
"sector_size": 536870912
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": {
|
||||
"cid": "QmcSTqDcFVLGGVYz1njhUZ7B6fkKtBumsLUwx4nkh22TzS",
|
||||
"digest": "82c88066be968bb550a05e30ff6c2413",
|
||||
"sector_size": 2048
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": {
|
||||
"cid": "QmSTCXF2ipGA3f6muVo6kHc2URSx6PzZxGUqu7uykaH5KU",
|
||||
"digest": "ffd79788d614d27919ae5bd2d94eacb6",
|
||||
"sector_size": 2048
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": {
|
||||
"cid": "QmU9SBzJNrcjRFDiFc4GcApqdApN6z9X7MpUr66mJ2kAJP",
|
||||
"digest": "700171ecf7334e3199437c930676af82",
|
||||
"sector_size": 8388608
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": {
|
||||
"cid": "QmbmUMa3TbbW3X5kFhExs6WgC4KeWT18YivaVmXDkB6ANG",
|
||||
"digest": "79ebb55f56fda427743e35053edad8fc",
|
||||
"sector_size": 8388608
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": {
|
||||
"cid": "QmdNEL2RtqL52GQNuj8uz6mVj5Z34NVnbaJ1yMyh1oXtBx",
|
||||
"digest": "c49499bb76a0762884896f9683403f55",
|
||||
"sector_size": 8388608
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": {
|
||||
"cid": "QmUiVYCQUgr6Y13pZFr8acWpSM4xvTXUdcvGmxyuHbKhsc",
|
||||
"digest": "34d4feeacd9abf788d69ef1bb4d8fd00",
|
||||
"sector_size": 8388608
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": {
|
||||
"cid": "QmVgCsJFRXKLuuUhT3aMYwKVGNA9rDeR6DCrs7cAe8riBT",
|
||||
"digest": "827359440349fe8f5a016e7598993b79",
|
||||
"sector_size": 536870912
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": {
|
||||
"cid": "QmfA31fbCWojSmhSGvvfxmxaYCpMoXP95zEQ9sLvBGHNaN",
|
||||
"digest": "bd2cd62f65c1ab84f19ca27e97b7c731",
|
||||
"sector_size": 536870912
|
||||
},
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": {
|
||||
"cid": "QmaUmfcJt6pozn8ndq1JVBzLRjRJdHMTPd4foa8iw5sjBZ",
|
||||
"digest": "2cf49eb26f1fee94c85781a390ddb4c8",
|
||||
"sector_size": 34359738368
|
||||
},
|
||||
"v20-proof-of-spacetime-election-5f585aca354eb68e411c8582ed0efd800792430e4e76d73468c4fc03f1a8d6d2.vk": {
|
||||
"cid": "QmbNGx7pNbGiEr8ykoHxVXHW2LNSmGdsxKtj1onZCyguCX",
|
||||
"digest": "0227ae7df4f2affe529ebafbbc7540ee",
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": {
|
||||
"cid": "QmR9i9KL3vhhAqTBGj1bPPC7LvkptxrH9RvxJxLN1vvsBE",
|
||||
"digest": "0f8ec542485568fa3468c066e9fed82b",
|
||||
"sector_size": 34359738368
|
||||
},
|
||||
"v20-proof-of-spacetime-election-a4e18190d4b4657ba1b4d08a341871b2a6f398e327cb9951b28ab141fbdbf49d.params": {
|
||||
"cid": "QmRGZsNp4mp1cZshcXqt3VMuWscAEsiMa2iepF4CsWWoiv",
|
||||
"digest": "991041a354b12c280542741f58c7f2ca",
|
||||
"sector_size": 1024
|
||||
},
|
||||
"v20-proof-of-spacetime-election-a4e18190d4b4657ba1b4d08a341871b2a6f398e327cb9951b28ab141fbdbf49d.vk": {
|
||||
"cid": "QmWpmrhCGVcfqLyqp5oGAnhPmCE5hGTPaauHi25mpQwRSU",
|
||||
"digest": "91fac550e1f9bccab213830bb0c85bd6",
|
||||
"sector_size": 1024
|
||||
},
|
||||
"v20-proof-of-spacetime-election-a9eb6d90b896a282ec2d3a875c6143e3fcff778f0da1460709e051833651559b.params": {
|
||||
"cid": "QmenSZXh1EsSyHiSRvA6wb8yaPhYBTjrKehJw96Px5HnN4",
|
||||
"digest": "6322eacd2773163ddd51f9ca7d645fc4",
|
||||
"sector_size": 1073741824
|
||||
},
|
||||
"v20-proof-of-spacetime-election-a9eb6d90b896a282ec2d3a875c6143e3fcff778f0da1460709e051833651559b.vk": {
|
||||
"cid": "QmPvZoMKofw6eDhDg5ESJA2QAZP8HvM6qMQk7fw4pq9bQf",
|
||||
"digest": "0df62745fceac922e3e70847cfc70b52",
|
||||
"sector_size": 1073741824
|
||||
},
|
||||
"v20-proof-of-spacetime-election-bf872523641b1de33553db2a177df13e412d7b3b0103e6696ae0a1cf5d525259.params": {
|
||||
"cid": "QmVibFqzkZoL8cwQmzj8njPokCQGCCx4pBcUH77bzgJgV9",
|
||||
"digest": "de9d71e672f286706a1673bd57abdaac",
|
||||
"sector_size": 16777216
|
||||
},
|
||||
"v20-proof-of-spacetime-election-bf872523641b1de33553db2a177df13e412d7b3b0103e6696ae0a1cf5d525259.vk": {
|
||||
"cid": "QmZa5FX27XyiEXQQLQpHqtMJKLzrcY8wMuj3pxzmSimSyu",
|
||||
"digest": "7f796d3a0f13499181e44b5eee0cc744",
|
||||
"sector_size": 16777216
|
||||
},
|
||||
"v20-proof-of-spacetime-election-ffc3fb192364238b60977839d14e3154d4a98313e30d46694a12af54b6874975.params": {
|
||||
"cid": "Qmbt2SWWAmMcYoY3DAiRDXA8fAuqdqRLWucJMSxYmzBCmN",
|
||||
"digest": "151ae0ae183fc141e8c2bebc28e5cc10",
|
||||
"sector_size": 268435456
|
||||
},
|
||||
"v20-proof-of-spacetime-election-ffc3fb192364238b60977839d14e3154d4a98313e30d46694a12af54b6874975.vk": {
|
||||
"cid": "QmUxvPu4xdVmjMFihUKoYyEdXBqxsXkvmxRweU7KouWHji",
|
||||
"digest": "95eb89588e9d1832aca044c3a13178af",
|
||||
"sector_size": 268435456
|
||||
},
|
||||
"v20-stacked-proof-of-replication-117839dacd1ef31e5968a6fd13bcd6fa86638d85c40c9241a1d07c2a954eb89b.params": {
|
||||
"cid": "QmQZe8eLo2xXbhSDxtyYZNqEjqjdcWGdADywECRvNEZQdX",
|
||||
"digest": "fcd50e2e08a8560a6bb3418e883567ed",
|
||||
"sector_size": 268435456
|
||||
},
|
||||
"v20-stacked-proof-of-replication-117839dacd1ef31e5968a6fd13bcd6fa86638d85c40c9241a1d07c2a954eb89b.vk": {
|
||||
"cid": "Qme1hn6QT1covfoUFGDZkqoE1pMTax9FNW3nWWmTNqFe7y",
|
||||
"digest": "872e244d86499fd659082e3bcf3f13e7",
|
||||
"sector_size": 268435456
|
||||
},
|
||||
"v20-stacked-proof-of-replication-b46f3a1051afbb67f70aae7082da95def62eee943662f3e1bf69837fb08aaae4.params": {
|
||||
"cid": "QmSfrPDC9jwY4MKrjzhCqDBBAG44wSDM8oE5NuDwWSh2xN",
|
||||
"digest": "0a338b941c5f17946340de5fc95cab30",
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": {
|
||||
"cid": "Qmdtczp7p4wrbDofmHdGhiixn9irAcN77mV9AEHZBaTt1i",
|
||||
"digest": "d84f79a16fe40e9e25a36e2107bb1ba0",
|
||||
"sector_size": 34359738368
|
||||
},
|
||||
"v20-stacked-proof-of-replication-b46f3a1051afbb67f70aae7082da95def62eee943662f3e1bf69837fb08aaae4.vk": {
|
||||
"cid": "QmTDGynCmnbaZNBP3Bv3F3duC3ecKRubCKeMUiQQZYbGpF",
|
||||
"digest": "c752e070a6b7aa8b79aa661a6b600b55",
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": {
|
||||
"cid": "QmZCvxKcKP97vDAk8Nxs9R1fWtqpjQrAhhfXPoCi1nkDoF",
|
||||
"digest": "fc02943678dd119e69e7fab8420e8819",
|
||||
"sector_size": 34359738368
|
||||
},
|
||||
"v20-stacked-proof-of-replication-e71093863cadc71de61f38311ee45816633973bbf34849316b147f8d2e66f199.params": {
|
||||
"cid": "QmXjSSnMUnc7EjQBYtTHhvLU3kXJTbUyhVhJRSTRehh186",
|
||||
"digest": "efa407fd09202dffd15799a8518e73d3",
|
||||
"sector_size": 1024
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": {
|
||||
"cid": "QmeAN4vuANhXsF8xP2Lx5j2L6yMSdogLzpcvqCJThRGK1V",
|
||||
"digest": "3810b7780ac0e299b22ae70f1f94c9bc",
|
||||
"sector_size": 68719476736
|
||||
},
|
||||
"v20-stacked-proof-of-replication-e71093863cadc71de61f38311ee45816633973bbf34849316b147f8d2e66f199.vk": {
|
||||
"cid": "QmYHW3zhQouDP4okFbXSsRMcZ8bokKGvzxqbv7ZrunPMiG",
|
||||
"digest": "b2f09a0ccb62da28c890d5b881c8dcd2",
|
||||
"sector_size": 1024
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": {
|
||||
"cid": "QmWV8rqZLxs1oQN9jxNWmnT1YdgLwCcscv94VARrhHf1T7",
|
||||
"digest": "59d2bf1857adc59a4f08fcf2afaa916b",
|
||||
"sector_size": 68719476736
|
||||
},
|
||||
"v20-stacked-proof-of-replication-e99a585174b6a45b254ba4780d72c89ad808c305c6d11711009ade4f39dba8e9.params": {
|
||||
"cid": "QmUhyfNeLb32LfSkjsUwTFYLXQGMj6JQ8daff4DdVMt79q",
|
||||
"digest": "b53c1916a63839ec345aa2224e9198b7",
|
||||
"sector_size": 1073741824
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": {
|
||||
"cid": "QmVkrXc1SLcpgcudK5J25HH93QvR9tNsVhVTYHm5UymXAz",
|
||||
"digest": "2170a91ad5bae22ea61f2ea766630322",
|
||||
"sector_size": 68719476736
|
||||
},
|
||||
"v20-stacked-proof-of-replication-e99a585174b6a45b254ba4780d72c89ad808c305c6d11711009ade4f39dba8e9.vk": {
|
||||
"cid": "QmWReGfbuoozNErbskmFvqV4q36BY6F2WWb4cVFc3zoYkA",
|
||||
"digest": "20d58a3fae7343481f8298a2dd493dd7",
|
||||
"sector_size": 1073741824
|
||||
"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": {
|
||||
"cid": "QmbfQjPD7EpzjhWGmvWAsyN2mAZ4PcYhsf3ujuhU9CSuBm",
|
||||
"digest": "6d3789148fb6466d07ee1e24d6292fd6",
|
||||
"sector_size": 68719476736
|
||||
},
|
||||
"v20-stacked-proof-of-replication-f571ee2386f4c65a68e802747f2d78691006fc81a67971c4d9641403fffece16.params": {
|
||||
"cid": "QmSAHu14Pe8iav6BYCt9XkpHJ73XM7tcpY4d9JK9BST9HU",
|
||||
"digest": "7698426202c7e07b26ef056d31485b3a",
|
||||
"sector_size": 16777216
|
||||
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": {
|
||||
"cid": "QmWceMgnWYLopMuM4AoGMvGEau7tNe5UK83XFjH5V9B17h",
|
||||
"digest": "434fb1338ecfaf0f59256f30dde4968f",
|
||||
"sector_size": 2048
|
||||
},
|
||||
"v20-stacked-proof-of-replication-f571ee2386f4c65a68e802747f2d78691006fc81a67971c4d9641403fffece16.vk": {
|
||||
"cid": "QmaKtFLShnhMGVn7P9UsHjkgqtqRFSwCStqqykBN7u8dax",
|
||||
"digest": "834408e5c3fce6ec5d1bf64e64cee94e",
|
||||
"sector_size": 16777216
|
||||
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": {
|
||||
"cid": "QmamahpFCstMUqHi2qGtVoDnRrsXhid86qsfvoyCTKJqHr",
|
||||
"digest": "dc1ade9929ade1708238f155343044ac",
|
||||
"sector_size": 2048
|
||||
},
|
||||
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": {
|
||||
"cid": "QmYBpTt7LWNAWr1JXThV5VxX7wsQFLd1PHrGYVbrU1EZjC",
|
||||
"digest": "6c77597eb91ab936c1cef4cf19eba1b3",
|
||||
"sector_size": 536870912
|
||||
},
|
||||
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": {
|
||||
"cid": "QmWionkqH2B6TXivzBSQeSyBxojaiAFbzhjtwYRrfwd8nH",
|
||||
"digest": "065179da19fbe515507267677f02823e",
|
||||
"sector_size": 536870912
|
||||
},
|
||||
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": {
|
||||
"cid": "QmPXAPPuQtuQz7Zz3MHMAMEtsYwqM1o9H1csPLeiMUQwZH",
|
||||
"digest": "09e612e4eeb7a0eb95679a88404f960c",
|
||||
"sector_size": 8388608
|
||||
},
|
||||
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": {
|
||||
"cid": "QmYCuipFyvVW1GojdMrjK1JnMobXtT4zRCZs1CGxjizs99",
|
||||
"digest": "b687beb9adbd9dabe265a7e3620813e4",
|
||||
"sector_size": 8388608
|
||||
},
|
||||
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": {
|
||||
"cid": "QmengpM684XLQfG8754ToonszgEg2bQeAGUan5uXTHUQzJ",
|
||||
"digest": "6a388072a518cf46ebd661f5cc46900a",
|
||||
"sector_size": 34359738368
|
||||
},
|
||||
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": {
|
||||
"cid": "Qmf93EMrADXAK6CyiSfE8xx45fkMfR3uzKEPCvZC1n2kzb",
|
||||
"digest": "0c7b4aac1c40fdb7eb82bc355b41addf",
|
||||
"sector_size": 34359738368
|
||||
},
|
||||
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": {
|
||||
"cid": "QmS7ye6Ri2MfFzCkcUJ7FQ6zxDKuJ6J6B8k5PN7wzSR9sX",
|
||||
"digest": "1801f8a6e1b00bceb00cc27314bb5ce3",
|
||||
"sector_size": 68719476736
|
||||
},
|
||||
"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": {
|
||||
"cid": "QmehSmC6BhrgRZakPDta2ewoH9nosNzdjCqQRXsNFNUkLN",
|
||||
"digest": "a89884252c04c298d0b3c81bfd884164",
|
||||
"sector_size": 68719476736
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
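A minimal sketch of consuming a manifest shaped like the entries above (each key maps to a cid, an md5-style digest, and a sector_size). The file name, struct, and program below are illustrative assumptions, not the project's actual parameter loader.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
)

// paramFile mirrors the per-entry shape seen in the manifest above.
type paramFile struct {
	Cid        string `json:"cid"`
	Digest     string `json:"digest"`
	SectorSize uint64 `json:"sector_size"`
}

func main() {
	data, err := ioutil.ReadFile("parameters.json") // assumed path
	if err != nil {
		panic(err)
	}

	// The manifest is a flat object: parameter file name -> metadata.
	var manifest map[string]paramFile
	if err := json.Unmarshal(data, &manifest); err != nil {
		panic(err)
	}

	for name, p := range manifest {
		fmt.Printf("%s: cid=%s digest=%s sector_size=%d\n", name, p.Cid, p.Digest, p.SectorSize)
	}
}
```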
@ -1,13 +1,39 @@
package build

import "fmt"
import (
	"fmt"

	"golang.org/x/xerrors"
)

var CurrentCommit string
var BuildType int

const (
	BuildDefault = 0
	Build2k      = 0x1
	BuildDebug   = 0x3
)

func buildType() string {
	switch BuildType {
	case BuildDefault:
		return ""
	case BuildDebug:
		return "+debug"
	case Build2k:
		return "+2k"
	default:
		return "+huh?"
	}
}

// BuildVersion is the local build version, set by build system
const BuildVersion = "0.1.6"
const BuildVersion = "0.8.0"

var UserVersion = BuildVersion + CurrentCommit
func UserVersion() string {
	return BuildVersion + buildType() + CurrentCommit
}

type Version uint32

@ -30,9 +56,39 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
	return ve&minorMask == v2&minorMask
}

// APIVersion is a semver version of the rpc api exposed
var APIVersion Version = newVer(0, 1, 6)
type NodeType int

const (
	NodeUnknown NodeType = iota

	NodeFull
	NodeMiner
	NodeWorker
)

var RunningNodeType NodeType

func VersionForType(nodeType NodeType) (Version, error) {
	switch nodeType {
	case NodeFull:
		return FullAPIVersion, nil
	case NodeMiner:
		return MinerAPIVersion, nil
	case NodeWorker:
		return WorkerAPIVersion, nil
	default:
		return Version(0), xerrors.Errorf("unknown node type %d", nodeType)
	}
}

// semver versions of the rpc api exposed
var (
	FullAPIVersion   = newVer(0, 16, 0)
	MinerAPIVersion  = newVer(0, 15, 0)
	WorkerAPIVersion = newVer(0, 15, 0)
)

//nolint:varcheck,deadcode
const (
	majorMask = 0xff0000
	minorMask = 0xffff00

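A short sketch of how a packed Version interacts with the masks in this hunk. newVer's definition is not shown here; the packing below (major<<16 | minor<<8 | patch) is an assumption that is merely consistent with majorMask/minorMask, not a statement of the project's actual implementation.

```go
package main

import "fmt"

type Version uint32

// Assumed packing: one byte each for major, minor, patch.
func newVer(major, minor, patch uint8) Version {
	return Version(uint32(major)<<16 | uint32(minor)<<8 | uint32(patch))
}

const minorMask = 0xffff00

// EqMajorMinor mirrors the comparison in the hunk: the patch byte is masked off.
func (ve Version) EqMajorMinor(v2 Version) bool {
	return ve&minorMask == v2&minorMask
}

func main() {
	fmt.Println(newVer(0, 16, 0).EqMajorMinor(newVer(0, 16, 3))) // true: same major.minor
	fmt.Println(newVer(0, 16, 0).EqMajorMinor(newVer(0, 15, 0))) // false: minor differs
}
```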
@ -1,48 +0,0 @@
package actors

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/actors/aerrors"
	"github.com/filecoin-project/lotus/chain/types"
)

type CronActor struct{}

type callTuple struct {
	addr   address.Address
	method uint64
}

var CronActors = []callTuple{
	{StoragePowerAddress, SPAMethods.CheckProofSubmissions},
}

type CronActorState struct{}

type cAMethods struct {
	EpochTick uint64
}

var CAMethods = cAMethods{2}

func (ca CronActor) Exports() []interface{} {
	return []interface{}{
		1: nil,
		2: ca.EpochTick,
	}
}

func (ca CronActor) EpochTick(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) {
	if vmctx.Message().From != CronAddress {
		return nil, aerrors.New(1, "EpochTick is only callable as a part of tipset state computation")
	}

	for _, call := range CronActors {
		_, err := vmctx.Send(call.addr, call.method, types.NewInt(0), nil)
		if err != nil {
			return nil, err // todo: this very bad?
		}
	}

	return nil, nil
}
@ -1,242 +0,0 @@
package actors

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/actors/aerrors"
	"github.com/filecoin-project/lotus/chain/types"
	cbg "github.com/whyrusleeping/cbor-gen"
	"golang.org/x/xerrors"

	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-hamt-ipld"
	cbor "github.com/ipfs/go-ipld-cbor"
	logging "github.com/ipfs/go-log"
	mh "github.com/multiformats/go-multihash"
)

var log = logging.Logger("actors")

var EmptyCBOR cid.Cid

const (
	GasCreateActor = 100
)

func init() {

	n, err := cbor.WrapObject(map[string]string{}, mh.SHA2_256, -1)
	if err != nil {
		panic(err) // ok
	}

	EmptyCBOR = n.Cid()
}

type InitActor struct{}

type InitActorState struct {
	AddressMap cid.Cid

	NextID uint64
}

type iAMethods struct {
	Exec uint64
}

var IAMethods = iAMethods{2}

func (ia InitActor) Exports() []interface{} {
	return []interface{}{
		1: nil,
		2: ia.Exec,
	}
}

type ExecParams struct {
	Code   cid.Cid
	Params []byte
}

func CreateExecParams(act cid.Cid, obj cbg.CBORMarshaler) ([]byte, aerrors.ActorError) {
	encparams, err := SerializeParams(obj)
	if err != nil {
		return nil, aerrors.Wrap(err, "creating ExecParams")
	}

	return SerializeParams(&ExecParams{
		Code:   act,
		Params: encparams,
	})
}

func (ia InitActor) Exec(act *types.Actor, vmctx types.VMContext, p *ExecParams) ([]byte, aerrors.ActorError) {
	beginState := vmctx.Storage().GetHead()

	var self InitActorState
	if err := vmctx.Storage().Get(beginState, &self); err != nil {
		return nil, err
	}

	if err := vmctx.ChargeGas(GasCreateActor); err != nil {
		return nil, aerrors.Wrap(err, "run out of gas")
	}

	// Make sure that only the actors defined in the spec can be launched.
	if !IsBuiltinActor(p.Code) {
		return nil, aerrors.New(1,
			"cannot launch actor instance that is not a builtin actor")
	}

	// Ensure that singletons can be only launched once.
	// TODO: do we want to enforce this? If so how should actors be marked as such?
	if IsSingletonActor(p.Code) {
		return nil, aerrors.New(1, "cannot launch another actor of this type")
	}

	// This generates a unique address for this actor that is stable across message
	// reordering
	creator := vmctx.Message().From
	nonce := vmctx.Message().Nonce
	addr, err := ComputeActorAddress(creator, nonce)
	if err != nil {
		return nil, err
	}

	// Set up the actor itself
	actor := types.Actor{
		Code:    p.Code,
		Balance: types.NewInt(0),
		Head:    EmptyCBOR,
		Nonce:   0,
	}

	// The call to the actors constructor will set up the initial state
	// from the given parameters, setting `actor.Head` to a new value when successful.
	// TODO: can constructors fail?
	//actor.Constructor(p.Params)

	// Store the mapping of address to actor ID.
	idAddr, nerr := self.AddActor(vmctx.Ipld(), addr)
	if nerr != nil {
		return nil, aerrors.Escalate(err, "adding new actor mapping")
	}

	// NOTE: This is a privileged call that only the init actor is allowed to make
	// FIXME: Had to comment this because state is not in interface
	state, err := vmctx.StateTree()
	if err != nil {
		return nil, err
	}

	if err := state.SetActor(idAddr, &actor); err != nil {
		if xerrors.Is(err, types.ErrActorNotFound) {
			return nil, aerrors.Absorb(err, 1, "SetActor, actor not found")
		}
		return nil, aerrors.Escalate(err, "inserting new actor into state tree")
	}

	// '1' is reserved for constructor methods
	_, err = vmctx.Send(idAddr, 1, vmctx.Message().Value, p.Params)
	if err != nil {
		return nil, err
	}

	c, err := vmctx.Storage().Put(&self)
	if err != nil {
		return nil, err
	}

	if err := vmctx.Storage().Commit(beginState, c); err != nil {
		return nil, err
	}

	return idAddr.Bytes(), nil
}

func IsBuiltinActor(code cid.Cid) bool {
	switch code {
	case StorageMarketCodeCid, StoragePowerCodeCid, StorageMinerCodeCid, AccountCodeCid, InitCodeCid, MultisigCodeCid, PaymentChannelCodeCid:
		return true
	default:
		return false
	}
}

func IsSingletonActor(code cid.Cid) bool {
	return code == StoragePowerCodeCid || code == StorageMarketCodeCid || code == InitCodeCid || code == CronCodeCid
}

func (ias *InitActorState) AddActor(cst *hamt.CborIpldStore, addr address.Address) (address.Address, error) {
	nid := ias.NextID

	amap, err := hamt.LoadNode(context.TODO(), cst, ias.AddressMap)
	if err != nil {
		return address.Undef, err
	}

	if err := amap.Set(context.TODO(), string(addr.Bytes()), nid); err != nil {
		return address.Undef, err
	}

	if err := amap.Flush(context.TODO()); err != nil {
		return address.Undef, err
	}

	ncid, err := cst.Put(context.TODO(), amap)
	if err != nil {
		return address.Undef, err
	}
	ias.AddressMap = ncid
	ias.NextID++

	return NewIDAddress(nid)
}

func (ias *InitActorState) Lookup(cst *hamt.CborIpldStore, addr address.Address) (address.Address, error) {
	amap, err := hamt.LoadNode(context.TODO(), cst, ias.AddressMap)
	if err != nil {
		return address.Undef, xerrors.Errorf("ias lookup failed loading hamt node: %w", err)
	}

	var val interface{}
	err = amap.Find(context.TODO(), string(addr.Bytes()), &val)
	if err != nil {
		return address.Undef, xerrors.Errorf("ias lookup failed to do find: %w", err)
	}

	ival, ok := val.(uint64)
	if !ok {
		return address.Undef, fmt.Errorf("invalid value in init actor state, expected uint64, got %T", val)
	}

	return address.NewIDAddress(ival)
}

type AccountActorState struct {
	Address address.Address
}

func ComputeActorAddress(creator address.Address, nonce uint64) (address.Address, ActorError) {
	buf := new(bytes.Buffer)
	_, err := buf.Write(creator.Bytes())
	if err != nil {
		return address.Undef, aerrors.Escalate(err, "could not write address")
	}

	err = binary.Write(buf, binary.BigEndian, nonce)
	if err != nil {
		return address.Undef, aerrors.Escalate(err, "could not write nonce")
	}

	addr, err := address.NewActorAddress(buf.Bytes())
	if err != nil {
		return address.Undef, aerrors.Escalate(err, "could not create address")
	}
	return addr, nil
}
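A standalone sketch of the deterministic address derivation that Exec relies on above: the creator's bytes followed by the big-endian nonce are hashed into an actor address, so the same creator and nonce always yield the same address regardless of message ordering. This uses only go-address calls visible in the deleted file; error handling is trimmed and the main function is illustrative.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/filecoin-project/go-address"
)

// computeActorAddress mirrors the derivation shown in the hunk above.
func computeActorAddress(creator address.Address, nonce uint64) (address.Address, error) {
	buf := new(bytes.Buffer)
	buf.Write(creator.Bytes())
	if err := binary.Write(buf, binary.BigEndian, nonce); err != nil {
		return address.Undef, err
	}
	return address.NewActorAddress(buf.Bytes())
}

func main() {
	creator, _ := address.NewIDAddress(101)
	a1, _ := computeActorAddress(creator, 7)
	a2, _ := computeActorAddress(creator, 7)
	fmt.Println(a1 == a2) // true: same creator and nonce give the same address
}
```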
File diff suppressed because it is too large
@ -1,222 +0,0 @@
package actors_test

import (
	"bytes"
	"context"
	"math/rand"
	"testing"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-sectorbuilder"
	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/actors/aerrors"
	"github.com/filecoin-project/lotus/chain/stmgr"
	"github.com/filecoin-project/lotus/chain/types"
	hamt "github.com/ipfs/go-hamt-ipld"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	cbg "github.com/whyrusleeping/cbor-gen"
)

func TestMinerCommitSectors(t *testing.T) {
	var worker, client address.Address
	var minerAddr address.Address
	opts := []HarnessOpt{
		HarnessAddr(&worker, 1000000),
		HarnessAddr(&client, 1000000),
		HarnessActor(&minerAddr, &worker, actors.StorageMinerCodeCid,
			func() cbg.CBORMarshaler {
				return &actors.StorageMinerConstructorParams{
					Owner:      worker,
					Worker:     worker,
					SectorSize: 1024,
					PeerID:     "fakepeerid",
				}
			}),
	}

	h := NewHarness(t, opts...)
	h.vm.Syscalls.ValidatePoRep = func(ctx context.Context, maddr address.Address, ssize uint64, commD, commR, ticket, proof, seed []byte, sectorID uint64) (bool, aerrors.ActorError) {
		// all proofs are valid
		return true, nil
	}

	ret, _ := h.SendFunds(t, worker, minerAddr, types.NewInt(100000))
	ApplyOK(t, ret)

	ret, _ = h.InvokeWithValue(t, client, actors.StorageMarketAddress, actors.SMAMethods.AddBalance, types.NewInt(2000), nil)
	ApplyOK(t, ret)

	addSectorToMiner(h, t, minerAddr, worker, client, 1)

	assertSectorIDs(h, t, minerAddr, []uint64{1})
}

func TestMinerSubmitBadFault(t *testing.T) {
	var worker, client address.Address
	var minerAddr address.Address
	opts := []HarnessOpt{
		HarnessAddr(&worker, 1000000),
		HarnessAddr(&client, 1000000),
		HarnessActor(&minerAddr, &worker, actors.StorageMinerCodeCid,
			func() cbg.CBORMarshaler {
				return &actors.StorageMinerConstructorParams{
					Owner:      worker,
					Worker:     worker,
					SectorSize: 1024,
					PeerID:     "fakepeerid",
				}
			}),
	}

	h := NewHarness(t, opts...)
	h.vm.Syscalls.ValidatePoRep = func(ctx context.Context, maddr address.Address, ssize uint64, commD, commR, ticket, proof, seed []byte, sectorID uint64) (bool, aerrors.ActorError) {
		// all proofs are valid
		return true, nil
	}

	ret, _ := h.SendFunds(t, worker, minerAddr, types.NewInt(100000))
	ApplyOK(t, ret)

	ret, _ = h.InvokeWithValue(t, client, actors.StorageMarketAddress, actors.SMAMethods.AddBalance, types.NewInt(2000), nil)
	ApplyOK(t, ret)

	addSectorToMiner(h, t, minerAddr, worker, client, 1)

	assertSectorIDs(h, t, minerAddr, []uint64{1})

	bf := types.NewBitField()
	bf.Set(6)
	ret, _ = h.Invoke(t, worker, minerAddr, actors.MAMethods.DeclareFaults, &actors.DeclareFaultsParams{bf})
	ApplyOK(t, ret)

	ret, _ = h.Invoke(t, actors.NetworkAddress, minerAddr, actors.MAMethods.SubmitElectionPoSt, nil)
	ApplyOK(t, ret)

	assertSectorIDs(h, t, minerAddr, []uint64{1})

	badnum := uint64(0)
	badnum--
	bf = types.NewBitField()
	bf.Set(badnum)
	ret, _ = h.Invoke(t, worker, minerAddr, actors.MAMethods.DeclareFaults, &actors.DeclareFaultsParams{bf})
	ApplyOK(t, ret)

	ret, _ = h.Invoke(t, actors.NetworkAddress, minerAddr, actors.MAMethods.SubmitElectionPoSt, nil)
	ApplyOK(t, ret)

	bf = types.NewBitField()
	bf.Set(1)
	ret, _ = h.Invoke(t, worker, minerAddr, actors.MAMethods.DeclareFaults, &actors.DeclareFaultsParams{bf})
	ApplyOK(t, ret)

	ret, _ = h.Invoke(t, actors.NetworkAddress, minerAddr, actors.MAMethods.SubmitElectionPoSt, nil)
	ApplyOK(t, ret)

	assertSectorIDs(h, t, minerAddr, []uint64{})

}

func addSectorToMiner(h *Harness, t *testing.T, minerAddr, worker, client address.Address, sid uint64) {
	t.Helper()
	s := sectorbuilder.UserBytesForSectorSize(1024)
	deal := h.makeFakeDeal(t, minerAddr, worker, client, s)
	ret, _ := h.Invoke(t, worker, actors.StorageMarketAddress, actors.SMAMethods.PublishStorageDeals,
		&actors.PublishStorageDealsParams{
			Deals: []actors.StorageDealProposal{*deal},
		})
	ApplyOK(t, ret)
	var dealIds actors.PublishStorageDealResponse
	if err := dealIds.UnmarshalCBOR(bytes.NewReader(ret.Return)); err != nil {
		t.Fatal(err)
	}

	dealid := dealIds.DealIDs[0]

	ret, _ = h.Invoke(t, worker, minerAddr, actors.MAMethods.PreCommitSector,
		&actors.SectorPreCommitInfo{
			SectorNumber: sid,
			CommR:        []byte("cats"),
			SealEpoch:    10,
			DealIDs:      []uint64{dealid},
		})
	ApplyOK(t, ret)

	h.BlockHeight += 100
	ret, _ = h.Invoke(t, worker, minerAddr, actors.MAMethods.ProveCommitSector,
		&actors.SectorProveCommitInfo{
			Proof:    []byte("prooofy"),
			SectorID: sid,
			DealIDs:  []uint64{dealid}, // TODO: weird that i have to pass this again
		})
	ApplyOK(t, ret)
}

func assertSectorIDs(h *Harness, t *testing.T, maddr address.Address, ids []uint64) {
	t.Helper()
	sectors, err := getMinerSectorSet(context.TODO(), h.vm.StateTree(), h.bs, maddr)
	if err != nil {
		t.Fatal(err)
	}

	if len(sectors) != len(ids) {
		t.Fatal("miner has wrong number of sectors in their sector set")
	}

	all := make(map[uint64]bool)
	for _, s := range sectors {
		all[s.SectorID] = true
	}

	for _, id := range ids {
		if !all[id] {
			t.Fatal("expected to find sector ID: ", id)
		}
	}
}

func getMinerSectorSet(ctx context.Context, st types.StateTree, bs blockstore.Blockstore, maddr address.Address) ([]*api.ChainSectorInfo, error) {
	mact, err := st.GetActor(maddr)
	if err != nil {
		return nil, err
	}

	cst := hamt.CSTFromBstore(bs)

	var mstate actors.StorageMinerActorState
	if err := cst.Get(ctx, mact.Head, &mstate); err != nil {
		return nil, err
	}

	return stmgr.LoadSectorsFromSet(ctx, bs, mstate.Sectors)
}

func (h *Harness) makeFakeDeal(t *testing.T, miner, worker, client address.Address, size uint64) *actors.StorageDealProposal {
	data := make([]byte, size)
	rand.Read(data)
	commP, err := sectorbuilder.GeneratePieceCommitment(bytes.NewReader(data), size)
	if err != nil {
		t.Fatal(err)
	}

	prop := actors.StorageDealProposal{
		PieceRef:  commP[:],
		PieceSize: size,
		//PieceSerialization SerializationMode // Needs to be here as it tells how data in the sector maps to PieceRef cid

		Client:   client,
		Provider: miner,

		ProposalExpiration: 10000,
		Duration:           150,

		StoragePricePerEpoch: types.NewInt(1),
		StorageCollateral:    types.NewInt(0),
	}

	if err := api.SignWith(context.TODO(), h.w.Sign, client, &prop); err != nil {
		t.Fatal(err)
	}

	return &prop
}
@ -1,431 +0,0 @@
package actors

import (
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/actors/aerrors"
	"github.com/filecoin-project/lotus/chain/types"

	cbg "github.com/whyrusleeping/cbor-gen"
)

type MultiSigActor struct{}
type MultiSigActorState struct {
	Signers  []address.Address
	Required uint64
	NextTxID uint64

	InitialBalance types.BigInt
	StartingBlock  uint64
	UnlockDuration uint64

	//TODO: make this map/sharray/whatever
	Transactions []MTransaction
}

func (msas MultiSigActorState) canSpend(act *types.Actor, amnt types.BigInt, height uint64) bool {
	if msas.UnlockDuration == 0 {
		return true
	}

	offset := height - msas.StartingBlock
	if offset > msas.UnlockDuration {
		return true
	}

	minBalance := types.BigDiv(msas.InitialBalance, types.NewInt(msas.UnlockDuration))
	minBalance = types.BigMul(minBalance, types.NewInt(offset))
	return !minBalance.LessThan(types.BigSub(act.Balance, amnt))
}

func (msas MultiSigActorState) isSigner(addr address.Address) bool {
	for _, s := range msas.Signers {
		if s == addr {
			return true
		}
	}
	return false
}

func (msas MultiSigActorState) getTransaction(txid uint64) (*MTransaction, ActorError) {
	if txid >= uint64(len(msas.Transactions)) {
		return nil, aerrors.Newf(1, "could not get transaction (numbers of tx %d,want to get txid %d)", len(msas.Transactions), txid)
	}
	return &msas.Transactions[txid], nil
}

type MTransaction struct {
	Created uint64 // NOT USED ??
	TxID    uint64

	To     address.Address
	Value  types.BigInt
	Method uint64
	Params []byte

	Approved []address.Address
	Complete bool
	Canceled bool
	RetCode  uint64
}

func (tx MTransaction) Active() ActorError {
	if tx.Complete {
		return aerrors.New(2, "transaction already completed")
	}
	if tx.Canceled {
		return aerrors.New(3, "transaction canceled")
	}
	return nil
}

type musigMethods struct {
	MultiSigConstructor uint64
	Propose             uint64
	Approve             uint64
	Cancel              uint64
	ClearCompleted      uint64
	AddSigner           uint64
	RemoveSigner        uint64
	SwapSigner          uint64
	ChangeRequirement   uint64
}

var MultiSigMethods = musigMethods{1, 2, 3, 4, 5, 6, 7, 8, 9}

func (msa MultiSigActor) Exports() []interface{} {
	return []interface{}{
		1: msa.MultiSigConstructor,
		2: msa.Propose,
		3: msa.Approve,
		4: msa.Cancel,
		//5: msa.ClearCompleted,
		6: msa.AddSigner,
		7: msa.RemoveSigner,
		8: msa.SwapSigner,
		9: msa.ChangeRequirement,
	}
}

type MultiSigConstructorParams struct {
	Signers        []address.Address
	Required       uint64
	UnlockDuration uint64
}

func (MultiSigActor) MultiSigConstructor(act *types.Actor, vmctx types.VMContext,
	params *MultiSigConstructorParams) ([]byte, ActorError) {
	self := &MultiSigActorState{
		Signers:  params.Signers,
		Required: params.Required,
	}

	if params.UnlockDuration != 0 {
		self.InitialBalance = vmctx.Message().Value
		self.UnlockDuration = params.UnlockDuration
		self.StartingBlock = vmctx.BlockHeight()
	}

	head, err := vmctx.Storage().Put(self)
	if err != nil {
		return nil, aerrors.Wrap(err, "could not put new head")
	}
	err = vmctx.Storage().Commit(EmptyCBOR, head)
	if err != nil {
		return nil, aerrors.Wrap(err, "could not commit new head")
	}
	return nil, nil
}

type MultiSigProposeParams struct {
	To     address.Address
	Value  types.BigInt
	Method uint64
	Params []byte
}

func (MultiSigActor) load(vmctx types.VMContext) (cid.Cid, *MultiSigActorState, ActorError) {
	var self MultiSigActorState
	head := vmctx.Storage().GetHead()

	err := vmctx.Storage().Get(head, &self)
	if err != nil {
		return cid.Undef, nil, aerrors.Wrap(err, "could not get self")
	}
	return head, &self, nil
}

func (msa MultiSigActor) loadAndVerify(vmctx types.VMContext) (cid.Cid, *MultiSigActorState, ActorError) {
	head, self, err := msa.load(vmctx)
	if err != nil {
		return cid.Undef, nil, err
	}

	if !self.isSigner(vmctx.Message().From) {
		return cid.Undef, nil, aerrors.New(1, "not authorized")
	}
	return head, self, nil
}

func (MultiSigActor) save(vmctx types.VMContext, oldHead cid.Cid, self *MultiSigActorState) ActorError {
	newHead, err := vmctx.Storage().Put(self)
	if err != nil {
		return aerrors.Wrap(err, "could not put new head")
	}
	err = vmctx.Storage().Commit(oldHead, newHead)
	if err != nil {
		return aerrors.Wrap(err, "could not commit new head")
	}
	return nil

}

func (msa MultiSigActor) Propose(act *types.Actor, vmctx types.VMContext,
	params *MultiSigProposeParams) ([]byte, ActorError) {

	head, self, err := msa.loadAndVerify(vmctx)
	if err != nil {
		return nil, err
	}

	txid := self.NextTxID
	self.NextTxID++

	{
		tx := MTransaction{
			TxID:     txid,
			To:       params.To,
			Value:    params.Value,
			Method:   params.Method,
			Params:   params.Params,
			Approved: []address.Address{vmctx.Message().From},
		}
		self.Transactions = append(self.Transactions, tx)
	}

	tx, err := self.getTransaction(txid)
	if err != nil {
		return nil, err
	}

	if self.Required == 1 {
		if !self.canSpend(act, tx.Value, vmctx.BlockHeight()) {
			return nil, aerrors.New(100, "transaction amount exceeds available")
		}
		_, err := vmctx.Send(tx.To, tx.Method, tx.Value, tx.Params)
		if aerrors.IsFatal(err) {
			return nil, err
		}
		tx.RetCode = uint64(aerrors.RetCode(err))
		tx.Complete = true
	}

	err = msa.save(vmctx, head, self)
	if err != nil {
		return nil, aerrors.Wrap(err, "saving state")
	}

	// REVIEW: On one hand, I like being very explicit about how we're doing the serialization
	// on the other, maybe we shouldnt do direct calls to underlying serialization libs?
	return cbg.CborEncodeMajorType(cbg.MajUnsignedInt, tx.TxID), nil
}

type MultiSigTxID struct {
	TxID uint64
}

func (msa MultiSigActor) Approve(act *types.Actor, vmctx types.VMContext,
	params *MultiSigTxID) ([]byte, ActorError) {

	head, self, err := msa.loadAndVerify(vmctx)
	if err != nil {
		return nil, err
	}

	tx, err := self.getTransaction(params.TxID)
	if err != nil {
		return nil, err
	}

	if err := tx.Active(); err != nil {
		return nil, aerrors.Wrap(err, "could not approve")
	}

	for _, signer := range tx.Approved {
		if signer == vmctx.Message().From {
			return nil, aerrors.New(4, "already signed this message")
		}
	}
	tx.Approved = append(tx.Approved, vmctx.Message().From)
	if uint64(len(tx.Approved)) >= self.Required {
		if !self.canSpend(act, tx.Value, vmctx.BlockHeight()) {
			return nil, aerrors.New(100, "transaction amount exceeds available")
		}
		_, err := vmctx.Send(tx.To, tx.Method, tx.Value, tx.Params)
		if aerrors.IsFatal(err) {
			return nil, err
		}
		tx.RetCode = uint64(aerrors.RetCode(err))
		tx.Complete = true
	}

	return nil, msa.save(vmctx, head, self)
}

func (msa MultiSigActor) Cancel(act *types.Actor, vmctx types.VMContext,
	params *MultiSigTxID) ([]byte, ActorError) {

	head, self, err := msa.loadAndVerify(vmctx)
	if err != nil {
		return nil, err
	}

	tx, err := self.getTransaction(params.TxID)
	if err != nil {
		return nil, err
	}

	if err := tx.Active(); err != nil {
		return nil, aerrors.Wrap(err, "could not cancel")
	}

	proposer := tx.Approved[0]
	if proposer != vmctx.Message().From && self.isSigner(proposer) {
		return nil, aerrors.New(4, "cannot cancel another signers transaction")
	}
	tx.Canceled = true

	return nil, msa.save(vmctx, head, self)
}

type MultiSigAddSignerParam struct {
	Signer   address.Address
	Increase bool
}

func (msa MultiSigActor) AddSigner(act *types.Actor, vmctx types.VMContext,
	params *MultiSigAddSignerParam) ([]byte, ActorError) {

	head, self, err := msa.load(vmctx)
	if err != nil {
		return nil, err
	}

	msg := vmctx.Message()
	if msg.From != msg.To {
		return nil, aerrors.New(4, "add signer must be called by wallet itself")
	}
	if self.isSigner(params.Signer) {
		return nil, aerrors.New(5, "new address is already a signer")
	}

	self.Signers = append(self.Signers, params.Signer)
	if params.Increase {
		self.Required = self.Required + 1
	}

	return nil, msa.save(vmctx, head, self)
}

type MultiSigRemoveSignerParam struct {
	Signer   address.Address
	Decrease bool
}

func (msa MultiSigActor) RemoveSigner(act *types.Actor, vmctx types.VMContext,
	params *MultiSigRemoveSignerParam) ([]byte, ActorError) {

	head, self, err := msa.load(vmctx)
	if err != nil {
		return nil, err
	}

	msg := vmctx.Message()
	if msg.From != msg.To {
		return nil, aerrors.New(4, "remove signer must be called by wallet itself")
	}
	if !self.isSigner(params.Signer) {
		return nil, aerrors.New(5, "given address was not a signer")
	}

	newSigners := make([]address.Address, 0, len(self.Signers)-1)
	for _, s := range self.Signers {
		if s != params.Signer {
			newSigners = append(newSigners, s)
		}
	}
	if params.Decrease || uint64(len(self.Signers)-1) < self.Required {
		self.Required = self.Required - 1
	}

	self.Signers = newSigners

	return nil, msa.save(vmctx, head, self)
}

type MultiSigSwapSignerParams struct {
	From address.Address
	To   address.Address
}

func (msa MultiSigActor) SwapSigner(act *types.Actor, vmctx types.VMContext,
	params *MultiSigSwapSignerParams) ([]byte, ActorError) {

	head, self, err := msa.load(vmctx)
	if err != nil {
		return nil, err
	}

	msg := vmctx.Message()
	if msg.From != msg.To {
		return nil, aerrors.New(4, "swap signer must be called by wallet itself")
	}

	if !self.isSigner(params.From) {
		return nil, aerrors.New(5, "given old address was not a signer")
	}
	if self.isSigner(params.To) {
		return nil, aerrors.New(6, "given new address was already a signer")
	}

	newSigners := make([]address.Address, 0, len(self.Signers))
	for _, s := range self.Signers {
		if s != params.From {
			newSigners = append(newSigners, s)
		}
	}
	newSigners = append(newSigners, params.To)
	self.Signers = newSigners

	return nil, msa.save(vmctx, head, self)
}

type MultiSigChangeReqParams struct {
	Req uint64
}

func (msa MultiSigActor) ChangeRequirement(act *types.Actor, vmctx types.VMContext,
	params *MultiSigChangeReqParams) ([]byte, ActorError) {

	head, self, err := msa.load(vmctx)
	if err != nil {
		return nil, err
	}

	msg := vmctx.Message()
	if msg.From != msg.To {
		return nil, aerrors.New(4, "change requirement must be called by wallet itself")
	}

	if params.Req < 1 {
		return nil, aerrors.New(5, "requirement must be at least 1")
	}

	if params.Req > uint64(len(self.Signers)) {
		return nil, aerrors.New(6, "requirement must be at most the numbers of signers")
	}

	self.Required = params.Req
	return nil, msa.save(vmctx, head, self)
}
@ -1,97 +0,0 @@
package actors_test

import (
	"testing"

	cbor "github.com/ipfs/go-ipld-cbor"
	"github.com/stretchr/testify/assert"
	cbg "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/vm"
)

func TestMultiSigCreate(t *testing.T) {
	var creatorAddr, sig1Addr, sig2Addr, outsideAddr address.Address
	opts := []HarnessOpt{
		HarnessAddr(&creatorAddr, 100000),
		HarnessAddr(&sig1Addr, 100000),
		HarnessAddr(&sig2Addr, 100000),
		HarnessAddr(&outsideAddr, 100000),
	}

	h := NewHarness(t, opts...)
	ret, _ := h.CreateActor(t, creatorAddr, actors.MultisigCodeCid,
		&actors.MultiSigConstructorParams{
			Signers:  []address.Address{creatorAddr, sig1Addr, sig2Addr},
			Required: 2,
		})
	ApplyOK(t, ret)
}

func ApplyOK(t testing.TB, ret *vm.ApplyRet) {
	t.Helper()
	if ret.ExitCode != 0 {
		t.Fatalf("exit code should be 0, got %d, actorErr: %+v", ret.ExitCode, ret.ActorErr)
	}
	if ret.ActorErr != nil {
		t.Fatalf("somehow got an error with exit == 0: %s", ret.ActorErr)
	}
}

func TestMultiSigOps(t *testing.T) {
	var creatorAddr, sig1Addr, sig2Addr, outsideAddr address.Address
	var multSigAddr address.Address
	opts := []HarnessOpt{
		HarnessAddr(&creatorAddr, 100000),
		HarnessAddr(&sig1Addr, 100000),
		HarnessAddr(&sig2Addr, 100000),
		HarnessAddr(&outsideAddr, 100000),
		HarnessActor(&multSigAddr, &creatorAddr, actors.MultisigCodeCid,
			func() cbg.CBORMarshaler {
				return &actors.MultiSigConstructorParams{
					Signers:  []address.Address{creatorAddr, sig1Addr, sig2Addr},
					Required: 2,
				}
			}),
	}

	h := NewHarness(t, opts...)
	{
		const chargeVal = 2000
		// Send funds into the multisig
		ret, _ := h.SendFunds(t, creatorAddr, multSigAddr, types.NewInt(chargeVal))
		ApplyOK(t, ret)
		h.AssertBalanceChange(t, creatorAddr, -chargeVal)
		h.AssertBalanceChange(t, multSigAddr, chargeVal)
	}

	{
		// Transfer funds outside of multsig
		const sendVal = 1000
		ret, _ := h.Invoke(t, creatorAddr, multSigAddr, actors.MultiSigMethods.Propose,
			&actors.MultiSigProposeParams{
				To:    outsideAddr,
				Value: types.NewInt(sendVal),
			})
		ApplyOK(t, ret)
		var txIDParam actors.MultiSigTxID
		err := cbor.DecodeInto(ret.Return, &txIDParam.TxID)
		assert.NoError(t, err, "decoding txid")

		ret, _ = h.Invoke(t, outsideAddr, multSigAddr, actors.MultiSigMethods.Approve,
			&txIDParam)
		assert.Equal(t, uint8(1), ret.ExitCode, "outsideAddr should not approve")
		h.AssertBalanceChange(t, multSigAddr, 0)

		ret2, _ := h.Invoke(t, sig1Addr, multSigAddr, actors.MultiSigMethods.Approve,
			&txIDParam)
		ApplyOK(t, ret2)

		h.AssertBalanceChange(t, outsideAddr, sendVal)
		h.AssertBalanceChange(t, multSigAddr, -sendVal)
	}

}
@ -1,302 +0,0 @@
package actors

import (
	"bytes"
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/minio/blake2b-simd"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/build"
	"github.com/filecoin-project/lotus/chain/actors/aerrors"
	"github.com/filecoin-project/lotus/chain/types"
)

type PaymentChannelActor struct{}

type PaymentInfo struct {
	PayChActor     address.Address
	Payer          address.Address
	ChannelMessage *cid.Cid

	Vouchers []*types.SignedVoucher
}

type LaneState struct {
	Closed   bool
	Redeemed types.BigInt
	Nonce    uint64
}

type PaymentChannelActorState struct {
	From address.Address
	To   address.Address

	ToSend types.BigInt

	ClosingAt      uint64
	MinCloseHeight uint64

	// TODO: needs to be map[uint64]*laneState
	// waiting on refmt#35 to be fixed
	LaneStates map[string]*LaneState
}

func (pca PaymentChannelActor) Exports() []interface{} {
	return []interface{}{
		1: pca.Constructor,
		2: pca.UpdateChannelState,
		3: pca.Close,
		4: pca.Collect,
		5: pca.GetOwner,
		6: pca.GetToSend,
	}
}

type pcaMethods struct {
	Constructor        uint64
	UpdateChannelState uint64
	Close              uint64
	Collect            uint64
	GetOwner           uint64
	GetToSend          uint64
}

var PCAMethods = pcaMethods{1, 2, 3, 4, 5, 6}

type PCAConstructorParams struct {
	To address.Address
}

func (pca PaymentChannelActor) Constructor(act *types.Actor, vmctx types.VMContext, params *PCAConstructorParams) ([]byte, ActorError) {
	var self PaymentChannelActorState
	self.From = vmctx.Origin()
	self.To = params.To
	self.LaneStates = make(map[string]*LaneState)

	storage := vmctx.Storage()
	c, err := storage.Put(&self)
	if err != nil {
		return nil, err
	}

	if err := storage.Commit(EmptyCBOR, c); err != nil {
		return nil, err
	}

	return nil, nil
}

type PCAUpdateChannelStateParams struct {
	Sv     types.SignedVoucher
	Secret []byte
	Proof  []byte
}

func hash(b []byte) []byte {
	s := blake2b.Sum256(b)
	return s[:]
}

type PaymentVerifyParams struct {
	Extra []byte
	Proof []byte
}

func (pca PaymentChannelActor) UpdateChannelState(act *types.Actor, vmctx types.VMContext, params *PCAUpdateChannelStateParams) ([]byte, ActorError) {
	var self PaymentChannelActorState
	oldstate := vmctx.Storage().GetHead()
	storage := vmctx.Storage()
	if err := storage.Get(oldstate, &self); err != nil {
		return nil, err
	}

	sv := params.Sv

	vb, nerr := sv.SigningBytes()
	if nerr != nil {
		return nil, aerrors.Escalate(nerr, "failed to serialize signedvoucher")
	}

	if err := vmctx.VerifySignature(sv.Signature, self.From, vb); err != nil {
		return nil, err
	}

	if vmctx.BlockHeight() < sv.TimeLock {
		return nil, aerrors.New(2, "cannot use this voucher yet!")
	}

	if len(sv.SecretPreimage) > 0 {
		if !bytes.Equal(hash(params.Secret), sv.SecretPreimage) {
			return nil, aerrors.New(3, "incorrect secret!")
		}
	}

	if sv.Extra != nil {
		encoded, err := SerializeParams(&PaymentVerifyParams{sv.Extra.Data, params.Proof})
		if err != nil {
			return nil, err
		}

		_, err = vmctx.Send(sv.Extra.Actor, sv.Extra.Method, types.NewInt(0), encoded)
		if err != nil {
			return nil, aerrors.Newf(4, "spend voucher verification failed: %s", err)
		}
	}

	ls, ok := self.LaneStates[fmt.Sprint(sv.Lane)]
	if !ok {
		ls = new(LaneState)
		ls.Redeemed = types.NewInt(0) // TODO: kinda annoying that this doesnt default to a usable value
		self.LaneStates[fmt.Sprint(sv.Lane)] = ls
	}
	if ls.Closed {
		return nil, aerrors.New(5, "cannot redeem a voucher on a closed lane")
	}

	if ls.Nonce > sv.Nonce {
		return nil, aerrors.New(6, "voucher has an outdated nonce, cannot redeem")
	}

	mergeValue := types.NewInt(0)
	for _, merge := range sv.Merges {
		if merge.Lane == sv.Lane {
			return nil, aerrors.New(7, "voucher cannot merge its own lane")
		}

		ols := self.LaneStates[fmt.Sprint(merge.Lane)]

		if ols.Nonce >= merge.Nonce {
			return nil, aerrors.New(8, "merge in voucher has outdated nonce, cannot redeem")
		}

		mergeValue = types.BigAdd(mergeValue, ols.Redeemed)
		ols.Nonce = merge.Nonce
	}

	ls.Nonce = sv.Nonce
	balanceDelta := types.BigSub(sv.Amount, types.BigAdd(mergeValue, ls.Redeemed))
	ls.Redeemed = sv.Amount

	newSendBalance := types.BigAdd(self.ToSend, balanceDelta)
	if newSendBalance.LessThan(types.NewInt(0)) {
		// TODO: is this impossible?
		return nil, aerrors.New(9, "voucher would leave channel balance negative")
	}

	if newSendBalance.GreaterThan(act.Balance) {
		return nil, aerrors.New(10, "not enough funds in channel to cover voucher")
	}

	log.Info("vals: ", newSendBalance, sv.Amount, balanceDelta, mergeValue, ls.Redeemed)
	self.ToSend = newSendBalance

	if sv.MinCloseHeight != 0 {
		if self.ClosingAt != 0 && self.ClosingAt < sv.MinCloseHeight {
			self.ClosingAt = sv.MinCloseHeight
		}
		if self.MinCloseHeight < sv.MinCloseHeight {
			self.MinCloseHeight = sv.MinCloseHeight
		}
	}

	ncid, err := storage.Put(&self)
	if err != nil {
		return nil, err
	}
	if err := storage.Commit(oldstate, ncid); err != nil {
		return nil, err
	}

	return nil, nil
}

func (pca PaymentChannelActor) Close(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, aerrors.ActorError) {
	var self PaymentChannelActorState
	storage := vmctx.Storage()
	oldstate := storage.GetHead()
	if err := storage.Get(oldstate, &self); err != nil {
		return nil, err
	}

	if vmctx.Message().From != self.From && vmctx.Message().From != self.To {
		return nil, aerrors.New(1, "not authorized to close channel")
	}

	if self.ClosingAt != 0 {
		return nil, aerrors.New(2, "channel already closing")
	}

	self.ClosingAt = vmctx.BlockHeight() + build.PaymentChannelClosingDelay
	if self.ClosingAt < self.MinCloseHeight {
		self.ClosingAt = self.MinCloseHeight
	}

	ncid, err := storage.Put(&self)
	if err != nil {
		return nil, err
	}
	if err := storage.Commit(oldstate, ncid); err != nil {
		return nil, err
	}

	return nil, nil
}

func (pca PaymentChannelActor) Collect(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, aerrors.ActorError) {
	var self PaymentChannelActorState
	storage := vmctx.Storage()
	oldstate := storage.GetHead()
	if err := storage.Get(oldstate, &self); err != nil {
		return nil, err
	}

	if self.ClosingAt == 0 {
		return nil, aerrors.New(1, "payment channel not closing or closed")
	}

	if vmctx.BlockHeight() < self.ClosingAt {
		return nil, aerrors.New(2, "payment channel not closed yet")
	}
	_, err := vmctx.Send(self.From, 0, types.BigSub(act.Balance, self.ToSend), nil)
	if err != nil {
		return nil, err
	}
	_, err = vmctx.Send(self.To, 0, self.ToSend, nil)
	if err != nil {
		return nil, err
	}

	self.ToSend = types.NewInt(0)

	ncid, err := storage.Put(&self)
	if err != nil {
		return nil, err
	}
	if err := storage.Commit(oldstate, ncid); err != nil {
		return nil, err
	}

	return nil, nil
}

func (pca PaymentChannelActor) GetOwner(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, aerrors.ActorError) {
	var self PaymentChannelActorState
	storage := vmctx.Storage()
	if err := storage.Get(storage.GetHead(), &self); err != nil {
		return nil, err
	}

	return self.From.Bytes(), nil
}

func (pca PaymentChannelActor) GetToSend(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, aerrors.ActorError) {
	var self PaymentChannelActorState
	storage := vmctx.Storage()
	if err := storage.Get(storage.GetHead(), &self); err != nil {
		return nil, err
	}

	return self.ToSend.Bytes(), nil
}
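Illustrative arithmetic only, following the lane accounting in UpdateChannelState above: the payable delta is the voucher amount minus what this lane (plus any merged lanes) has already been credited, and ToSend moves by that delta. The numbers are made up and the BigInt types are replaced by plain integers for readability.

```go
package main

import "fmt"

func main() {
	redeemed := int64(60)  // ls.Redeemed before this voucher
	toSend := int64(60)    // self.ToSend before this voucher
	amount := int64(100)   // sv.Amount on the new voucher
	mergeValue := int64(0) // no merged lanes in this example

	// balanceDelta := sv.Amount - (mergeValue + ls.Redeemed)
	balanceDelta := amount - (mergeValue + redeemed)
	redeemed = amount  // lane now credited up to the voucher amount
	toSend += balanceDelta

	fmt.Println(balanceDelta, redeemed, toSend) // 40 100 100
}
```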
@ -1,93 +0,0 @@
package actors_test

import (
	"context"
	"testing"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/actors"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/chain/wallet"
)

func TestPaychCreate(t *testing.T) {
	var creatorAddr, targetAddr address.Address
	opts := []HarnessOpt{
		HarnessAddr(&creatorAddr, 100000),
		HarnessAddr(&targetAddr, 100000),
	}

	h := NewHarness(t, opts...)
	ret, _ := h.CreateActor(t, creatorAddr, actors.PaymentChannelCodeCid,
		&actors.PCAConstructorParams{
			To: targetAddr,
		})
	ApplyOK(t, ret)
}

func signVoucher(t *testing.T, w *wallet.Wallet, addr address.Address, sv *types.SignedVoucher) {
	vb, err := sv.SigningBytes()
	if err != nil {
		t.Fatal(err)
	}

	sig, err := w.Sign(context.TODO(), addr, vb)
	if err != nil {
		t.Fatal(err)
	}

	sv.Signature = sig
}

func TestPaychUpdate(t *testing.T) {
	var creatorAddr, targetAddr address.Address
	opts := []HarnessOpt{
		HarnessAddr(&creatorAddr, 100000),
		HarnessAddr(&targetAddr, 100000),
	}

	h := NewHarness(t, opts...)
	ret, _ := h.CreateActor(t, creatorAddr, actors.PaymentChannelCodeCid,
		&actors.PCAConstructorParams{
			To: targetAddr,
		})
	ApplyOK(t, ret)
	pch, err := address.NewFromBytes(ret.Return)
	if err != nil {
		t.Fatal(err)
	}

	ret, _ = h.SendFunds(t, creatorAddr, pch, types.NewInt(5000))
	ApplyOK(t, ret)

	sv := &types.SignedVoucher{
		Amount: types.NewInt(100),
		Nonce:  1,
	}
	signVoucher(t, h.w, creatorAddr, sv)

	ret, _ = h.Invoke(t, targetAddr, pch, actors.PCAMethods.UpdateChannelState, &actors.PCAUpdateChannelStateParams{
		Sv: *sv,
	})
	ApplyOK(t, ret)

	ret, _ = h.Invoke(t, targetAddr, pch, actors.PCAMethods.GetToSend, nil)
	ApplyOK(t, ret)

	bi := types.BigFromBytes(ret.Return)
	if bi.String() != "100" {
		t.Fatal("toSend amount was wrong: ", bi.String())
	}

	ret, _ = h.Invoke(t, targetAddr, pch, actors.PCAMethods.Close, nil)
	ApplyOK(t, ret)

	// now we have to 'wait' for the chain to advance.
	h.BlockHeight = 1000

	ret, _ = h.Invoke(t, targetAddr, pch, actors.PCAMethods.Collect, nil)
	ApplyOK(t, ret)

	h.AssertBalanceChange(t, targetAddr, 100)
	h.AssertBalanceChange(t, creatorAddr, -100)
}
@ -1,670 +0,0 @@
|
||||
package actors
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"sort"
|
||||
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-amt-ipld"
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/ipfs/go-hamt-ipld"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-cbor-util"
|
||||
"github.com/filecoin-project/go-sectorbuilder"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/aerrors"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type StorageMarketActor struct{}
|
||||
|
||||
type smaMethods struct {
|
||||
Constructor uint64
|
||||
WithdrawBalance uint64
|
||||
AddBalance uint64
|
||||
CheckLockedBalance uint64
|
||||
PublishStorageDeals uint64
|
||||
HandleCronAction uint64
|
||||
SettleExpiredDeals uint64
|
||||
ProcessStorageDealsPayment uint64
|
||||
SlashStorageDealCollateral uint64
|
||||
GetLastExpirationFromDealIDs uint64
|
||||
ActivateStorageDeals uint64
|
||||
ComputeDataCommitment uint64
|
||||
}
|
||||
|
||||
var SMAMethods = smaMethods{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
|
||||
|
||||
func (sma StorageMarketActor) Exports() []interface{} {
|
||||
return []interface{}{
|
||||
2: sma.WithdrawBalance,
|
||||
3: sma.AddBalance,
|
||||
// 4: sma.CheckLockedBalance,
|
||||
5: sma.PublishStorageDeals,
|
||||
// 6: sma.HandleCronAction,
|
||||
// 7: sma.SettleExpiredDeals,
|
||||
// 8: sma.ProcessStorageDealsPayment,
|
||||
// 9: sma.SlashStorageDealCollateral,
|
||||
// 10: sma.GetLastExpirationFromDealIDs,
|
||||
11: sma.ActivateStorageDeals, // TODO: move under PublishStorageDeals after specs team approves
|
||||
12: sma.ComputeDataCommitment,
|
||||
}
|
||||
}
|
||||
|
||||
type StorageParticipantBalance struct {
|
||||
Locked types.BigInt
|
||||
Available types.BigInt
|
||||
}
|
||||
|
||||
type StorageMarketState struct {
|
||||
Balances cid.Cid // hamt<addr, StorageParticipantBalance>
|
||||
Deals cid.Cid // amt<StorageDeal>
|
||||
|
||||
NextDealID uint64 // TODO: spec
|
||||
}
|
||||
|
||||
// TODO: Drop in favour of car storage
|
||||
type SerializationMode = uint64
|
||||
|
||||
const (
|
||||
SerializationUnixFSv0 = iota
|
||||
// IPLD / car
|
||||
)
|
||||
|
||||
type StorageDealProposal struct {
|
||||
PieceRef []byte // cid bytes // TODO: spec says to use cid.Cid, probably not a good idea
|
||||
PieceSize uint64
|
||||
|
||||
Client address.Address
|
||||
Provider address.Address
|
||||
|
||||
ProposalExpiration uint64
|
||||
Duration uint64 // TODO: spec
|
||||
|
||||
StoragePricePerEpoch types.BigInt
|
||||
StorageCollateral types.BigInt
|
||||
|
||||
ProposerSignature *types.Signature
|
||||
}
|
||||
|
||||
func (sdp *StorageDealProposal) TotalStoragePrice() types.BigInt {
|
||||
return types.BigMul(sdp.StoragePricePerEpoch, types.NewInt(sdp.Duration))
|
||||
}
|
||||
|
||||
type SignFunc = func(context.Context, []byte) (*types.Signature, error)
|
||||
|
||||
func (sdp *StorageDealProposal) Sign(ctx context.Context, sign SignFunc) error {
|
||||
if sdp.ProposerSignature != nil {
|
||||
return xerrors.New("signature already present in StorageDealProposal")
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := sdp.MarshalCBOR(&buf); err != nil {
|
||||
return err
|
||||
}
|
||||
sig, err := sign(ctx, buf.Bytes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sdp.ProposerSignature = sig
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sdp *StorageDealProposal) Cid() (cid.Cid, error) {
|
||||
nd, err := cborutil.AsIpld(sdp)
|
||||
if err != nil {
|
||||
return cid.Undef, err
|
||||
}
|
||||
|
||||
return nd.Cid(), nil
|
||||
}
|
||||
|
||||
func (sdp *StorageDealProposal) Verify() error {
|
||||
unsigned := *sdp
|
||||
unsigned.ProposerSignature = nil
|
||||
var buf bytes.Buffer
|
||||
if err := unsigned.MarshalCBOR(&buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sdp.ProposerSignature.Verify(sdp.Client, buf.Bytes())
|
||||
}
|
||||
|
||||
type OnChainDeal struct {
|
||||
PieceRef []byte // cid bytes // TODO: spec says to use cid.Cid, probably not a good idea
|
||||
PieceSize uint64
|
||||
|
||||
Client address.Address
|
||||
Provider address.Address
|
||||
|
||||
ProposalExpiration uint64
|
||||
Duration uint64 // TODO: spec
|
||||
|
||||
StoragePricePerEpoch types.BigInt
|
||||
StorageCollateral types.BigInt
|
||||
ActivationEpoch uint64 // 0 = inactive
|
||||
}
|
||||
|
||||
type WithdrawBalanceParams struct {
|
||||
Balance types.BigInt
|
||||
}
|
||||
|
||||
func (sma StorageMarketActor) WithdrawBalance(act *types.Actor, vmctx types.VMContext, params *WithdrawBalanceParams) ([]byte, ActorError) {
|
||||
// TODO: (spec) this should be 2-stage
|
||||
|
||||
var self StorageMarketState
|
||||
old := vmctx.Storage().GetHead()
|
||||
if err := vmctx.Storage().Get(old, &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b, bnd, err := GetMarketBalances(vmctx.Context(), vmctx.Ipld(), self.Balances, vmctx.Message().From)
|
||||
if err != nil {
|
||||
return nil, aerrors.Wrap(err, "could not get balance")
|
||||
}
|
||||
|
||||
balance := b[0]
|
||||
|
||||
if balance.Available.LessThan(params.Balance) {
|
||||
return nil, aerrors.Newf(1, "cannot withdraw more funds than available: %s > %s", params.Balance, b[0].Available)
|
||||
}
|
||||
|
||||
balance.Available = types.BigSub(balance.Available, params.Balance)
|
||||
|
||||
_, err = vmctx.Send(vmctx.Message().From, 0, params.Balance, nil)
|
||||
if err != nil {
|
||||
return nil, aerrors.Wrap(err, "sending funds failed")
|
||||
}
|
||||
|
||||
bcid, err := setMarketBalances(vmctx, bnd, map[address.Address]StorageParticipantBalance{
|
||||
vmctx.Message().From: balance,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
self.Balances = bcid
|
||||
|
||||
nroot, err := vmctx.Storage().Put(&self)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, vmctx.Storage().Commit(old, nroot)
|
||||
}
|
||||
|
||||
func (sma StorageMarketActor) AddBalance(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) {
|
||||
var self StorageMarketState
|
||||
old := vmctx.Storage().GetHead()
|
||||
if err := vmctx.Storage().Get(old, &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b, bnd, err := GetMarketBalances(vmctx.Context(), vmctx.Ipld(), self.Balances, vmctx.Message().From)
|
||||
if err != nil {
|
||||
return nil, aerrors.Wrap(err, "could not get balance")
|
||||
}
|
||||
|
||||
balance := b[0]
|
||||
|
||||
balance.Available = types.BigAdd(balance.Available, vmctx.Message().Value)
|
||||
|
||||
bcid, err := setMarketBalances(vmctx, bnd, map[address.Address]StorageParticipantBalance{
|
||||
vmctx.Message().From: balance,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
self.Balances = bcid
|
||||
|
||||
nroot, err := vmctx.Storage().Put(&self)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, vmctx.Storage().Commit(old, nroot)
|
||||
}
|
||||
|
||||
func setMarketBalances(vmctx types.VMContext, nd *hamt.Node, set map[address.Address]StorageParticipantBalance) (cid.Cid, ActorError) {
|
||||
keys := make([]address.Address, 0, len(set))
|
||||
for k := range set {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Slice(keys, func(i, j int) bool {
|
||||
return bytes.Compare(keys[i].Bytes(), keys[j].Bytes()) < 0
|
||||
})
|
||||
for _, addr := range keys {
|
||||
balance := set[addr]
|
||||
if err := nd.Set(vmctx.Context(), string(addr.Bytes()), &balance); err != nil {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "setting new balance")
|
||||
}
|
||||
}
|
||||
if err := nd.Flush(vmctx.Context()); err != nil {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "flushing balance hamt")
|
||||
}
|
||||
|
||||
c, err := vmctx.Ipld().Put(vmctx.Context(), nd)
|
||||
if err != nil {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "failed to balances storage")
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func GetMarketBalances(ctx context.Context, store *hamt.CborIpldStore, rcid cid.Cid, addrs ...address.Address) ([]StorageParticipantBalance, *hamt.Node, ActorError) {
|
||||
ctx, span := trace.StartSpan(ctx, "GetMarketBalances")
|
||||
defer span.End()
|
||||
|
||||
nd, err := hamt.LoadNode(ctx, store, rcid)
|
||||
if err != nil {
|
||||
return nil, nil, aerrors.HandleExternalError(err, "failed to load miner set")
|
||||
}
|
||||
|
||||
out := make([]StorageParticipantBalance, len(addrs))
|
||||
|
||||
for i, a := range addrs {
|
||||
var balance StorageParticipantBalance
|
||||
err = nd.Find(ctx, string(a.Bytes()), &balance)
|
||||
switch err {
|
||||
case hamt.ErrNotFound:
|
||||
out[i] = StorageParticipantBalance{
|
||||
Locked: types.NewInt(0),
|
||||
Available: types.NewInt(0),
|
||||
}
|
||||
case nil:
|
||||
out[i] = balance
|
||||
default:
|
||||
return nil, nil, aerrors.HandleExternalError(err, "failed to do set lookup")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return out, nd, nil
|
||||
}
|
||||
|
||||
/*
|
||||
func (sma StorageMarketActor) CheckLockedBalance(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) {
|
||||
|
||||
}
|
||||
*/
|
||||
|
||||
type PublishStorageDealsParams struct {
|
||||
Deals []StorageDealProposal
|
||||
}
|
||||
|
||||
type PublishStorageDealResponse struct {
|
||||
DealIDs []uint64
|
||||
}
|
||||
|
||||
func (sma StorageMarketActor) PublishStorageDeals(act *types.Actor, vmctx types.VMContext, params *PublishStorageDealsParams) ([]byte, ActorError) {
|
||||
var self StorageMarketState
|
||||
old := vmctx.Storage().GetHead()
|
||||
if err := vmctx.Storage().Get(old, &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
deals, err := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.Deals)
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "loading deals amt")
|
||||
}
|
||||
|
||||
// todo: handle duplicate deals
|
||||
|
||||
if len(params.Deals) == 0 {
|
||||
return nil, aerrors.New(1, "no storage deals in params.Deals")
|
||||
}
|
||||
|
||||
out := PublishStorageDealResponse{
|
||||
DealIDs: make([]uint64, len(params.Deals)),
|
||||
}
|
||||
|
||||
workerBytes, aerr := vmctx.Send(params.Deals[0].Provider, MAMethods.GetWorkerAddr, types.NewInt(0), nil)
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
providerWorker, err := address.NewFromBytes(workerBytes)
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "parsing provider worker address bytes")
|
||||
}
|
||||
|
||||
// TODO: REVIEW: Do we want to check if provider exists in the power actor?
|
||||
|
||||
for i, deal := range params.Deals {
|
||||
if err := self.validateDeal(vmctx, deal, providerWorker); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err := deals.Set(self.NextDealID, &OnChainDeal{
|
||||
PieceRef: deal.PieceRef,
|
||||
PieceSize: deal.PieceSize,
|
||||
|
||||
Client: deal.Client,
|
||||
Provider: deal.Provider,
|
||||
|
||||
ProposalExpiration: deal.ProposalExpiration,
|
||||
Duration: deal.Duration,
|
||||
|
||||
StoragePricePerEpoch: deal.StoragePricePerEpoch,
|
||||
StorageCollateral: deal.StorageCollateral,
|
||||
ActivationEpoch: 0,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "setting deal in deal AMT")
|
||||
}
|
||||
out.DealIDs[i] = self.NextDealID
|
||||
|
||||
self.NextDealID++
|
||||
}
|
||||
|
||||
dealsCid, err := deals.Flush()
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "saving deals AMT")
|
||||
}
|
||||
|
||||
self.Deals = dealsCid
|
||||
|
||||
nroot, err := vmctx.Storage().Put(&self)
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "storing state failed")
|
||||
}
|
||||
|
||||
aerr = vmctx.Storage().Commit(old, nroot)
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
var outBuf bytes.Buffer
|
||||
if err := out.MarshalCBOR(&outBuf); err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "serialising output")
|
||||
}
|
||||
|
||||
return outBuf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (st *StorageMarketState) validateDeal(vmctx types.VMContext, deal StorageDealProposal, providerWorker address.Address) aerrors.ActorError {
|
||||
ctx, span := trace.StartSpan(vmctx.Context(), "validateDeal")
|
||||
defer span.End()
|
||||
|
||||
if vmctx.BlockHeight() > deal.ProposalExpiration {
|
||||
return aerrors.New(1, "deal proposal already expired")
|
||||
}
|
||||
|
||||
if vmctx.Message().From != providerWorker {
|
||||
return aerrors.New(2, "Deals must be submitted by the miner worker")
|
||||
}
|
||||
|
||||
if err := deal.Verify(); err != nil {
|
||||
return aerrors.Absorb(err, 3, "verifying proposer signature")
|
||||
}
|
||||
|
||||
// TODO: do some caching (changes gas so needs to be in spec too)
|
||||
b, bnd, aerr := GetMarketBalances(ctx, vmctx.Ipld(), st.Balances, deal.Client, providerWorker)
|
||||
if aerr != nil {
|
||||
return aerrors.Wrap(aerr, "getting client, and provider balances")
|
||||
}
|
||||
clientBalance := b[0]
|
||||
providerBalance := b[1]
|
||||
|
||||
totalPrice := deal.TotalStoragePrice()
|
||||
|
||||
if clientBalance.Available.LessThan(totalPrice) {
|
||||
return aerrors.Newf(5, "client doesn't have enough available funds to cover storage price; %d < %d", clientBalance.Available, totalPrice)
|
||||
}
|
||||
|
||||
clientBalance = lockFunds(clientBalance, totalPrice)
|
||||
|
||||
// TODO: REVIEW: Not clear who pays for this
|
||||
if providerBalance.Available.LessThan(deal.StorageCollateral) {
|
||||
return aerrors.Newf(6, "provider doesn't have enough available funds to cover StorageCollateral; %d < %d", providerBalance.Available, deal.StorageCollateral)
|
||||
}
|
||||
|
||||
providerBalance = lockFunds(providerBalance, deal.StorageCollateral)
|
||||
|
||||
// TODO: piece checks (e.g. size > sectorSize)?
|
||||
|
||||
bcid, aerr := setMarketBalances(vmctx, bnd, map[address.Address]StorageParticipantBalance{
|
||||
deal.Client: clientBalance,
|
||||
providerWorker: providerBalance,
|
||||
})
|
||||
if aerr != nil {
|
||||
return aerr
|
||||
}
|
||||
|
||||
st.Balances = bcid
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type ActivateStorageDealsParams struct {
|
||||
Deals []uint64
|
||||
}
|
||||
|
||||
func (sma StorageMarketActor) ActivateStorageDeals(act *types.Actor, vmctx types.VMContext, params *ActivateStorageDealsParams) ([]byte, ActorError) {
|
||||
var self StorageMarketState
|
||||
old := vmctx.Storage().GetHead()
|
||||
if err := vmctx.Storage().Get(old, &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
deals, err := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.Deals)
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "loading deals amt")
|
||||
}
|
||||
|
||||
for _, deal := range params.Deals {
|
||||
var dealInfo OnChainDeal
|
||||
if err := deals.Get(deal, &dealInfo); err != nil {
|
||||
if _, is := err.(*amt.ErrNotFound); is {
|
||||
return nil, aerrors.New(3, "deal not found")
|
||||
}
|
||||
return nil, aerrors.HandleExternalError(err, "getting deal info failed")
|
||||
}
|
||||
|
||||
if vmctx.Message().From != dealInfo.Provider {
|
||||
return nil, aerrors.New(1, "ActivateStorageDeals can only be called by the deal provider")
|
||||
}
|
||||
|
||||
if vmctx.BlockHeight() > dealInfo.ProposalExpiration {
|
||||
return nil, aerrors.New(2, "deal cannot be activated: proposal expired")
|
||||
}
|
||||
|
||||
if dealInfo.ActivationEpoch > 0 {
|
||||
// this probably can't happen in practice
|
||||
return nil, aerrors.New(3, "deal already active")
|
||||
}
|
||||
|
||||
dealInfo.ActivationEpoch = vmctx.BlockHeight()
|
||||
|
||||
if err := deals.Set(deal, &dealInfo); err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "setting deal info in AMT failed")
|
||||
}
|
||||
}
|
||||
|
||||
dealsCid, err := deals.Flush()
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "saving deals AMT")
|
||||
}
|
||||
|
||||
self.Deals = dealsCid
|
||||
|
||||
nroot, err := vmctx.Storage().Put(&self)
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "storing state failed")
|
||||
}
|
||||
|
||||
aerr := vmctx.Storage().Commit(old, nroot)
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type ProcessStorageDealsPaymentParams struct {
|
||||
DealIDs []uint64
|
||||
}
|
||||
|
||||
func (sma StorageMarketActor) ProcessStorageDealsPayment(act *types.Actor, vmctx types.VMContext, params *ProcessStorageDealsPaymentParams) ([]byte, ActorError) {
|
||||
var self StorageMarketState
|
||||
old := vmctx.Storage().GetHead()
|
||||
if err := vmctx.Storage().Get(old, &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
deals, err := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.Deals)
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "loading deals amt")
|
||||
}
|
||||
|
||||
// TODO: Would be nice if send could assert actor type
|
||||
workerBytes, aerr := vmctx.Send(vmctx.Message().From, MAMethods.GetWorkerAddr, types.NewInt(0), nil)
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
providerWorker, err := address.NewFromBytes(workerBytes)
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "parsing provider worker address bytes")
|
||||
}
|
||||
|
||||
for _, deal := range params.DealIDs {
|
||||
var dealInfo OnChainDeal
|
||||
if err := deals.Get(deal, &dealInfo); err != nil {
|
||||
if _, is := err.(*amt.ErrNotFound); is {
|
||||
return nil, aerrors.New(2, "deal not found")
|
||||
}
|
||||
return nil, aerrors.HandleExternalError(err, "getting deal info failed")
|
||||
}
|
||||
|
||||
if dealInfo.Provider != vmctx.Message().From {
|
||||
return nil, aerrors.New(3, "ProcessStorageDealsPayment can only be called by deal provider")
|
||||
}
|
||||
|
||||
if vmctx.BlockHeight() < dealInfo.ActivationEpoch {
|
||||
// TODO: This is probably fatal
|
||||
return nil, aerrors.New(4, "ActivationEpoch lower than block height")
|
||||
}
|
||||
|
||||
if vmctx.BlockHeight() > dealInfo.ActivationEpoch+dealInfo.Duration {
|
||||
// Deal expired, miner should drop it
|
||||
// TODO: process payment for the remainder of last proving period
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
toPay := types.BigMul(dealInfo.StoragePricePerEpoch, types.NewInt(build.SlashablePowerDelay))
|
||||
|
||||
b, bnd, aerr := GetMarketBalances(vmctx.Context(), vmctx.Ipld(), self.Balances, dealInfo.Client, providerWorker)
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
clientBal := b[0]
|
||||
providerBal := b[1]
|
||||
|
||||
clientBal.Locked, providerBal.Available = transferFunds(clientBal.Locked, providerBal.Available, toPay)
|
||||
|
||||
// TODO: call set once
|
||||
bcid, aerr := setMarketBalances(vmctx, bnd, map[address.Address]StorageParticipantBalance{
|
||||
dealInfo.Client: clientBal,
|
||||
providerWorker: providerBal,
|
||||
})
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
self.Balances = bcid
|
||||
}
|
||||
|
||||
nroot, err := vmctx.Storage().Put(&self)
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "storing state failed")
|
||||
}
|
||||
|
||||
aerr = vmctx.Storage().Commit(old, nroot)
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func lockFunds(p StorageParticipantBalance, amt types.BigInt) StorageParticipantBalance {
|
||||
p.Available, p.Locked = transferFunds(p.Available, p.Locked, amt)
|
||||
return p
|
||||
}
|
||||
|
||||
func transferFunds(from, to, amt types.BigInt) (types.BigInt, types.BigInt) {
|
||||
// TODO: some asserts
|
||||
return types.BigSub(from, amt), types.BigAdd(to, amt)
|
||||
}
|
||||
|
||||
type ComputeDataCommitmentParams struct {
|
||||
DealIDs []uint64
|
||||
SectorSize uint64
|
||||
}
|
||||
|
||||
func (sma StorageMarketActor) ComputeDataCommitment(act *types.Actor, vmctx types.VMContext, params *ComputeDataCommitmentParams) ([]byte, ActorError) {
|
||||
var self StorageMarketState
|
||||
old := vmctx.Storage().GetHead()
|
||||
if err := vmctx.Storage().Get(old, &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
deals, err := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.Deals)
|
||||
if err != nil {
|
||||
return nil, aerrors.HandleExternalError(err, "loading deals amt")
|
||||
}
|
||||
|
||||
if len(params.DealIDs) == 0 {
|
||||
return nil, aerrors.New(3, "no deal IDs")
|
||||
}
|
||||
|
||||
var pieces []sectorbuilder.PublicPieceInfo
|
||||
for _, deal := range params.DealIDs {
|
||||
var dealInfo OnChainDeal
|
||||
if err := deals.Get(deal, &dealInfo); err != nil {
|
||||
if _, is := err.(*amt.ErrNotFound); is {
|
||||
return nil, aerrors.New(4, "deal not found")
|
||||
}
|
||||
return nil, aerrors.HandleExternalError(err, "getting deal info failed")
|
||||
}
|
||||
|
||||
if dealInfo.Provider != vmctx.Message().From {
|
||||
return nil, aerrors.New(5, "referenced deal was not from caller")
|
||||
}
|
||||
|
||||
var commP [32]byte
|
||||
copy(commP[:], dealInfo.PieceRef)
|
||||
|
||||
pieces = append(pieces, sectorbuilder.PublicPieceInfo{
|
||||
Size: dealInfo.PieceSize,
|
||||
CommP: commP,
|
||||
})
|
||||
}
|
||||
|
||||
commd, err := sectorbuilder.GenerateDataCommitment(params.SectorSize, pieces)
|
||||
if err != nil {
|
||||
return nil, aerrors.Absorb(err, 6, "failed to generate data commitment from pieces")
|
||||
}
|
||||
|
||||
return commd[:], nil
|
||||
}
|
||||
|
||||
/*
|
||||
func (sma StorageMarketActor) HandleCronAction(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) {
|
||||
|
||||
}
|
||||
|
||||
func (sma StorageMarketActor) SettleExpiredDeals(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) {
|
||||
|
||||
}
|
||||
|
||||
func (sma StorageMarketActor) SlashStorageDealCollateral(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) {
|
||||
|
||||
}
|
||||
|
||||
func (sma StorageMarketActor) GetLastExpirationFromDealIDs(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) {
|
||||
|
||||
}
|
||||
*/
|
||||
@@ -1,797 +0,0 @@
package actors
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/filecoin-project/go-amt-ipld"
|
||||
cid "github.com/ipfs/go-cid"
|
||||
hamt "github.com/ipfs/go-hamt-ipld"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"go.opencensus.io/trace"
|
||||
xerrors "golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
"github.com/filecoin-project/lotus/chain/actors/aerrors"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
type StoragePowerActor struct{}
|
||||
|
||||
type spaMethods struct {
|
||||
Constructor uint64
|
||||
CreateStorageMiner uint64
|
||||
ArbitrateConsensusFault uint64
|
||||
UpdateStorage uint64
|
||||
GetTotalStorage uint64
|
||||
PowerLookup uint64
|
||||
IsValidMiner uint64
|
||||
PledgeCollateralForSize uint64
|
||||
CheckProofSubmissions uint64
|
||||
}
|
||||
|
||||
var SPAMethods = spaMethods{1, 2, 3, 4, 5, 6, 7, 8, 9}
|
||||
|
||||
func (spa StoragePowerActor) Exports() []interface{} {
|
||||
return []interface{}{
|
||||
//1: spa.StoragePowerConstructor,
|
||||
2: spa.CreateStorageMiner,
|
||||
3: spa.ArbitrateConsensusFault,
|
||||
4: spa.UpdateStorage,
|
||||
5: spa.GetTotalStorage,
|
||||
6: spa.PowerLookup,
|
||||
7: spa.IsValidMiner,
|
||||
8: spa.PledgeCollateralForSize,
|
||||
9: spa.CheckProofSubmissions,
|
||||
}
|
||||
}
|
||||
|
||||
type StoragePowerState struct {
|
||||
Miners cid.Cid
|
||||
ProvingBuckets cid.Cid // amt[ProvingPeriodBucket]hamt[minerAddress]struct{}
|
||||
MinerCount uint64
|
||||
LastMinerCheck uint64
|
||||
|
||||
TotalStorage types.BigInt
|
||||
}
|
||||
|
||||
type CreateStorageMinerParams struct {
|
||||
Owner address.Address
|
||||
Worker address.Address
|
||||
SectorSize uint64
|
||||
PeerID peer.ID
|
||||
}
|
||||
|
||||
func (spa StoragePowerActor) CreateStorageMiner(act *types.Actor, vmctx types.VMContext, params *CreateStorageMinerParams) ([]byte, ActorError) {
|
||||
if !build.SupportedSectorSize(params.SectorSize) {
|
||||
return nil, aerrors.New(1, "Unsupported sector size")
|
||||
}
|
||||
|
||||
var self StoragePowerState
|
||||
old := vmctx.Storage().GetHead()
|
||||
if err := vmctx.Storage().Get(old, &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reqColl, err := pledgeCollateralForSize(vmctx, types.NewInt(0), self.TotalStorage, self.MinerCount+1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if vmctx.Message().Value.LessThan(reqColl) {
|
||||
return nil, aerrors.Newf(1, "not enough funds passed to cover required miner collateral (needed %s, got %s)", reqColl, vmctx.Message().Value)
|
||||
}
|
||||
|
||||
encoded, err := CreateExecParams(StorageMinerCodeCid, &StorageMinerConstructorParams{
|
||||
Owner: params.Owner,
|
||||
Worker: params.Worker,
|
||||
SectorSize: params.SectorSize,
|
||||
PeerID: params.PeerID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret, err := vmctx.Send(InitAddress, IAMethods.Exec, vmctx.Message().Value, encoded)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
naddr, nerr := address.NewFromBytes(ret)
|
||||
if nerr != nil {
|
||||
return nil, aerrors.Absorb(nerr, 2, "could not read address of new actor")
|
||||
}
|
||||
|
||||
ncid, err := MinerSetAdd(context.TODO(), vmctx, self.Miners, naddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
self.Miners = ncid
|
||||
self.MinerCount++
|
||||
|
||||
nroot, err := vmctx.Storage().Put(&self)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := vmctx.Storage().Commit(old, nroot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return naddr.Bytes(), nil
|
||||
}
|
||||
|
||||
type ArbitrateConsensusFaultParams struct {
|
||||
Block1 *types.BlockHeader
|
||||
Block2 *types.BlockHeader
|
||||
}
|
||||
|
||||
func (spa StoragePowerActor) ArbitrateConsensusFault(act *types.Actor, vmctx types.VMContext, params *ArbitrateConsensusFaultParams) ([]byte, ActorError) {
|
||||
if params.Block1.Miner != params.Block2.Miner {
|
||||
return nil, aerrors.New(2, "blocks must be from the same miner")
|
||||
}
|
||||
|
||||
rval, err := vmctx.Send(params.Block1.Miner, MAMethods.GetWorkerAddr, types.NewInt(0), nil)
|
||||
if err != nil {
|
||||
return nil, aerrors.Wrap(err, "failed to get miner worker")
|
||||
}
|
||||
|
||||
worker, oerr := address.NewFromBytes(rval)
|
||||
if oerr != nil {
|
||||
// REVIEW: should this be fatal? I can't think of a real situation that would get us here
|
||||
return nil, aerrors.Absorb(oerr, 3, "response from 'GetWorkerAddr' was not a valid address")
|
||||
}
|
||||
|
||||
if err := params.Block1.CheckBlockSignature(vmctx.Context(), worker); err != nil {
|
||||
return nil, aerrors.Absorb(err, 4, "block1 did not have valid signature")
|
||||
}
|
||||
|
||||
if err := params.Block2.CheckBlockSignature(vmctx.Context(), worker); err != nil {
|
||||
return nil, aerrors.Absorb(err, 5, "block2 did not have valid signature")
|
||||
}
|
||||
|
||||
// see the "Consensus Faults" section of the faults spec (faults.md)
|
||||
// for details on these slashing conditions.
|
||||
if !shouldSlash(params.Block1, params.Block2) {
|
||||
return nil, aerrors.New(6, "blocks do not prove a slashable offense")
|
||||
}
|
||||
|
||||
var self StoragePowerState
|
||||
old := vmctx.Storage().GetHead()
|
||||
if err := vmctx.Storage().Get(old, &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if types.BigCmp(self.TotalStorage, types.NewInt(0)) == 0 {
|
||||
return nil, aerrors.Fatal("invalid state, storage power actor has zero total storage")
|
||||
}
|
||||
|
||||
miner := params.Block1.Miner
|
||||
if has, err := MinerSetHas(vmctx, self.Miners, miner); err != nil {
|
||||
return nil, aerrors.Wrapf(err, "failed to check miner in set")
|
||||
} else if !has {
|
||||
return nil, aerrors.New(7, "either already slashed or not a miner")
|
||||
}
|
||||
|
||||
minerPower, err := powerLookup(context.TODO(), vmctx, &self, miner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slashedCollateral, err := pledgeCollateralForSize(vmctx, minerPower, self.TotalStorage, self.MinerCount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
enc, err := SerializeParams(&MinerSlashConsensusFault{
|
||||
Slasher: vmctx.Message().From,
|
||||
AtHeight: params.Block1.Height,
|
||||
SlashedCollateral: slashedCollateral,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = vmctx.Send(miner, MAMethods.SlashConsensusFault, types.NewInt(0), enc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remove the miner from the list of network miners
|
||||
ncid, err := MinerSetRemove(context.TODO(), vmctx, self.Miners, miner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
self.Miners = ncid
|
||||
self.MinerCount--
|
||||
|
||||
self.TotalStorage = types.BigSub(self.TotalStorage, minerPower)
|
||||
|
||||
nroot, err := vmctx.Storage().Put(&self)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := vmctx.Storage().Commit(old, nroot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func cidArrContains(a []cid.Cid, b cid.Cid) bool {
|
||||
for _, c := range a {
|
||||
if b == c {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func shouldSlash(block1, block2 *types.BlockHeader) bool {
|
||||
// First slashing condition, blocks have the same ticket round
|
||||
if block1.Height == block2.Height {
|
||||
return true
|
||||
}
|
||||
|
||||
/* Second slashing condition requires having access to the parent tipset blocks
|
||||
// This might not always be available, needs some thought on the best way to deal with this
|
||||
|
||||
|
||||
// Second slashing condition, miner ignored own block when mining
|
||||
// Case A: block2 could have been in block1's parent set but is not
|
||||
b1ParentHeight := block1.Height - len(block1.Tickets)
|
||||
|
||||
block1ParentTipSet := block1.Parents
|
||||
if !cidArrContains(block1.Parents, block2.Cid()) &&
|
||||
b1ParentHeight == block2.Height &&
|
||||
block1ParentTipSet.ParentCids == block2.ParentCids {
|
||||
return true
|
||||
}
|
||||
|
||||
// Case B: block1 could have been in block2's parent set but is not
|
||||
block2ParentTipSet := parentOf(block2)
|
||||
if !block2Parent.contains(block1) &&
|
||||
block2ParentTipSet.Height == block1.Height &&
|
||||
block2ParentTipSet.ParentCids == block1.ParentCids {
|
||||
return true
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type UpdateStorageParams struct {
|
||||
Delta types.BigInt
|
||||
NextSlashDeadline uint64
|
||||
PreviousSlashDeadline uint64
|
||||
}
|
||||
|
||||
func (spa StoragePowerActor) UpdateStorage(act *types.Actor, vmctx types.VMContext, params *UpdateStorageParams) ([]byte, ActorError) {
|
||||
var self StoragePowerState
|
||||
old := vmctx.Storage().GetHead()
|
||||
if err := vmctx.Storage().Get(old, &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
has, err := MinerSetHas(vmctx, self.Miners, vmctx.Message().From)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !has {
|
||||
return nil, aerrors.New(1, "update storage must only be called by a miner actor")
|
||||
}
|
||||
|
||||
self.TotalStorage = types.BigAdd(self.TotalStorage, params.Delta)
|
||||
|
||||
previousBucket := params.PreviousSlashDeadline % build.SlashablePowerDelay
|
||||
nextBucket := params.NextSlashDeadline % build.SlashablePowerDelay
|
||||
|
||||
if previousBucket == nextBucket && params.PreviousSlashDeadline != 0 {
|
||||
nroot, err := vmctx.Storage().Put(&self)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := vmctx.Storage().Commit(old, nroot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil // Nothing to do
|
||||
}
|
||||
|
||||
buckets, eerr := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.ProvingBuckets)
|
||||
if eerr != nil {
|
||||
return nil, aerrors.HandleExternalError(eerr, "loading proving buckets amt")
|
||||
}
|
||||
|
||||
if params.PreviousSlashDeadline != 0 { // delete from previous bucket
|
||||
err := deleteMinerFromBucket(vmctx, buckets, previousBucket)
|
||||
if err != nil {
|
||||
return nil, aerrors.Wrapf(err, "delete from bucket %d, next %d", previousBucket, nextBucket)
|
||||
}
|
||||
}
|
||||
|
||||
err = addMinerToBucket(vmctx, buckets, nextBucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
self.ProvingBuckets, eerr = buckets.Flush()
|
||||
if eerr != nil {
|
||||
return nil, aerrors.HandleExternalError(eerr, "flushing proving buckets")
|
||||
}
|
||||
|
||||
nroot, err := vmctx.Storage().Put(&self)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := vmctx.Storage().Commit(old, nroot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func deleteMinerFromBucket(vmctx types.VMContext, buckets *amt.Root, previousBucket uint64) aerrors.ActorError {
|
||||
var bucket cid.Cid
|
||||
err := buckets.Get(previousBucket, &bucket)
|
||||
switch err.(type) {
|
||||
case *amt.ErrNotFound:
|
||||
return aerrors.HandleExternalError(err, "proving bucket missing")
|
||||
case nil: // noop
|
||||
default:
|
||||
return aerrors.HandleExternalError(err, "getting proving bucket")
|
||||
}
|
||||
|
||||
bhamt, err := hamt.LoadNode(vmctx.Context(), vmctx.Ipld(), bucket)
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "failed to load proving bucket")
|
||||
}
|
||||
err = bhamt.Delete(vmctx.Context(), string(vmctx.Message().From.Bytes()))
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "deleting miner from proving bucket")
|
||||
}
|
||||
|
||||
err = bhamt.Flush(vmctx.Context())
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "flushing previous proving bucket")
|
||||
}
|
||||
|
||||
bucket, err = vmctx.Ipld().Put(vmctx.Context(), bhamt)
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "putting previous proving bucket hamt")
|
||||
}
|
||||
|
||||
err = buckets.Set(previousBucket, bucket)
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "setting previous proving bucket cid in amt")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func addMinerToBucket(vmctx types.VMContext, buckets *amt.Root, nextBucket uint64) aerrors.ActorError {
|
||||
var bhamt *hamt.Node
|
||||
var bucket cid.Cid
|
||||
err := buckets.Get(nextBucket, &bucket)
|
||||
switch err.(type) {
|
||||
case *amt.ErrNotFound:
|
||||
bhamt = hamt.NewNode(vmctx.Ipld())
|
||||
case nil:
|
||||
bhamt, err = hamt.LoadNode(vmctx.Context(), vmctx.Ipld(), bucket)
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "failed to load proving bucket")
|
||||
}
|
||||
default:
|
||||
return aerrors.HandleExternalError(err, "getting proving bucket")
|
||||
}
|
||||
|
||||
err = bhamt.Set(vmctx.Context(), string(vmctx.Message().From.Bytes()), CborNull)
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "setting miner in proving bucket")
|
||||
}
|
||||
|
||||
err = bhamt.Flush(vmctx.Context())
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "flushing previous proving bucket")
|
||||
}
|
||||
|
||||
bucket, err = vmctx.Ipld().Put(vmctx.Context(), bhamt)
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "putting previous proving bucket hamt")
|
||||
}
|
||||
|
||||
err = buckets.Set(nextBucket, bucket)
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "setting previous proving bucket cid in amt")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (spa StoragePowerActor) GetTotalStorage(act *types.Actor, vmctx types.VMContext, params *struct{}) ([]byte, ActorError) {
|
||||
var self StoragePowerState
|
||||
if err := vmctx.Storage().Get(vmctx.Storage().GetHead(), &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return self.TotalStorage.Bytes(), nil
|
||||
}
|
||||
|
||||
type PowerLookupParams struct {
|
||||
Miner address.Address
|
||||
}
|
||||
|
||||
func (spa StoragePowerActor) PowerLookup(act *types.Actor, vmctx types.VMContext, params *PowerLookupParams) ([]byte, ActorError) {
|
||||
var self StoragePowerState
|
||||
if err := vmctx.Storage().Get(vmctx.Storage().GetHead(), &self); err != nil {
|
||||
return nil, aerrors.Wrap(err, "getting head")
|
||||
}
|
||||
|
||||
pow, err := powerLookup(context.TODO(), vmctx, &self, params.Miner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pow.Bytes(), nil
|
||||
}
|
||||
|
||||
func powerLookup(ctx context.Context, vmctx types.VMContext, self *StoragePowerState, miner address.Address) (types.BigInt, ActorError) {
|
||||
has, err := MinerSetHas(vmctx, self.Miners, miner)
|
||||
if err != nil {
|
||||
return types.EmptyInt, err
|
||||
}
|
||||
|
||||
if !has {
|
||||
return types.EmptyInt, aerrors.New(1, "miner not registered with storage power actor")
|
||||
}
|
||||
|
||||
// TODO: Use local amt
|
||||
ret, err := vmctx.Send(miner, MAMethods.GetPower, types.NewInt(0), nil)
|
||||
if err != nil {
|
||||
return types.EmptyInt, aerrors.Wrap(err, "invoke Miner.GetPower")
|
||||
}
|
||||
|
||||
return types.BigFromBytes(ret), nil
|
||||
}
|
||||
|
||||
type IsValidMinerParam struct {
|
||||
Addr address.Address
|
||||
}
|
||||
|
||||
func (spa StoragePowerActor) IsValidMiner(act *types.Actor, vmctx types.VMContext, param *IsValidMinerParam) ([]byte, ActorError) {
|
||||
var self StoragePowerState
|
||||
if err := vmctx.Storage().Get(vmctx.Storage().GetHead(), &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
has, err := MinerSetHas(vmctx, self.Miners, param.Addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !has {
|
||||
log.Warnf("Miner INVALID: not in set: %s", param.Addr)
|
||||
|
||||
return cbg.CborBoolFalse, nil
|
||||
}
|
||||
|
||||
ret, err := vmctx.Send(param.Addr, MAMethods.IsSlashed, types.NewInt(0), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slashed := bytes.Equal(ret, cbg.CborBoolTrue)
|
||||
|
||||
if slashed {
|
||||
log.Warnf("Miner INVALID: /SLASHED/ : %s", param.Addr)
|
||||
}
|
||||
|
||||
return cbg.EncodeBool(!slashed), nil
|
||||
}
|
||||
|
||||
type PledgeCollateralParams struct {
|
||||
Size types.BigInt
|
||||
}
|
||||
|
||||
func (spa StoragePowerActor) PledgeCollateralForSize(act *types.Actor, vmctx types.VMContext, param *PledgeCollateralParams) ([]byte, ActorError) {
|
||||
var self StoragePowerState
|
||||
if err := vmctx.Storage().Get(vmctx.Storage().GetHead(), &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
totalCollateral, err := pledgeCollateralForSize(vmctx, param.Size, self.TotalStorage, self.MinerCount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return totalCollateral.Bytes(), nil
|
||||
}
|
||||
|
||||
func pledgeCollateralForSize(vmctx types.VMContext, size, totalStorage types.BigInt, minerCount uint64) (types.BigInt, aerrors.ActorError) {
|
||||
netBalance, err := vmctx.GetBalance(NetworkAddress)
|
||||
if err != nil {
|
||||
return types.EmptyInt, err
|
||||
}
|
||||
|
||||
// TODO: the spec says to also grab 'total vested filecoin' and include it as available
|
||||
// If we don't factor that in, we effectively assume all of the locked up filecoin is 'available'
|
||||
// the blocker on that right now is that its hard to tell how much filecoin is unlocked
|
||||
|
||||
availableFilecoin := types.BigSub(
|
||||
types.BigMul(types.NewInt(build.TotalFilecoin), types.NewInt(build.FilecoinPrecision)),
|
||||
netBalance,
|
||||
)
|
||||
|
||||
totalPowerCollateral := types.BigDiv(
|
||||
types.BigMul(
|
||||
availableFilecoin,
|
||||
types.NewInt(build.PowerCollateralProportion),
|
||||
),
|
||||
types.NewInt(build.CollateralPrecision),
|
||||
)
|
||||
|
||||
totalPerCapitaCollateral := types.BigDiv(
|
||||
types.BigMul(
|
||||
availableFilecoin,
|
||||
types.NewInt(build.PerCapitaCollateralProportion),
|
||||
),
|
||||
types.NewInt(build.CollateralPrecision),
|
||||
)
|
||||
|
||||
// REVIEW: for bootstrapping purposes, we skip the power portion of the
|
||||
// collateral if there is no collateral in the network yet
|
||||
powerCollateral := types.NewInt(0)
|
||||
if types.BigCmp(totalStorage, types.NewInt(0)) != 0 {
|
||||
powerCollateral = types.BigDiv(
|
||||
types.BigMul(
|
||||
totalPowerCollateral,
|
||||
size,
|
||||
),
|
||||
totalStorage,
|
||||
)
|
||||
}
|
||||
|
||||
perCapCollateral := types.BigDiv(
|
||||
totalPerCapitaCollateral,
|
||||
types.NewInt(minerCount),
|
||||
)
|
||||
|
||||
return types.BigAdd(powerCollateral, perCapCollateral), nil
|
||||
}
|
||||
|
||||
func (spa StoragePowerActor) CheckProofSubmissions(act *types.Actor, vmctx types.VMContext, param *struct{}) ([]byte, ActorError) {
|
||||
if vmctx.Message().From != CronAddress {
|
||||
return nil, aerrors.New(1, "CheckProofSubmissions is only callable from the cron actor")
|
||||
}
|
||||
|
||||
var self StoragePowerState
|
||||
old := vmctx.Storage().GetHead()
|
||||
if err := vmctx.Storage().Get(old, &self); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := self.LastMinerCheck; i < vmctx.BlockHeight(); i++ {
|
||||
height := i + 1
|
||||
|
||||
err := checkProofSubmissionsAtH(vmctx, &self, height)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
self.LastMinerCheck = vmctx.BlockHeight()
|
||||
|
||||
nroot, aerr := vmctx.Storage().Put(&self)
|
||||
if aerr != nil {
|
||||
return nil, aerr
|
||||
}
|
||||
|
||||
if err := vmctx.Storage().Commit(old, nroot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func checkProofSubmissionsAtH(vmctx types.VMContext, self *StoragePowerState, height uint64) aerrors.ActorError {
|
||||
bucketID := height % build.SlashablePowerDelay
|
||||
|
||||
buckets, eerr := amt.LoadAMT(types.WrapStorage(vmctx.Storage()), self.ProvingBuckets)
|
||||
if eerr != nil {
|
||||
return aerrors.HandleExternalError(eerr, "loading proving buckets amt")
|
||||
}
|
||||
|
||||
var bucket cid.Cid
|
||||
err := buckets.Get(bucketID, &bucket)
|
||||
switch err.(type) {
|
||||
case *amt.ErrNotFound:
|
||||
return nil // nothing to do
|
||||
case nil:
|
||||
default:
|
||||
return aerrors.HandleExternalError(err, "getting proving bucket")
|
||||
}
|
||||
|
||||
bhamt, err := hamt.LoadNode(vmctx.Context(), vmctx.Ipld(), bucket)
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "failed to load proving bucket")
|
||||
}
|
||||
|
||||
err = bhamt.ForEach(vmctx.Context(), func(k string, val interface{}) error {
|
||||
_, span := trace.StartSpan(vmctx.Context(), "StoragePowerActor.CheckProofSubmissions.loop")
|
||||
defer span.End()
|
||||
|
||||
maddr, err := address.NewFromBytes([]byte(k))
|
||||
if err != nil {
|
||||
return aerrors.Escalate(err, "parsing miner address")
|
||||
}
|
||||
|
||||
span.AddAttributes(trace.StringAttribute("miner", maddr.String()))
|
||||
|
||||
params, err := SerializeParams(&CheckMinerParams{NetworkPower: self.TotalStorage})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ret, err := vmctx.Send(maddr, MAMethods.CheckMiner, types.NewInt(0), params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(ret) == 0 {
|
||||
return nil // miner is fine
|
||||
}
|
||||
|
||||
var power types.BigInt
|
||||
if err := power.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
|
||||
return xerrors.Errorf("unmarshaling CheckMiner response (%x): %w", ret, err)
|
||||
}
|
||||
|
||||
if power.GreaterThan(types.NewInt(0)) {
|
||||
log.Warnf("slashing miner %s for missed PoSt (%s B, H: %d, Bucket: %d)", maddr, power, height, bucketID)
|
||||
|
||||
self.TotalStorage = types.BigSub(self.TotalStorage, power)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return aerrors.HandleExternalError(err, "iterating miners in proving bucket")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func MinerSetHas(vmctx types.VMContext, rcid cid.Cid, maddr address.Address) (bool, aerrors.ActorError) {
|
||||
nd, err := hamt.LoadNode(vmctx.Context(), vmctx.Ipld(), rcid)
|
||||
if err != nil {
|
||||
return false, aerrors.HandleExternalError(err, "failed to load miner set")
|
||||
}
|
||||
|
||||
err = nd.Find(vmctx.Context(), string(maddr.Bytes()), nil)
|
||||
switch err {
|
||||
case hamt.ErrNotFound:
|
||||
return false, nil
|
||||
case nil:
|
||||
return true, nil
|
||||
default:
|
||||
return false, aerrors.HandleExternalError(err, "failed to do set lookup")
|
||||
}
|
||||
}
|
||||
|
||||
func MinerSetList(ctx context.Context, cst *hamt.CborIpldStore, rcid cid.Cid) ([]address.Address, error) {
|
||||
nd, err := hamt.LoadNode(ctx, cst, rcid)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("failed to load miner set: %w", err)
|
||||
}
|
||||
|
||||
var out []address.Address
|
||||
err = nd.ForEach(ctx, func(k string, val interface{}) error {
|
||||
addr, err := address.NewFromBytes([]byte(k))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out = append(out, addr)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func MinerSetAdd(ctx context.Context, vmctx types.VMContext, rcid cid.Cid, maddr address.Address) (cid.Cid, aerrors.ActorError) {
|
||||
nd, err := hamt.LoadNode(ctx, vmctx.Ipld(), rcid)
|
||||
if err != nil {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "failed to load miner set")
|
||||
}
|
||||
|
||||
mkey := string(maddr.Bytes())
|
||||
err = nd.Find(ctx, mkey, nil)
|
||||
if err == nil {
|
||||
return cid.Undef, aerrors.New(20, "miner already in set")
|
||||
}
|
||||
|
||||
if !xerrors.Is(err, hamt.ErrNotFound) {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "failed to do miner set check")
|
||||
}
|
||||
|
||||
if err := nd.Set(ctx, mkey, uint64(1)); err != nil {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "adding miner address to set failed")
|
||||
}
|
||||
|
||||
if err := nd.Flush(ctx); err != nil {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "failed to flush miner set")
|
||||
}
|
||||
|
||||
c, err := vmctx.Ipld().Put(ctx, nd)
|
||||
if err != nil {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "failed to persist miner set to storage")
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func MinerSetRemove(ctx context.Context, vmctx types.VMContext, rcid cid.Cid, maddr address.Address) (cid.Cid, aerrors.ActorError) {
|
||||
nd, err := hamt.LoadNode(ctx, vmctx.Ipld(), rcid)
|
||||
if err != nil {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "failed to load miner set")
|
||||
}
|
||||
|
||||
mkey := string(maddr.Bytes())
|
||||
switch err := nd.Delete(ctx, mkey); err {
|
||||
default:
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "failed to delete miner from set")
|
||||
case hamt.ErrNotFound:
|
||||
return cid.Undef, aerrors.New(1, "miner not found in set on delete")
|
||||
case nil:
|
||||
}
|
||||
|
||||
if err := nd.Flush(ctx); err != nil {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "failed to flush miner set")
|
||||
}
|
||||
|
||||
c, err := vmctx.Ipld().Put(ctx, nd)
|
||||
if err != nil {
|
||||
return cid.Undef, aerrors.HandleExternalError(err, "failed to persist miner set to storage")
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
type cbgNull struct{}
|
||||
|
||||
var CborNull = &cbgNull{}
|
||||
|
||||
func (cbgNull) MarshalCBOR(w io.Writer) error {
|
||||
n, err := w.Write(cbg.CborNull)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != 1 {
|
||||
return xerrors.New("expected to write 1 byte")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cbgNull) UnmarshalCBOR(r io.Reader) error {
|
||||
b := [1]byte{}
|
||||
n, err := r.Read(b[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != 1 {
|
||||
return xerrors.New("expected 1 byte")
|
||||
}
|
||||
if !bytes.Equal(b[:], cbg.CborNull) {
|
||||
return xerrors.New("expected cbor null")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,174 +0,0 @@
package actors_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
. "github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
"github.com/filecoin-project/lotus/chain/wallet"
|
||||
|
||||
cid "github.com/ipfs/go-cid"
|
||||
hamt "github.com/ipfs/go-hamt-ipld"
|
||||
bstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
cbor "github.com/ipfs/go-ipld-cbor"
|
||||
mh "github.com/multiformats/go-multihash"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestStorageMarketCreateAndSlashMiner(t *testing.T) {
|
||||
var ownerAddr, workerAddr address.Address
|
||||
|
||||
opts := []HarnessOpt{
|
||||
HarnessAddr(&ownerAddr, 1000000),
|
||||
HarnessAddr(&workerAddr, 100000),
|
||||
}
|
||||
|
||||
h := NewHarness(t, opts...)
|
||||
|
||||
var minerAddr address.Address
|
||||
{
|
||||
// cheating the bootstrapping problem
|
||||
cheatStorageMarketTotal(t, h.vm, h.cs.Blockstore())
|
||||
|
||||
ret, _ := h.InvokeWithValue(t, ownerAddr, StoragePowerAddress, SPAMethods.CreateStorageMiner,
|
||||
types.NewInt(500000),
|
||||
&CreateStorageMinerParams{
|
||||
Owner: ownerAddr,
|
||||
Worker: workerAddr,
|
||||
SectorSize: build.SectorSizes[0],
|
||||
PeerID: "fakepeerid",
|
||||
})
|
||||
ApplyOK(t, ret)
|
||||
var err error
|
||||
minerAddr, err = address.NewFromBytes(ret.Return)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
{
|
||||
ret, _ := h.Invoke(t, ownerAddr, StoragePowerAddress, SPAMethods.IsValidMiner,
|
||||
&IsValidMinerParam{Addr: minerAddr})
|
||||
ApplyOK(t, ret)
|
||||
|
||||
var output bool
|
||||
err := cbor.DecodeInto(ret.Return, &output)
|
||||
if err != nil {
|
||||
t.Fatalf("error decoding: %+v", err)
|
||||
}
|
||||
|
||||
if !output {
|
||||
t.Fatalf("%s is miner but IsValidMiner call returned false", minerAddr)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
ret, _ := h.Invoke(t, ownerAddr, StoragePowerAddress, SPAMethods.PowerLookup,
|
||||
&PowerLookupParams{Miner: minerAddr})
|
||||
ApplyOK(t, ret)
|
||||
power := types.BigFromBytes(ret.Return)
|
||||
|
||||
if types.BigCmp(power, types.NewInt(0)) != 0 {
|
||||
t.Fatalf("power should be zero, is: %s", power)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
ret, _ := h.Invoke(t, ownerAddr, minerAddr, MAMethods.GetOwner, nil)
|
||||
ApplyOK(t, ret)
|
||||
oA, err := address.NewFromBytes(ret.Return)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, ownerAddr, oA, "return from GetOwner should be equal to the owner")
|
||||
}
|
||||
|
||||
{
|
||||
b1 := fakeBlock(t, minerAddr, 100)
|
||||
b2 := fakeBlock(t, minerAddr, 101)
|
||||
|
||||
signBlock(t, h.w, workerAddr, b1)
|
||||
signBlock(t, h.w, workerAddr, b2)
|
||||
|
||||
ret, _ := h.Invoke(t, ownerAddr, StoragePowerAddress, SPAMethods.ArbitrateConsensusFault,
|
||||
&ArbitrateConsensusFaultParams{
|
||||
Block1: b1,
|
||||
Block2: b2,
|
||||
})
|
||||
ApplyOK(t, ret)
|
||||
}
|
||||
|
||||
{
|
||||
ret, _ := h.Invoke(t, ownerAddr, StoragePowerAddress, SPAMethods.PowerLookup,
|
||||
&PowerLookupParams{Miner: minerAddr})
|
||||
assert.Equal(t, ret.ExitCode, byte(1))
|
||||
}
|
||||
|
||||
{
|
||||
ret, _ := h.Invoke(t, ownerAddr, StoragePowerAddress, SPAMethods.IsValidMiner, &IsValidMinerParam{minerAddr})
|
||||
ApplyOK(t, ret)
|
||||
assert.Equal(t, ret.Return, cbg.CborBoolFalse)
|
||||
}
|
||||
}
|
||||
|
||||
func cheatStorageMarketTotal(t *testing.T, vm *vm.VM, bs bstore.Blockstore) {
|
||||
t.Helper()
|
||||
|
||||
sma, err := vm.StateTree().GetActor(StoragePowerAddress)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cst := hamt.CSTFromBstore(bs)
|
||||
|
||||
var smastate StoragePowerState
|
||||
if err := cst.Get(context.TODO(), sma.Head, &smastate); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
smastate.TotalStorage = types.NewInt(10000)
|
||||
|
||||
c, err := cst.Put(context.TODO(), &smastate)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sma.Head = c
|
||||
|
||||
if err := vm.StateTree().SetActor(StoragePowerAddress, sma); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func fakeBlock(t *testing.T, minerAddr address.Address, ts uint64) *types.BlockHeader {
|
||||
c := fakeCid(t, 1)
|
||||
return &types.BlockHeader{Height: 5, Miner: minerAddr, Timestamp: ts, ParentStateRoot: c, Messages: c, ParentMessageReceipts: c, BLSAggregate: types.Signature{Type: types.KTBLS}}
|
||||
}
|
||||
|
||||
func fakeCid(t *testing.T, s int) cid.Cid {
|
||||
t.Helper()
|
||||
c, err := cid.NewPrefixV1(cid.Raw, mh.IDENTITY).Sum([]byte(fmt.Sprintf("%d", s)))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func signBlock(t *testing.T, w *wallet.Wallet, worker address.Address, blk *types.BlockHeader) {
|
||||
t.Helper()
|
||||
sb, err := blk.SigningBytes()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sig, err := w.Sign(context.TODO(), worker, sb)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
blk.BlockSig = sig
|
||||
}
|
||||
@@ -1,52 +0,0 @@
package actors

import (
	"github.com/filecoin-project/go-address"

	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

var AccountCodeCid cid.Cid
var CronCodeCid cid.Cid
var StoragePowerCodeCid cid.Cid
var StorageMarketCodeCid cid.Cid
var StorageMinerCodeCid cid.Cid
var MultisigCodeCid cid.Cid
var InitCodeCid cid.Cid
var PaymentChannelCodeCid cid.Cid

var InitAddress = mustIDAddress(0)
var NetworkAddress = mustIDAddress(1)
var StoragePowerAddress = mustIDAddress(2)
var StorageMarketAddress = mustIDAddress(3) // TODO: missing from spec
var CronAddress = mustIDAddress(4)
var BurntFundsAddress = mustIDAddress(99)

func mustIDAddress(i uint64) address.Address {
	a, err := address.NewIDAddress(i)
	if err != nil {
		panic(err) // ok
	}
	return a
}

func init() {
	pref := cid.NewPrefixV1(cid.Raw, mh.IDENTITY)
	mustSum := func(s string) cid.Cid {
		c, err := pref.Sum([]byte(s))
		if err != nil {
			panic(err) // ok
		}
		return c
	}

	AccountCodeCid = mustSum("fil/1/account") // TODO: spec
	CronCodeCid = mustSum("fil/1/cron")
	StoragePowerCodeCid = mustSum("fil/1/power")
	StorageMarketCodeCid = mustSum("fil/1/market")
	StorageMinerCodeCid = mustSum("fil/1/miner")
	MultisigCodeCid = mustSum("fil/1/multisig")
	InitCodeCid = mustSum("fil/1/init")
	PaymentChannelCodeCid = mustSum("fil/1/paych")
}
@@ -1,158 +0,0 @@
package actors_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/filecoin-project/lotus/build"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
. "github.com/filecoin-project/lotus/chain/actors"
|
||||
"github.com/filecoin-project/lotus/chain/gen"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
"github.com/filecoin-project/lotus/chain/vm"
|
||||
dstore "github.com/ipfs/go-datastore"
|
||||
bstore "github.com/ipfs/go-ipfs-blockstore"
|
||||
)
|
||||
|
||||
func blsaddr(n uint64) address.Address {
|
||||
buf := make([]byte, 48)
|
||||
binary.PutUvarint(buf, n)
|
||||
|
||||
addr, err := address.NewBLSAddress(buf)
|
||||
if err != nil {
|
||||
panic(err) // ok
|
||||
}
|
||||
|
||||
return addr
|
||||
}
|
||||
|
||||
func setupVMTestEnv(t *testing.T) (*vm.VM, []address.Address, bstore.Blockstore) {
|
||||
bs := bstore.NewBlockstore(dstore.NewMapDatastore())
|
||||
|
||||
from := blsaddr(0)
|
||||
maddr := blsaddr(1)
|
||||
|
||||
actors := map[address.Address]types.BigInt{
|
||||
from: types.NewInt(1000000),
|
||||
maddr: types.NewInt(0),
|
||||
}
|
||||
st, err := gen.MakeInitialStateTree(bs, actors)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stateroot, err := st.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cs := store.NewChainStore(bs, nil)
|
||||
|
||||
// TODO: should probably mock out the randomness bit, nil works for now
|
||||
vm, err := vm.NewVM(stateroot, 1, nil, maddr, cs.Blockstore())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return vm, []address.Address{from, maddr}, bs
|
||||
}
|
||||
|
||||
func TestVMInvokeMethod(t *testing.T) {
|
||||
vm, addrs, _ := setupVMTestEnv(t)
|
||||
from := addrs[0]
|
||||
|
||||
var err error
|
||||
cenc, err := SerializeParams(&StorageMinerConstructorParams{Owner: from, Worker: from})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
execparams := &ExecParams{
|
||||
Code: StorageMinerCodeCid,
|
||||
Params: cenc,
|
||||
}
|
||||
enc, err := SerializeParams(execparams)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
msg := &types.Message{
|
||||
To: InitAddress,
|
||||
From: from,
|
||||
Method: IAMethods.Exec,
|
||||
Params: enc,
|
||||
GasPrice: types.NewInt(1),
|
||||
GasLimit: types.NewInt(10000),
|
||||
Value: types.NewInt(0),
|
||||
}
|
||||
|
||||
ret, err := vm.ApplyMessage(context.TODO(), msg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if ret.ExitCode != 0 {
|
||||
t.Fatal("invocation failed")
|
||||
}
|
||||
|
||||
outaddr, err := address.NewFromBytes(ret.Return)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if outaddr.String() != "t0102" {
|
||||
t.Fatal("hold up")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStorageMarketActorCreateMiner(t *testing.T) {
|
||||
vm, addrs, bs := setupVMTestEnv(t)
|
||||
from := addrs[0]
|
||||
maddr := addrs[1]
|
||||
|
||||
cheatStorageMarketTotal(t, vm, bs)
|
||||
|
||||
params := &StorageMinerConstructorParams{
|
||||
Owner: maddr,
|
||||
Worker: maddr,
|
||||
SectorSize: build.SectorSizes[0],
|
||||
PeerID: "fakepeerid",
|
||||
}
|
||||
var err error
|
||||
enc, err := SerializeParams(params)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
msg := &types.Message{
|
||||
To: StoragePowerAddress,
|
||||
From: from,
|
||||
Method: SPAMethods.CreateStorageMiner,
|
||||
Params: enc,
|
||||
GasPrice: types.NewInt(1),
|
||||
GasLimit: types.NewInt(10000),
|
||||
Value: types.NewInt(50000),
|
||||
}
|
||||
|
||||
ret, err := vm.ApplyMessage(context.TODO(), msg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if ret.ExitCode != 0 {
|
||||
fmt.Println(ret.ActorErr)
|
||||
t.Fatal("invocation failed: ", ret.ExitCode)
|
||||
}
|
||||
|
||||
outaddr, err := address.NewFromBytes(ret.Return)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if outaddr.String() != "t0102" {
|
||||
t.Fatal("hold up")
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
package actors

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/actors/aerrors"
)

func NewIDAddress(id uint64) (address.Address, ActorError) {
	a, err := address.NewIDAddress(id)
	if err != nil {
		return address.Undef, aerrors.Escalate(err, "could not create ID Address")
	}
	return a, nil
}
65 chain/actors/adt/adt.go Normal file
@@ -0,0 +1,65 @@
package adt

import (
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/cbor"
	"github.com/filecoin-project/go-state-types/network"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
)

type Map interface {
	Root() (cid.Cid, error)

	Put(k abi.Keyer, v cbor.Marshaler) error
	Get(k abi.Keyer, v cbor.Unmarshaler) (bool, error)
	Delete(k abi.Keyer) error

	ForEach(v cbor.Unmarshaler, fn func(key string) error) error
}

func AsMap(store Store, root cid.Cid, version builtin.Version) (Map, error) {
	switch version {
	case builtin.Version0:
		return adt0.AsMap(store, root)
	}
	return nil, xerrors.Errorf("unknown network version: %d", version)
}

func NewMap(store Store, version builtin.Version) (Map, error) {
	switch version {
	case builtin.Version0:
		return adt0.MakeEmptyMap(store), nil
	}
	return nil, xerrors.Errorf("unknown network version: %d", version)
}

type Array interface {
	Root() (cid.Cid, error)

	Set(idx uint64, v cbor.Marshaler) error
	Get(idx uint64, v cbor.Unmarshaler) (bool, error)
	Delete(idx uint64) error
	Length() uint64

	ForEach(v cbor.Unmarshaler, fn func(idx int64) error) error
}

func AsArray(store Store, root cid.Cid, version network.Version) (Array, error) {
	switch builtin.VersionForNetwork(version) {
	case builtin.Version0:
		return adt0.AsArray(store, root)
	}
	return nil, xerrors.Errorf("unknown network version: %d", version)
}

func NewArray(store Store, version builtin.Version) (Array, error) {
	switch version {
	case builtin.Version0:
		return adt0.MakeEmptyArray(store), nil
	}
	return nil, xerrors.Errorf("unknown network version: %d", version)
}
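// Editor's sketch (not part of the diff): how the version-dispatching helpers above
// are intended to be called. Store construction mirrors newContextStore in
// diff_adt_test.go further down; runtime.CBORBytes and abi.UIntKey are the same
// value and key types those tests use, and builtin.Version0 is the constant added
// in chain/actors/builtin/builtin.go later in this change.
package main

import (
	"context"
	"fmt"

	cbornode "github.com/ipfs/go-ipld-cbor"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-actors/actors/runtime"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/actors/builtin"
	bstore "github.com/filecoin-project/lotus/lib/blockstore"
)

func main() {
	// Wrap an in-memory blockstore so it satisfies adt.Store.
	store := adt.WrapStore(context.Background(), cbornode.NewCborStore(bstore.NewTemporarySync()))

	// Build an empty v0 map, write one entry, and flush it to get a root CID.
	m, err := adt.NewMap(store, builtin.Version0)
	if err != nil {
		panic(err)
	}
	if err := m.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{42})); err != nil {
		panic(err)
	}
	root, err := m.Root()
	if err != nil {
		panic(err)
	}

	// Reload the same map through the version-aware constructor.
	if _, err := adt.AsMap(store, root, builtin.Version0); err != nil {
		panic(err)
	}
	fmt.Println("map root:", root)
}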
122 chain/actors/adt/diff_adt.go Normal file
@@ -0,0 +1,122 @@
package adt

import (
	"bytes"

	"github.com/filecoin-project/go-state-types/abi"
	typegen "github.com/whyrusleeping/cbor-gen"
)

// AdtArrayDiff generalizes adt.Array diffing by accepting a Deferred type that can be unmarshalled to its corresponding struct
// in an interface implementation.
// Add should be called when a new k,v is added to the array.
// Modify should be called when a value is modified in the array.
// Remove should be called when a value is removed from the array.
type AdtArrayDiff interface {
	Add(key uint64, val *typegen.Deferred) error
	Modify(key uint64, from, to *typegen.Deferred) error
	Remove(key uint64, val *typegen.Deferred) error
}

// TODO Performance can be improved by diffing the underlying IPLD graph, e.g. https://github.com/ipfs/go-merkledag/blob/749fd8717d46b4f34c9ce08253070079c89bc56d/dagutils/diff.go#L104
// CBOR Marshaling will likely be the largest performance bottleneck here.

// DiffAdtArray accepts two *adt.Array's and an AdtArrayDiff implementation. It does the following:
// - All values that exist in preArr and not in curArr are passed to AdtArrayDiff.Remove()
// - All values that exist in curArr and not in preArr are passed to AdtArrayDiff.Add()
// - All values that exist in both preArr and curArr are passed to AdtArrayDiff.Modify()
// - It is the responsibility of AdtArrayDiff.Modify() to determine if the values it was passed have been modified.
func DiffAdtArray(preArr, curArr Array, out AdtArrayDiff) error {
	notNew := make(map[int64]struct{}, curArr.Length())
	prevVal := new(typegen.Deferred)
	if err := preArr.ForEach(prevVal, func(i int64) error {
		curVal := new(typegen.Deferred)
		found, err := curArr.Get(uint64(i), curVal)
		if err != nil {
			return err
		}
		if !found {
			if err := out.Remove(uint64(i), prevVal); err != nil {
				return err
			}
			return nil
		}

		// report a modification only when the raw bytes differ
		if !bytes.Equal(prevVal.Raw, curVal.Raw) {
			if err := out.Modify(uint64(i), prevVal, curVal); err != nil {
				return err
			}
		}
		notNew[i] = struct{}{}
		return nil
	}); err != nil {
		return err
	}

	curVal := new(typegen.Deferred)
	return curArr.ForEach(curVal, func(i int64) error {
		if _, ok := notNew[i]; ok {
			return nil
		}
		return out.Add(uint64(i), curVal)
	})
}

// TODO Performance can be improved by diffing the underlying IPLD graph, e.g. https://github.com/ipfs/go-merkledag/blob/749fd8717d46b4f34c9ce08253070079c89bc56d/dagutils/diff.go#L104
// CBOR Marshaling will likely be the largest performance bottleneck here.

// AdtMapDiff generalizes adt.Map diffing by accepting a Deferred type that can be unmarshalled to its corresponding struct
// in an interface implementation.
// AsKey should return the Keyer implementation specific to the map.
// Add should be called when a new k,v is added to the map.
// Modify should be called when a value is modified in the map.
// Remove should be called when a value is removed from the map.
type AdtMapDiff interface {
	AsKey(key string) (abi.Keyer, error)
	Add(key string, val *typegen.Deferred) error
	Modify(key string, from, to *typegen.Deferred) error
	Remove(key string, val *typegen.Deferred) error
}

func DiffAdtMap(preMap, curMap Map, out AdtMapDiff) error {
	notNew := make(map[string]struct{})
	prevVal := new(typegen.Deferred)
	if err := preMap.ForEach(prevVal, func(key string) error {
		curVal := new(typegen.Deferred)
		k, err := out.AsKey(key)
		if err != nil {
			return err
		}

		found, err := curMap.Get(k, curVal)
		if err != nil {
			return err
		}
		if !found {
			if err := out.Remove(key, prevVal); err != nil {
				return err
			}
			return nil
		}

		// report a modification only when the raw bytes differ
		if !bytes.Equal(prevVal.Raw, curVal.Raw) {
			if err := out.Modify(key, prevVal, curVal); err != nil {
				return err
			}
		}
		notNew[key] = struct{}{}
		return nil
	}); err != nil {
		return err
	}

	curVal := new(typegen.Deferred)
	return curMap.ForEach(curVal, func(key string) error {
		if _, ok := notNew[key]; ok {
			return nil
		}
		return out.Add(key, curVal)
	})
}
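// Editor's sketch (not part of the diff): the smallest useful AdtArrayDiff
// implementation, which only counts changes. The test file that follows exercises
// the same interfaces in full; this just shows the callback shape within package adt.
type countingDiff struct {
	added, modified, removed int
}

func (c *countingDiff) Add(key uint64, val *typegen.Deferred) error { c.added++; return nil }

func (c *countingDiff) Modify(key uint64, from, to *typegen.Deferred) error { c.modified++; return nil }

func (c *countingDiff) Remove(key uint64, val *typegen.Deferred) error { c.removed++; return nil }

// Usage sketch: count := new(countingDiff); err := DiffAdtArray(preArr, curArr, count)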
300 chain/actors/adt/diff_adt_test.go Normal file
@@ -0,0 +1,300 @@
package adt

import (
	"bytes"
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	cbornode "github.com/ipfs/go-ipld-cbor"
	typegen "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/specs-actors/actors/runtime"
	adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"

	bstore "github.com/filecoin-project/lotus/lib/blockstore"
)

func TestDiffAdtArray(t *testing.T) {
	ctxstoreA := newContextStore()
	ctxstoreB := newContextStore()

	arrA := adt0.MakeEmptyArray(ctxstoreA)
	arrB := adt0.MakeEmptyArray(ctxstoreB)

	require.NoError(t, arrA.Set(0, runtime.CBORBytes([]byte{0}))) // delete

	require.NoError(t, arrA.Set(1, runtime.CBORBytes([]byte{0}))) // modify
	require.NoError(t, arrB.Set(1, runtime.CBORBytes([]byte{1})))

	require.NoError(t, arrA.Set(2, runtime.CBORBytes([]byte{1}))) // delete

	require.NoError(t, arrA.Set(3, runtime.CBORBytes([]byte{0}))) // noop
	require.NoError(t, arrB.Set(3, runtime.CBORBytes([]byte{0})))

	require.NoError(t, arrA.Set(4, runtime.CBORBytes([]byte{0}))) // modify
	require.NoError(t, arrB.Set(4, runtime.CBORBytes([]byte{6})))

	require.NoError(t, arrB.Set(5, runtime.CBORBytes{8})) // add
	require.NoError(t, arrB.Set(6, runtime.CBORBytes{9})) // add

	changes := new(TestDiffArray)

	assert.NoError(t, DiffAdtArray(arrA, arrB, changes))
	assert.NotNil(t, changes)

	assert.Equal(t, 2, len(changes.Added))
	// keys 5 and 6 were added
	assert.EqualValues(t, uint64(5), changes.Added[0].key)
	assert.EqualValues(t, []byte{8}, changes.Added[0].val)
	assert.EqualValues(t, uint64(6), changes.Added[1].key)
	assert.EqualValues(t, []byte{9}, changes.Added[1].val)

	assert.Equal(t, 2, len(changes.Modified))
	// keys 1 and 4 were modified
	assert.EqualValues(t, uint64(1), changes.Modified[0].From.key)
	assert.EqualValues(t, []byte{0}, changes.Modified[0].From.val)
	assert.EqualValues(t, uint64(1), changes.Modified[0].To.key)
	assert.EqualValues(t, []byte{1}, changes.Modified[0].To.val)
	assert.EqualValues(t, uint64(4), changes.Modified[1].From.key)
	assert.EqualValues(t, []byte{0}, changes.Modified[1].From.val)
	assert.EqualValues(t, uint64(4), changes.Modified[1].To.key)
	assert.EqualValues(t, []byte{6}, changes.Modified[1].To.val)

	assert.Equal(t, 2, len(changes.Removed))
	// keys 0 and 2 were deleted
	assert.EqualValues(t, uint64(0), changes.Removed[0].key)
	assert.EqualValues(t, []byte{0}, changes.Removed[0].val)
	assert.EqualValues(t, uint64(2), changes.Removed[1].key)
	assert.EqualValues(t, []byte{1}, changes.Removed[1].val)
}

func TestDiffAdtMap(t *testing.T) {
	ctxstoreA := newContextStore()
	ctxstoreB := newContextStore()

	mapA := adt0.MakeEmptyMap(ctxstoreA)
	mapB := adt0.MakeEmptyMap(ctxstoreB)

	require.NoError(t, mapA.Put(abi.UIntKey(0), runtime.CBORBytes([]byte{0}))) // delete

	require.NoError(t, mapA.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{0}))) // modify
	require.NoError(t, mapB.Put(abi.UIntKey(1), runtime.CBORBytes([]byte{1})))

	require.NoError(t, mapA.Put(abi.UIntKey(2), runtime.CBORBytes([]byte{1}))) // delete

	require.NoError(t, mapA.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0}))) // noop
	require.NoError(t, mapB.Put(abi.UIntKey(3), runtime.CBORBytes([]byte{0})))

	require.NoError(t, mapA.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{0}))) // modify
	require.NoError(t, mapB.Put(abi.UIntKey(4), runtime.CBORBytes([]byte{6})))

	require.NoError(t, mapB.Put(abi.UIntKey(5), runtime.CBORBytes{8})) // add
	require.NoError(t, mapB.Put(abi.UIntKey(6), runtime.CBORBytes{9})) // add

	changes := new(TestDiffMap)

	assert.NoError(t, DiffAdtMap(mapA, mapB, changes))
	assert.NotNil(t, changes)

	assert.Equal(t, 2, len(changes.Added))
	// keys 5 and 6 were added
	assert.EqualValues(t, uint64(6), changes.Added[0].key)
	assert.EqualValues(t, []byte{9}, changes.Added[0].val)
	assert.EqualValues(t, uint64(5), changes.Added[1].key)
	assert.EqualValues(t, []byte{8}, changes.Added[1].val)

	assert.Equal(t, 2, len(changes.Modified))
	// keys 1 and 4 were modified
	assert.EqualValues(t, uint64(1), changes.Modified[0].From.key)
	assert.EqualValues(t, []byte{0}, changes.Modified[0].From.val)
	assert.EqualValues(t, uint64(1), changes.Modified[0].To.key)
	assert.EqualValues(t, []byte{1}, changes.Modified[0].To.val)
	assert.EqualValues(t, uint64(4), changes.Modified[1].From.key)
	assert.EqualValues(t, []byte{0}, changes.Modified[1].From.val)
	assert.EqualValues(t, uint64(4), changes.Modified[1].To.key)
	assert.EqualValues(t, []byte{6}, changes.Modified[1].To.val)

	assert.Equal(t, 2, len(changes.Removed))
	// keys 0 and 2 were deleted
	assert.EqualValues(t, uint64(0), changes.Removed[0].key)
	assert.EqualValues(t, []byte{0}, changes.Removed[0].val)
	assert.EqualValues(t, uint64(2), changes.Removed[1].key)
	assert.EqualValues(t, []byte{1}, changes.Removed[1].val)
}

type TestDiffMap struct {
	Added    []adtMapDiffResult
	Modified []TestAdtMapDiffModified
	Removed  []adtMapDiffResult
}

var _ AdtMapDiff = &TestDiffMap{}

func (t *TestDiffMap) AsKey(key string) (abi.Keyer, error) {
	k, err := abi.ParseUIntKey(key)
	if err != nil {
		return nil, err
	}
	return abi.UIntKey(k), nil
}

func (t *TestDiffMap) Add(key string, val *typegen.Deferred) error {
	v := new(runtime.CBORBytes)
	err := v.UnmarshalCBOR(bytes.NewReader(val.Raw))
	if err != nil {
		return err
	}
	k, err := abi.ParseUIntKey(key)
	if err != nil {
		return err
	}
	t.Added = append(t.Added, adtMapDiffResult{
		key: k,
		val: *v,
	})
	return nil
}

func (t *TestDiffMap) Modify(key string, from, to *typegen.Deferred) error {
	vFrom := new(runtime.CBORBytes)
	err := vFrom.UnmarshalCBOR(bytes.NewReader(from.Raw))
	if err != nil {
		return err
	}

	vTo := new(runtime.CBORBytes)
	err = vTo.UnmarshalCBOR(bytes.NewReader(to.Raw))
	if err != nil {
		return err
	}

	k, err := abi.ParseUIntKey(key)
	if err != nil {
		return err
	}

	if !bytes.Equal(*vFrom, *vTo) {
		t.Modified = append(t.Modified, TestAdtMapDiffModified{
			From: adtMapDiffResult{
				key: k,
				val: *vFrom,
			},
			To: adtMapDiffResult{
				key: k,
				val: *vTo,
			},
		})
	}
	return nil
}

func (t *TestDiffMap) Remove(key string, val *typegen.Deferred) error {
	v := new(runtime.CBORBytes)
	err := v.UnmarshalCBOR(bytes.NewReader(val.Raw))
	if err != nil {
		return err
	}
	k, err := abi.ParseUIntKey(key)
	if err != nil {
		return err
	}
	t.Removed = append(t.Removed, adtMapDiffResult{
		key: k,
		val: *v,
	})
	return nil
}

type adtMapDiffResult struct {
	key uint64
	val runtime.CBORBytes
}

type TestAdtMapDiffModified struct {
	From adtMapDiffResult
	To   adtMapDiffResult
}

type adtArrayDiffResult struct {
	key uint64
	val runtime.CBORBytes
}

type TestDiffArray struct {
	Added    []adtArrayDiffResult
	Modified []TestAdtArrayDiffModified
	Removed  []adtArrayDiffResult
}

var _ AdtArrayDiff = &TestDiffArray{}

type TestAdtArrayDiffModified struct {
	From adtArrayDiffResult
	To   adtArrayDiffResult
}

func (t *TestDiffArray) Add(key uint64, val *typegen.Deferred) error {
	v := new(runtime.CBORBytes)
	err := v.UnmarshalCBOR(bytes.NewReader(val.Raw))
	if err != nil {
		return err
	}
	t.Added = append(t.Added, adtArrayDiffResult{
		key: key,
		val: *v,
	})
	return nil
}

func (t *TestDiffArray) Modify(key uint64, from, to *typegen.Deferred) error {
	vFrom := new(runtime.CBORBytes)
	err := vFrom.UnmarshalCBOR(bytes.NewReader(from.Raw))
	if err != nil {
		return err
	}

	vTo := new(runtime.CBORBytes)
	err = vTo.UnmarshalCBOR(bytes.NewReader(to.Raw))
	if err != nil {
		return err
	}

	if !bytes.Equal(*vFrom, *vTo) {
		t.Modified = append(t.Modified, TestAdtArrayDiffModified{
			From: adtArrayDiffResult{
				key: key,
				val: *vFrom,
			},
			To: adtArrayDiffResult{
				key: key,
				val: *vTo,
			},
		})
	}
	return nil
}

func (t *TestDiffArray) Remove(key uint64, val *typegen.Deferred) error {
	v := new(runtime.CBORBytes)
	err := v.UnmarshalCBOR(bytes.NewReader(val.Raw))
	if err != nil {
		return err
	}
	t.Removed = append(t.Removed, adtArrayDiffResult{
		key: key,
		val: *v,
	})
	return nil
}

func newContextStore() Store {
	ctx := context.Background()
	bs := bstore.NewTemporarySync()
	store := cbornode.NewCborStore(bs)
	return WrapStore(ctx, store)
}
17 chain/actors/adt/store.go Normal file
@@ -0,0 +1,17 @@
package adt

import (
	"context"

	adt "github.com/filecoin-project/specs-actors/actors/util/adt"
	cbor "github.com/ipfs/go-ipld-cbor"
)

type Store interface {
	Context() context.Context
	cbor.IpldStore
}

func WrapStore(ctx context.Context, store cbor.IpldStore) Store {
	return adt.WrapStore(ctx, store)
}
@@ -3,13 +3,14 @@ package aerrors
import (
	"fmt"

	"github.com/filecoin-project/go-state-types/exitcode"
	"golang.org/x/xerrors"
)

func IsFatal(err ActorError) bool {
	return err != nil && err.IsFatal()
}
func RetCode(err ActorError) uint8 {
func RetCode(err ActorError) exitcode.ExitCode {
	if err == nil {
		return 0
	}
@@ -25,12 +26,12 @@ type internalActorError interface {
type ActorError interface {
	error
	IsFatal() bool
	RetCode() uint8
	RetCode() exitcode.ExitCode
}

type actorError struct {
	fatal   bool
	retCode uint8
	retCode exitcode.ExitCode

	msg   string
	frame xerrors.Frame
@@ -41,7 +42,7 @@ func (e *actorError) IsFatal() bool {
	return e.fatal
}

func (e *actorError) RetCode() uint8 {
func (e *actorError) RetCode() exitcode.ExitCode {
	return e.retCode
}

@@ -3,6 +3,7 @@ package aerrors_test
import (
	"testing"

	"github.com/filecoin-project/go-state-types/exitcode"
	. "github.com/filecoin-project/lotus/chain/actors/aerrors"

	"github.com/stretchr/testify/assert"
@@ -31,5 +32,5 @@ func TestAbsorbeError(t *testing.T) {
	aw3 := Wrap(aw2, "creating miner in storage market")
	t.Logf("Verbose error: %+v", aw3)
	t.Logf("Normal error: %v", aw3)
	assert.Equal(t, uint8(35), RetCode(aw3))
	assert.Equal(t, exitcode.ExitCode(35), RetCode(aw3))
}
@@ -4,12 +4,13 @@ import (
	"errors"
	"fmt"

	hamt "github.com/ipfs/go-hamt-ipld"
	"github.com/filecoin-project/go-state-types/exitcode"
	cbor "github.com/ipfs/go-ipld-cbor"
	"golang.org/x/xerrors"
)

// New creates a new non-fatal error
func New(retCode uint8, message string) ActorError {
func New(retCode exitcode.ExitCode, message string) ActorError {
	if retCode == 0 {
		return &actorError{
			fatal: true,
@@ -29,7 +30,7 @@ func New(retCode uint8, message string) ActorError {
}

// Newf creates a new non-fatal error
func Newf(retCode uint8, format string, args ...interface{}) ActorError {
func Newf(retCode exitcode.ExitCode, format string, args ...interface{}) ActorError {
	if retCode == 0 {
		return &actorError{
			fatal: true,
@@ -48,6 +49,27 @@ func Newf(retCode exitcode.ExitCode, format string, args ...interface{}) ActorError {
	}
}

// todo: bit hacky

func NewfSkip(skip int, retCode exitcode.ExitCode, format string, args ...interface{}) ActorError {
	if retCode == 0 {
		return &actorError{
			fatal:   true,
			retCode: 0,

			msg:   "tried creating an error and setting RetCode to 0",
			frame: xerrors.Caller(skip),
			err:   fmt.Errorf(format, args...),
		}
	}
	return &actorError{
		retCode: retCode,

		msg:   fmt.Sprintf(format, args...),
		frame: xerrors.Caller(skip),
	}
}

func Fatal(message string, args ...interface{}) ActorError {
	return &actorError{
		fatal: true,
@@ -95,7 +117,7 @@ func Wrapf(err ActorError, format string, args ...interface{}) ActorError {
}

// Absorb takes an error and makes it a non-fatal ActorError
func Absorb(err error, retCode uint8, msg string) ActorError {
func Absorb(err error, retCode exitcode.ExitCode, msg string) ActorError {
	if err == nil {
		return nil
	}
@@ -160,7 +182,7 @@ func HandleExternalError(err error, msg string) ActorError {
	}
}

	if xerrors.Is(err, &hamt.SerializationError{}) {
	if xerrors.Is(err, &cbor.SerializationError{}) {
		return &actorError{
			fatal:   false,
			retCode: 253,
@@ -171,7 +193,8 @@ func HandleExternalError(err error, msg string) ActorError {
	}

	return &actorError{
		fatal:   true,
		fatal:   false,
		retCode: 219,

		msg:   msg,
		frame: xerrors.Caller(1),
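// Editor's sketch (not part of the diff): constructing and inspecting an ActorError
// now that return codes are exitcode.ExitCode rather than uint8. Uses only Newf,
// RetCode and IsFatal from the aerrors package as changed above.
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/exitcode"

	"github.com/filecoin-project/lotus/chain/actors/aerrors"
)

func main() {
	err := aerrors.Newf(exitcode.ExitCode(16), "invalid argument: %s", "sector size")
	fmt.Println(aerrors.RetCode(err)) // the return code, now typed as exitcode.ExitCode
	fmt.Println(aerrors.IsFatal(err)) // false; a zero return code would have produced a fatal error instead
}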
29 chain/actors/builtin/README.md Normal file
@@ -0,0 +1,29 @@
# Actors

This package contains shims for abstracting over different actor versions.

## Design

Shims in this package follow a few common design principles.

### Structure Agnostic

Shim interfaces defined in this package should (ideally) not change even if the
structure of the underlying data changes. For example:

* All shims store an internal "store" object. That way, state can be moved into
  a separate object without needing to add a store to the function signature.
* All functions must return an error, even if unused for now.

### Minimal

These interfaces should be expanded only as necessary to reduce maintenance burden.

### Queries, not field accessors

When possible, functions should query the state instead of simply acting as
field accessors. These queries are more likely to remain stable across
specs-actors upgrades than specific state fields.

Note: there is a trade-off here. Avoid implementing _complicated_ query logic
inside these shims, as it will need to be replicated in every shim.
31 chain/actors/builtin/account/account.go Normal file
@@ -0,0 +1,31 @@
package account

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/cbor"
	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/types"
)

func Load(store adt.Store, act *types.Actor) (State, error) {
	switch act.Code {
	case builtin0.AccountActorCodeID:
		out := state0{store: store}
		err := store.Get(store.Context(), act.Head, &out)
		if err != nil {
			return nil, err
		}
		return &out, nil
	}
	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}

type State interface {
	cbor.Marshaler

	PubkeyAddress() (address.Address, error)
}
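// Editor's sketch (not part of the diff): resolving an account actor's public key
// address through the shim above. getActor is a hypothetical stand-in for however
// the caller obtains the *types.Actor (e.g. from a state tree); it is not a real helper.
func pubkeyOf(store adt.Store, getActor func() (*types.Actor, error)) (address.Address, error) {
	act, err := getActor()
	if err != nil {
		return address.Undef, err
	}
	st, err := Load(store, act)
	if err != nil {
		return address.Undef, err
	}
	return st.PubkeyAddress()
}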
18 chain/actors/builtin/account/v0.go Normal file
@@ -0,0 +1,18 @@
package account

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/specs-actors/actors/builtin/account"
)

var _ State = (*state0)(nil)

type state0 struct {
	account.State
	store adt.Store
}

func (s *state0) PubkeyAddress() (address.Address, error) {
	return s.Address, nil
}
43 chain/actors/builtin/builtin.go Normal file
@@ -0,0 +1,43 @@
package builtin

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
	proof0 "github.com/filecoin-project/specs-actors/actors/runtime/proof"

	smoothing0 "github.com/filecoin-project/specs-actors/actors/util/smoothing"

	"github.com/filecoin-project/go-state-types/network"
)

type Version int

const (
	Version0 = iota
)

// Converts a network version into a specs-actors version.
func VersionForNetwork(version network.Version) Version {
	switch version {
	case network.Version0, network.Version1, network.Version2, network.Version3:
		return Version0
	default:
		panic(fmt.Sprintf("unsupported network version %d", version))
	}
}

// TODO: Why does actors have 2 different versions of this?
type SectorInfo = proof0.SectorInfo
type PoStProof = proof0.PoStProof
type FilterEstimate = smoothing0.FilterEstimate

func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate {
	return (FilterEstimate)(v0)
}

// Doesn't change between actors v0 and v1
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower {
	return miner0.QAPowerForWeight(size, duration, dealWeight, verifiedWeight)
}
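// Editor's sketch (not part of the diff): callers outside this package map a
// network version to an actors version, or let the adt helpers do it for them
// (adt.AsArray in chain/actors/adt/adt.go calls VersionForNetwork internally).
package main

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/network"

	"github.com/filecoin-project/lotus/chain/actors/builtin"
)

func main() {
	// Network versions 0 through 3 all map to the v0 actors.
	fmt.Println(builtin.VersionForNetwork(network.Version2) == builtin.Version0) // true
}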
44 chain/actors/builtin/init/init.go Normal file
@@ -0,0 +1,44 @@
package init

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/cbor"
	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/node/modules/dtypes"
)

var Address = builtin0.InitActorAddr

func Load(store adt.Store, act *types.Actor) (State, error) {
	switch act.Code {
	case builtin0.InitActorCodeID:
		out := state0{store: store}
		err := store.Get(store.Context(), act.Head, &out)
		if err != nil {
			return nil, err
		}
		return &out, nil
	}
	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}

type State interface {
	cbor.Marshaler

	ResolveAddress(address address.Address) (address.Address, bool, error)
	MapAddressToNewID(address address.Address) (address.Address, error)
	NetworkName() (dtypes.NetworkName, error)

	ForEachActor(func(id abi.ActorID, address address.Address) error) error

	// Remove exists to support tooling that manipulates state for testing.
	// It should not be used in production code, as init actor entries are
	// immutable.
	Remove(addrs ...address.Address) error
}
67 chain/actors/builtin/init/v0.go Normal file
@@ -0,0 +1,67 @@
package init

import (
	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	cbg "github.com/whyrusleeping/cbor-gen"
	"golang.org/x/xerrors"

	init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/node/modules/dtypes"

	adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
)

var _ State = (*state0)(nil)

type state0 struct {
	init_.State
	store adt.Store
}

func (s *state0) ResolveAddress(address address.Address) (address.Address, bool, error) {
	return s.State.ResolveAddress(s.store, address)
}

func (s *state0) MapAddressToNewID(address address.Address) (address.Address, error) {
	return s.State.MapAddressToNewID(s.store, address)
}

func (s *state0) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error {
	addrs, err := adt0.AsMap(s.store, s.State.AddressMap)
	if err != nil {
		return err
	}
	var actorID cbg.CborInt
	return addrs.ForEach(&actorID, func(key string) error {
		addr, err := address.NewFromBytes([]byte(key))
		if err != nil {
			return err
		}
		return cb(abi.ActorID(actorID), addr)
	})
}

func (s *state0) NetworkName() (dtypes.NetworkName, error) {
	return dtypes.NetworkName(s.State.NetworkName), nil
}

func (s *state0) Remove(addrs ...address.Address) (err error) {
	m, err := adt0.AsMap(s.store, s.State.AddressMap)
	if err != nil {
		return err
	}
	for _, addr := range addrs {
		if err = m.Delete(abi.AddrKey(addr)); err != nil {
			return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err)
		}
	}
	amr, err := m.Root()
	if err != nil {
		return xerrors.Errorf("failed to get address map root: %w", err)
	}
	s.State.AddressMap = amr
	return nil
}
91 chain/actors/builtin/market/diff.go Normal file
@@ -0,0 +1,91 @@
package market

import (
	"fmt"

	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	cbg "github.com/whyrusleeping/cbor-gen"
)

func DiffDealProposals(pre, cur DealProposals) (*DealProposalChanges, error) {
	results := new(DealProposalChanges)
	if err := adt.DiffAdtArray(pre.array(), cur.array(), &marketProposalsDiffer{results, pre, cur}); err != nil {
		return nil, fmt.Errorf("diffing deal states: %w", err)
	}
	return results, nil
}

type marketProposalsDiffer struct {
	Results  *DealProposalChanges
	pre, cur DealProposals
}

func (d *marketProposalsDiffer) Add(key uint64, val *cbg.Deferred) error {
	dp, err := d.cur.decode(val)
	if err != nil {
		return err
	}
	d.Results.Added = append(d.Results.Added, ProposalIDState{abi.DealID(key), *dp})
	return nil
}

func (d *marketProposalsDiffer) Modify(key uint64, from, to *cbg.Deferred) error {
	// short circuit, DealProposals are static
	return nil
}

func (d *marketProposalsDiffer) Remove(key uint64, val *cbg.Deferred) error {
	dp, err := d.pre.decode(val)
	if err != nil {
		return err
	}
	d.Results.Removed = append(d.Results.Removed, ProposalIDState{abi.DealID(key), *dp})
	return nil
}

func DiffDealStates(pre, cur DealStates) (*DealStateChanges, error) {
	results := new(DealStateChanges)
	if err := adt.DiffAdtArray(pre.array(), cur.array(), &marketStatesDiffer{results, pre, cur}); err != nil {
		return nil, fmt.Errorf("diffing deal states: %w", err)
	}
	return results, nil
}

type marketStatesDiffer struct {
	Results  *DealStateChanges
	pre, cur DealStates
}

func (d *marketStatesDiffer) Add(key uint64, val *cbg.Deferred) error {
	ds, err := d.cur.decode(val)
	if err != nil {
		return err
	}
	d.Results.Added = append(d.Results.Added, DealIDState{abi.DealID(key), *ds})
	return nil
}

func (d *marketStatesDiffer) Modify(key uint64, from, to *cbg.Deferred) error {
	dsFrom, err := d.pre.decode(from)
	if err != nil {
		return err
	}
	dsTo, err := d.cur.decode(to)
	if err != nil {
		return err
	}
	if *dsFrom != *dsTo {
		d.Results.Modified = append(d.Results.Modified, DealStateChange{abi.DealID(key), dsFrom, dsTo})
	}
	return nil
}

func (d *marketStatesDiffer) Remove(key uint64, val *cbg.Deferred) error {
	ds, err := d.pre.decode(val)
	if err != nil {
		return err
	}
	d.Results.Removed = append(d.Results.Removed, DealIDState{abi.DealID(key), *ds})
	return nil
}
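// Editor's sketch (not part of the diff): diffing deal state between two versions
// of the market actor state. pre and cur are assumed to be State values for the
// same actor loaded (via Load in market.go below) at two different tipsets.
func dealStateDiff(pre, cur State) (*DealStateChanges, error) {
	preStates, err := pre.States()
	if err != nil {
		return nil, err
	}
	curStates, err := cur.States()
	if err != nil {
		return nil, err
	}
	return DiffDealStates(preStates, curStates)
}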
129 chain/actors/builtin/market/market.go Normal file
@@ -0,0 +1,129 @@
package market

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/go-state-types/cbor"
	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
	market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
	"github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/types"
)

var Address = builtin0.StorageMarketActorAddr

func Load(store adt.Store, act *types.Actor) (st State, err error) {
	switch act.Code {
	case builtin0.StorageMarketActorCodeID:
		out := state0{store: store}
		err := store.Get(store.Context(), act.Head, &out)
		if err != nil {
			return nil, err
		}
		return &out, nil
	}
	return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}

type State interface {
	cbor.Marshaler
	BalancesChanged(State) (bool, error)
	EscrowTable() (BalanceTable, error)
	LockedTable() (BalanceTable, error)
	TotalLocked() (abi.TokenAmount, error)
	StatesChanged(State) (bool, error)
	States() (DealStates, error)
	ProposalsChanged(State) (bool, error)
	Proposals() (DealProposals, error)
	VerifyDealsForActivation(
		minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
	) (weight, verifiedWeight abi.DealWeight, err error)
}

type BalanceTable interface {
	ForEach(cb func(address.Address, abi.TokenAmount) error) error
	Get(key address.Address) (abi.TokenAmount, error)
}

type DealStates interface {
	ForEach(cb func(id abi.DealID, ds DealState) error) error
	Get(id abi.DealID) (*DealState, bool, error)

	array() adt.Array
	decode(*cbg.Deferred) (*DealState, error)
}

type DealProposals interface {
	ForEach(cb func(id abi.DealID, dp DealProposal) error) error
	Get(id abi.DealID) (*DealProposal, bool, error)

	array() adt.Array
	decode(*cbg.Deferred) (*DealProposal, error)
}

type PublishStorageDealsParams = market0.PublishStorageDealsParams
type PublishStorageDealsReturn = market0.PublishStorageDealsReturn
type VerifyDealsForActivationParams = market0.VerifyDealsForActivationParams

type ClientDealProposal = market0.ClientDealProposal

type DealState struct {
	SectorStartEpoch abi.ChainEpoch // -1 if not yet included in proven sector
	LastUpdatedEpoch abi.ChainEpoch // -1 if deal state never updated
	SlashEpoch       abi.ChainEpoch // -1 if deal never slashed
}

type DealProposal struct {
	PieceCID             cid.Cid
	PieceSize            abi.PaddedPieceSize
	VerifiedDeal         bool
	Client               address.Address
	Provider             address.Address
	Label                string
	StartEpoch           abi.ChainEpoch
	EndEpoch             abi.ChainEpoch
	StoragePricePerEpoch abi.TokenAmount
	ProviderCollateral   abi.TokenAmount
	ClientCollateral     abi.TokenAmount
}

type DealStateChanges struct {
	Added    []DealIDState
	Modified []DealStateChange
	Removed  []DealIDState
}

type DealIDState struct {
	ID   abi.DealID
	Deal DealState
}

// DealStateChange is a change in deal state from -> to
type DealStateChange struct {
	ID   abi.DealID
	From *DealState
	To   *DealState
}

type DealProposalChanges struct {
	Added   []ProposalIDState
	Removed []ProposalIDState
}

type ProposalIDState struct {
	ID       abi.DealID
	Proposal DealProposal
}

func EmptyDealState() *DealState {
	return &DealState{
		SectorStartEpoch: -1,
		SlashEpoch:       -1,
		LastUpdatedEpoch: -1,
	}
}
192 chain/actors/builtin/market/v0.go Normal file
@@ -0,0 +1,192 @@
package market

import (
	"bytes"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/specs-actors/actors/builtin/market"
	adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
	cbg "github.com/whyrusleeping/cbor-gen"
)

var _ State = (*state0)(nil)

type state0 struct {
	market.State
	store adt.Store
}

func (s *state0) TotalLocked() (abi.TokenAmount, error) {
	fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral)
	fml = types.BigAdd(fml, s.TotalClientStorageFee)
	return fml, nil
}

func (s *state0) BalancesChanged(otherState State) (bool, error) {
	otherState0, ok := otherState.(*state0)
	if !ok {
		// there's no way to compare different versions of the state, so let's
		// just say that means the state of balances has changed
		return true, nil
	}
	return !s.State.EscrowTable.Equals(otherState0.State.EscrowTable) || !s.State.LockedTable.Equals(otherState0.State.LockedTable), nil
}

func (s *state0) StatesChanged(otherState State) (bool, error) {
	otherState0, ok := otherState.(*state0)
	if !ok {
		// there's no way to compare different versions of the state, so let's
		// just say that means the state of balances has changed
		return true, nil
	}
	return !s.State.States.Equals(otherState0.State.States), nil
}

func (s *state0) States() (DealStates, error) {
	stateArray, err := adt0.AsArray(s.store, s.State.States)
	if err != nil {
		return nil, err
	}
	return &dealStates0{stateArray}, nil
}

func (s *state0) ProposalsChanged(otherState State) (bool, error) {
	otherState0, ok := otherState.(*state0)
	if !ok {
		// there's no way to compare different versions of the state, so let's
		// just say that means the state of balances has changed
		return true, nil
	}
	return !s.State.Proposals.Equals(otherState0.State.Proposals), nil
}

func (s *state0) Proposals() (DealProposals, error) {
	proposalArray, err := adt0.AsArray(s.store, s.State.Proposals)
	if err != nil {
		return nil, err
	}
	return &dealProposals0{proposalArray}, nil
}

func (s *state0) EscrowTable() (BalanceTable, error) {
	bt, err := adt0.AsBalanceTable(s.store, s.State.EscrowTable)
	if err != nil {
		return nil, err
	}
	return &balanceTable0{bt}, nil
}

func (s *state0) LockedTable() (BalanceTable, error) {
	bt, err := adt0.AsBalanceTable(s.store, s.State.LockedTable)
	if err != nil {
		return nil, err
	}
	return &balanceTable0{bt}, nil
}

func (s *state0) VerifyDealsForActivation(
	minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch,
) (weight, verifiedWeight abi.DealWeight, err error) {
	return market.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch)
}

type balanceTable0 struct {
	*adt0.BalanceTable
}

func (bt *balanceTable0) ForEach(cb func(address.Address, abi.TokenAmount) error) error {
	asMap := (*adt0.Map)(bt.BalanceTable)
	var ta abi.TokenAmount
	return asMap.ForEach(&ta, func(key string) error {
		a, err := address.NewFromBytes([]byte(key))
		if err != nil {
			return err
		}
		return cb(a, ta)
	})
}

type dealStates0 struct {
	adt.Array
}

func (s *dealStates0) Get(dealID abi.DealID) (*DealState, bool, error) {
	var deal0 market.DealState
	found, err := s.Array.Get(uint64(dealID), &deal0)
	if err != nil {
		return nil, false, err
	}
	if !found {
		return nil, false, nil
	}
	deal := fromV0DealState(deal0)
	return &deal, true, nil
}

func (s *dealStates0) ForEach(cb func(dealID abi.DealID, ds DealState) error) error {
	var ds0 market.DealState
	return s.Array.ForEach(&ds0, func(idx int64) error {
		return cb(abi.DealID(idx), fromV0DealState(ds0))
	})
}

func (s *dealStates0) decode(val *cbg.Deferred) (*DealState, error) {
	var ds0 market.DealState
	if err := ds0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
		return nil, err
	}
	ds := fromV0DealState(ds0)
	return &ds, nil
}

func (s *dealStates0) array() adt.Array {
	return s.Array
}

func fromV0DealState(v0 market.DealState) DealState {
	return (DealState)(v0)
}

type dealProposals0 struct {
	adt.Array
}

func (s *dealProposals0) Get(dealID abi.DealID) (*DealProposal, bool, error) {
	var proposal0 market.DealProposal
	found, err := s.Array.Get(uint64(dealID), &proposal0)
	if err != nil {
		return nil, false, err
	}
	if !found {
		return nil, false, nil
	}
	proposal := fromV0DealProposal(proposal0)
	return &proposal, true, nil
}

func (s *dealProposals0) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error {
	var dp0 market.DealProposal
	return s.Array.ForEach(&dp0, func(idx int64) error {
		return cb(abi.DealID(idx), fromV0DealProposal(dp0))
	})
}

func (s *dealProposals0) decode(val *cbg.Deferred) (*DealProposal, error) {
	var dp0 market.DealProposal
	if err := dp0.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil {
		return nil, err
	}
	dp := fromV0DealProposal(dp0)
	return &dp, nil
}

func (s *dealProposals0) array() adt.Array {
	return s.Array
}

func fromV0DealProposal(v0 market.DealProposal) DealProposal {
	return (DealProposal)(v0)
}
127 chain/actors/builtin/miner/diff.go Normal file
@@ -0,0 +1,127 @@
package miner

import (
	"github.com/filecoin-project/go-state-types/abi"
	"github.com/filecoin-project/lotus/chain/actors/adt"
	cbg "github.com/whyrusleeping/cbor-gen"
)

func DiffPreCommits(pre, cur State) (*PreCommitChanges, error) {
	results := new(PreCommitChanges)

	prep, err := pre.precommits()
	if err != nil {
		return nil, err
	}

	curp, err := cur.precommits()
	if err != nil {
		return nil, err
	}

	err = adt.DiffAdtMap(prep, curp, &preCommitDiffer{results, pre, cur})
	if err != nil {
		return nil, err
	}

	return results, nil
}

type preCommitDiffer struct {
	Results    *PreCommitChanges
	pre, after State
}

func (m *preCommitDiffer) AsKey(key string) (abi.Keyer, error) {
	sector, err := abi.ParseUIntKey(key)
	if err != nil {
		return nil, err
	}
	return abi.UIntKey(sector), nil
}

func (m *preCommitDiffer) Add(key string, val *cbg.Deferred) error {
	sp, err := m.after.decodeSectorPreCommitOnChainInfo(val)
	if err != nil {
		return err
	}
	m.Results.Added = append(m.Results.Added, sp)
	return nil
}

func (m *preCommitDiffer) Modify(key string, from, to *cbg.Deferred) error {
	return nil
}

func (m *preCommitDiffer) Remove(key string, val *cbg.Deferred) error {
	sp, err := m.pre.decodeSectorPreCommitOnChainInfo(val)
	if err != nil {
		return err
	}
	m.Results.Removed = append(m.Results.Removed, sp)
	return nil
}

func DiffSectors(pre, cur State) (*SectorChanges, error) {
	results := new(SectorChanges)

	pres, err := pre.sectors()
	if err != nil {
		return nil, err
	}

	curs, err := cur.sectors()
	if err != nil {
		return nil, err
	}

	err = adt.DiffAdtArray(pres, curs, &sectorDiffer{results, pre, cur})
	if err != nil {
		return nil, err
	}

	return results, nil
}

type sectorDiffer struct {
	Results    *SectorChanges
	pre, after State
}

func (m *sectorDiffer) Add(key uint64, val *cbg.Deferred) error {
	si, err := m.after.decodeSectorOnChainInfo(val)
	if err != nil {
		return err
	}
	m.Results.Added = append(m.Results.Added, si)
	return nil
}

func (m *sectorDiffer) Modify(key uint64, from, to *cbg.Deferred) error {
	siFrom, err := m.pre.decodeSectorOnChainInfo(from)
	if err != nil {
		return err
	}

	siTo, err := m.after.decodeSectorOnChainInfo(to)
	if err != nil {
		return err
	}

	if siFrom.Expiration != siTo.Expiration {
		m.Results.Extended = append(m.Results.Extended, SectorExtensions{
			From: siFrom,
			To:   siTo,
		})
	}
	return nil
}

func (m *sectorDiffer) Remove(key uint64, val *cbg.Deferred) error {
	si, err := m.pre.decodeSectorOnChainInfo(val)
	if err != nil {
		return err
	}
	m.Results.Removed = append(m.Results.Removed, si)
	return nil
}
chain/actors/builtin/miner/miner.go
Normal file
169
chain/actors/builtin/miner/miner.go
Normal file
@ -0,0 +1,169 @@
|
||||
package miner
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-cid"
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/go-bitfield"
|
||||
"github.com/filecoin-project/go-state-types/abi"
|
||||
"github.com/filecoin-project/go-state-types/cbor"
|
||||
"github.com/filecoin-project/go-state-types/dline"
|
||||
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
|
||||
"github.com/filecoin-project/lotus/chain/actors/adt"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
)
|
||||
|
||||
// Unchanged between v0 and v1 actors
|
||||
var WPoStProvingPeriod = miner0.WPoStProvingPeriod
|
||||
|
||||
const MinSectorExpiration = miner0.MinSectorExpiration
|
||||
|
||||
func Load(store adt.Store, act *types.Actor) (st State, err error) {
|
||||
switch act.Code {
|
||||
case builtin0.StorageMinerActorCodeID:
|
||||
out := state0{store: store}
|
||||
err := store.Get(store.Context(), act.Head, &out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &out, nil
|
||||
}
|
||||
return nil, xerrors.Errorf("unknown actor code %s", act.Code)
|
||||
}
|
||||
|
||||
type State interface {
|
||||
cbor.Marshaler
|
||||
|
||||
// Total available balance to spend.
|
||||
AvailableBalance(abi.TokenAmount) (abi.TokenAmount, error)
|
||||
// Funds that will vest by the given epoch.
|
||||
VestedFunds(abi.ChainEpoch) (abi.TokenAmount, error)
|
||||
// Funds locked for various reasons.
|
||||
LockedFunds() (LockedFunds, error)
|
||||
|
||||
GetSector(abi.SectorNumber) (*SectorOnChainInfo, error)
|
||||
FindSector(abi.SectorNumber) (*SectorLocation, error)
|
||||
GetSectorExpiration(abi.SectorNumber) (*SectorExpiration, error)
|
||||
GetPrecommittedSector(abi.SectorNumber) (*SectorPreCommitOnChainInfo, error)
|
||||
LoadSectors(sectorNos *bitfield.BitField) ([]*SectorOnChainInfo, error)
|
||||
NumLiveSectors() (uint64, error)
|
||||
IsAllocated(abi.SectorNumber) (bool, error)
|
||||
|
||||
LoadDeadline(idx uint64) (Deadline, error)
|
||||
ForEachDeadline(cb func(idx uint64, dl Deadline) error) error
|
||||
NumDeadlines() (uint64, error)
|
||||
DeadlinesChanged(State) (bool, error)
|
||||
|
||||
Info() (MinerInfo, error)
|
||||
|
||||
DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error)
|
||||
|
||||
// Diff helpers. Used by Diff* functions internally.
|
||||
sectors() (adt.Array, error)
|
||||
decodeSectorOnChainInfo(*cbg.Deferred) (SectorOnChainInfo, error)
|
||||
precommits() (adt.Map, error)
|
||||
decodeSectorPreCommitOnChainInfo(*cbg.Deferred) (SectorPreCommitOnChainInfo, error)
|
||||
}
|
||||
|
||||
type Deadline interface {
|
||||
LoadPartition(idx uint64) (Partition, error)
|
||||
ForEachPartition(cb func(idx uint64, part Partition) error) error
|
||||
PostSubmissions() (bitfield.BitField, error)
|
||||
|
||||
PartitionsChanged(Deadline) (bool, error)
|
||||
}
|
||||
|
||||
type Partition interface {
|
||||
AllSectors() (bitfield.BitField, error)
|
||||
FaultySectors() (bitfield.BitField, error)
|
||||
RecoveringSectors() (bitfield.BitField, error)
|
||||
LiveSectors() (bitfield.BitField, error)
|
||||
ActiveSectors() (bitfield.BitField, error)
|
||||
}
|
||||
|
||||
type SectorOnChainInfo struct {
|
||||
SectorNumber abi.SectorNumber
|
||||
SealProof abi.RegisteredSealProof
|
||||
SealedCID cid.Cid
|
||||
DealIDs []abi.DealID
|
||||
Activation abi.ChainEpoch
|
||||
Expiration abi.ChainEpoch
|
||||
DealWeight abi.DealWeight
|
||||
VerifiedDealWeight abi.DealWeight
|
||||
InitialPledge abi.TokenAmount
|
||||
ExpectedDayReward abi.TokenAmount
|
||||
ExpectedStoragePledge abi.TokenAmount
|
||||
}
|
||||
|
||||
type SectorPreCommitInfo = miner0.SectorPreCommitInfo
|
||||
|
||||
type SectorPreCommitOnChainInfo struct {
|
||||
Info SectorPreCommitInfo
|
||||
PreCommitDeposit abi.TokenAmount
|
||||
PreCommitEpoch abi.ChainEpoch
|
||||
DealWeight abi.DealWeight
|
||||
VerifiedDealWeight abi.DealWeight
|
||||
}
|
||||
|
||||
type PoStPartition = miner0.PoStPartition
|
||||
type RecoveryDeclaration = miner0.RecoveryDeclaration
|
||||
type FaultDeclaration = miner0.FaultDeclaration
|
||||
|
||||
// Params
|
||||
type DeclareFaultsParams = miner0.DeclareFaultsParams
|
||||
type DeclareFaultsRecoveredParams = miner0.DeclareFaultsRecoveredParams
|
||||
type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams
|
||||
type ProveCommitSectorParams = miner0.ProveCommitSectorParams
|
||||
|
||||
type MinerInfo struct {
|
||||
Owner address.Address // Must be an ID-address.
|
||||
Worker address.Address // Must be an ID-address.
|
||||
NewWorker address.Address // Must be an ID-address.
|
||||
ControlAddresses []address.Address // Must be an ID-addresses.
|
||||
WorkerChangeEpoch abi.ChainEpoch
|
||||
PeerId *peer.ID
|
||||
Multiaddrs []abi.Multiaddrs
|
||||
SealProofType abi.RegisteredSealProof
|
||||
SectorSize abi.SectorSize
|
||||
WindowPoStPartitionSectors uint64
|
||||
}
|
||||
|
||||
type SectorExpiration struct {
|
||||
OnTime abi.ChainEpoch
|
||||
|
||||
// non-zero if sector is faulty, epoch at which it will be permanently
|
||||
// removed if it doesn't recover
|
||||
Early abi.ChainEpoch
|
||||
}
|
||||
|
||||
type SectorLocation struct {
|
||||
Deadline uint64
|
||||
Partition uint64
|
||||
}
|
||||
|
||||
type SectorChanges struct {
|
||||
Added []SectorOnChainInfo
|
||||
Extended []SectorExtensions
|
||||
Removed []SectorOnChainInfo
|
||||
}
|
||||
|
||||
type SectorExtensions struct {
|
||||
From SectorOnChainInfo
|
||||
To SectorOnChainInfo
|
||||
}
|
||||
|
||||
type PreCommitChanges struct {
|
||||
Added []SectorPreCommitOnChainInfo
|
||||
Removed []SectorPreCommitOnChainInfo
|
||||
}
|
||||
|
||||
type LockedFunds struct {
|
||||
VestingFunds abi.TokenAmount
|
||||
InitialPledgeRequirement abi.TokenAmount
|
||||
PreCommitDeposits abi.TokenAmount
|
||||
}
|
||||
28  chain/actors/builtin/miner/utils.go  Normal file
@ -0,0 +1,28 @@
package miner

import (
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-bitfield"
)

func AllPartSectors(mas State, sget func(Partition) (bitfield.BitField, error)) (bitfield.BitField, error) {
    var parts []bitfield.BitField

    err := mas.ForEachDeadline(func(dlidx uint64, dl Deadline) error {
        return dl.ForEachPartition(func(partidx uint64, part Partition) error {
            s, err := sget(part)
            if err != nil {
                return xerrors.Errorf("getting sector list (dl: %d, part %d): %w", dlidx, partidx, err)
            }

            parts = append(parts, s)
            return nil
        })
    })
    if err != nil {
        return bitfield.BitField{}, err
    }

    return bitfield.MultiMerge(parts...)
}
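For illustration only (not part of this diff): a caller in this package could merge every faulty sector across a miner's deadlines by passing a Partition method expression to AllPartSectors, assuming `mas` is a miner State loaded elsewhere:

// Sketch: collect all faulty sectors of a miner into one bitfield.
faulty, err := AllPartSectors(mas, Partition.FaultySectors)
if err != nil {
    return xerrors.Errorf("merging faulty sectors: %w", err)
}
numFaulty, err := faulty.Count()
if err != nil {
    return err
}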
373  chain/actors/builtin/miner/v0.go  Normal file
@ -0,0 +1,373 @@
package miner

import (
    "bytes"
    "errors"

    "github.com/libp2p/go-libp2p-core/peer"
    cbg "github.com/whyrusleeping/cbor-gen"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-bitfield"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/dline"
    miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
    adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"

    "github.com/filecoin-project/lotus/chain/actors/adt"
)

var _ State = (*state0)(nil)

type state0 struct {
    miner0.State
    store adt.Store
}

type deadline0 struct {
    miner0.Deadline
    store adt.Store
}

type partition0 struct {
    miner0.Partition
    store adt.Store
}

func (s *state0) AvailableBalance(bal abi.TokenAmount) (abi.TokenAmount, error) {
    return s.GetAvailableBalance(bal), nil
}

func (s *state0) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) {
    return s.CheckVestedFunds(s.store, epoch)
}

func (s *state0) LockedFunds() (LockedFunds, error) {
    return LockedFunds{
        VestingFunds:             s.State.LockedFunds,
        InitialPledgeRequirement: s.State.InitialPledgeRequirement,
        PreCommitDeposits:        s.State.PreCommitDeposits,
    }, nil
}

func (s *state0) InitialPledge() (abi.TokenAmount, error) {
    return s.State.InitialPledgeRequirement, nil
}

func (s *state0) PreCommitDeposits() (abi.TokenAmount, error) {
    return s.State.PreCommitDeposits, nil
}

func (s *state0) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) {
    info, ok, err := s.State.GetSector(s.store, num)
    if !ok || err != nil {
        return nil, err
    }

    ret := fromV0SectorOnChainInfo(*info)
    return &ret, nil
}

func (s *state0) FindSector(num abi.SectorNumber) (*SectorLocation, error) {
    dlIdx, partIdx, err := s.State.FindSector(s.store, num)
    if err != nil {
        return nil, err
    }
    return &SectorLocation{
        Deadline:  dlIdx,
        Partition: partIdx,
    }, nil
}

func (s *state0) NumLiveSectors() (uint64, error) {
    dls, err := s.State.LoadDeadlines(s.store)
    if err != nil {
        return 0, err
    }
    var total uint64
    if err := dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error {
        total += dl.LiveSectors
        return nil
    }); err != nil {
        return 0, err
    }
    return total, nil
}

// GetSectorExpiration returns the effective expiration of the given sector.
//
// If the sector does not expire early, the Early expiration field is 0.
func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) {
    dls, err := s.State.LoadDeadlines(s.store)
    if err != nil {
        return nil, err
    }
    // NOTE: this can be optimized significantly.
    // 1. If the sector is non-faulty, it will either expire on-time (can be
    // learned from the sector info), or in the next quantized expiration
    // epoch (i.e., the first element in the partition's expiration queue).
    // 2. If it's faulty, it will expire early within the first 14 entries
    // of the expiration queue.
    stopErr := errors.New("stop")
    out := SectorExpiration{}
    err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error {
        partitions, err := dl.PartitionsArray(s.store)
        if err != nil {
            return err
        }
        quant := s.State.QuantSpecForDeadline(dlIdx)
        var part miner0.Partition
        return partitions.ForEach(&part, func(partIdx int64) error {
            if found, err := part.Sectors.IsSet(uint64(num)); err != nil {
                return err
            } else if !found {
                return nil
            }
            if found, err := part.Terminated.IsSet(uint64(num)); err != nil {
                return err
            } else if found {
                // already terminated
                return stopErr
            }

            q, err := miner0.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant)
            if err != nil {
                return err
            }
            var exp miner0.ExpirationSet
            return q.ForEach(&exp, func(epoch int64) error {
                if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil {
                    return err
                } else if early {
                    out.Early = abi.ChainEpoch(epoch)
                    return nil
                }
                if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil {
                    return err
                } else if onTime {
                    out.OnTime = abi.ChainEpoch(epoch)
                    return stopErr
                }
                return nil
            })
        })
    })
    if err == stopErr {
        err = nil
    }
    if err != nil {
        return nil, err
    }
    if out.Early == 0 && out.OnTime == 0 {
        return nil, xerrors.Errorf("failed to find sector %d", num)
    }
    return &out, nil
}

func (s *state0) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) {
    info, ok, err := s.State.GetPrecommittedSector(s.store, num)
    if !ok || err != nil {
        return nil, err
    }

    ret := fromV0SectorPreCommitOnChainInfo(*info)
    return &ret, nil
}

func (s *state0) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) {
    sectors, err := miner0.LoadSectors(s.store, s.State.Sectors)
    if err != nil {
        return nil, err
    }

    // If no sector numbers are specified, load all.
    if snos == nil {
        infos := make([]*SectorOnChainInfo, 0, sectors.Length())
        var info0 miner0.SectorOnChainInfo
        if err := sectors.ForEach(&info0, func(_ int64) error {
            info := fromV0SectorOnChainInfo(info0)
            infos = append(infos, &info)
            return nil
        }); err != nil {
            return nil, err
        }
        return infos, nil
    }

    // Otherwise, load selected.
    infos0, err := sectors.Load(*snos)
    if err != nil {
        return nil, err
    }
    infos := make([]*SectorOnChainInfo, len(infos0))
    for i, info0 := range infos0 {
        info := fromV0SectorOnChainInfo(*info0)
        infos[i] = &info
    }
    return infos, nil
}

func (s *state0) IsAllocated(num abi.SectorNumber) (bool, error) {
    var allocatedSectors bitfield.BitField
    if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil {
        return false, err
    }

    return allocatedSectors.IsSet(uint64(num))
}

func (s *state0) LoadDeadline(idx uint64) (Deadline, error) {
    dls, err := s.State.LoadDeadlines(s.store)
    if err != nil {
        return nil, err
    }
    dl, err := dls.LoadDeadline(s.store, idx)
    if err != nil {
        return nil, err
    }
    return &deadline0{*dl, s.store}, nil
}

func (s *state0) ForEachDeadline(cb func(uint64, Deadline) error) error {
    dls, err := s.State.LoadDeadlines(s.store)
    if err != nil {
        return err
    }
    return dls.ForEach(s.store, func(i uint64, dl *miner0.Deadline) error {
        return cb(i, &deadline0{*dl, s.store})
    })
}

func (s *state0) NumDeadlines() (uint64, error) {
    return miner0.WPoStPeriodDeadlines, nil
}

func (s *state0) DeadlinesChanged(other State) (bool, error) {
    other0, ok := other.(*state0)
    if !ok {
        // treat an upgrade as a change, always
        return true, nil
    }

    // report a change when the deadlines root differs
    return !s.State.Deadlines.Equals(other0.Deadlines), nil
}

func (s *state0) Info() (MinerInfo, error) {
    info, err := s.State.GetInfo(s.store)
    if err != nil {
        return MinerInfo{}, err
    }

    var pid *peer.ID
    if peerID, err := peer.IDFromBytes(info.PeerId); err == nil {
        pid = &peerID
    }

    mi := MinerInfo{
        Owner:            info.Owner,
        Worker:           info.Worker,
        ControlAddresses: info.ControlAddresses,

        NewWorker:         address.Undef,
        WorkerChangeEpoch: -1,

        PeerId:                     pid,
        Multiaddrs:                 info.Multiaddrs,
        SealProofType:              info.SealProofType,
        SectorSize:                 info.SectorSize,
        WindowPoStPartitionSectors: info.WindowPoStPartitionSectors,
    }

    if info.PendingWorkerKey != nil {
        mi.NewWorker = info.PendingWorkerKey.NewWorker
        mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt
    }

    return mi, nil
}

func (s *state0) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) {
    return s.State.DeadlineInfo(epoch), nil
}

func (s *state0) sectors() (adt.Array, error) {
    return adt0.AsArray(s.store, s.Sectors)
}

func (s *state0) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) {
    var si miner0.SectorOnChainInfo
    err := si.UnmarshalCBOR(bytes.NewReader(val.Raw))
    if err != nil {
        return SectorOnChainInfo{}, err
    }

    return fromV0SectorOnChainInfo(si), nil
}

func (s *state0) precommits() (adt.Map, error) {
    return adt0.AsMap(s.store, s.PreCommittedSectors)
}

func (s *state0) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) {
    var sp miner0.SectorPreCommitOnChainInfo
    err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw))
    if err != nil {
        return SectorPreCommitOnChainInfo{}, err
    }

    return fromV0SectorPreCommitOnChainInfo(sp), nil
}

func (d *deadline0) LoadPartition(idx uint64) (Partition, error) {
    p, err := d.Deadline.LoadPartition(d.store, idx)
    if err != nil {
        return nil, err
    }
    return &partition0{*p, d.store}, nil
}

func (d *deadline0) ForEachPartition(cb func(uint64, Partition) error) error {
    ps, err := d.Deadline.PartitionsArray(d.store)
    if err != nil {
        return err
    }
    var part miner0.Partition
    return ps.ForEach(&part, func(i int64) error {
        return cb(uint64(i), &partition0{part, d.store})
    })
}

func (d *deadline0) PartitionsChanged(other Deadline) (bool, error) {
    other0, ok := other.(*deadline0)
    if !ok {
        // treat an upgrade as a change, always
        return true, nil
    }

    // report a change when the partitions root differs
    return !d.Deadline.Partitions.Equals(other0.Deadline.Partitions), nil
}

func (d *deadline0) PostSubmissions() (bitfield.BitField, error) {
    return d.Deadline.PostSubmissions, nil
}

func (p *partition0) AllSectors() (bitfield.BitField, error) {
    return p.Partition.Sectors, nil
}

func (p *partition0) FaultySectors() (bitfield.BitField, error) {
    return p.Partition.Faults, nil
}

func (p *partition0) RecoveringSectors() (bitfield.BitField, error) {
    return p.Partition.Recoveries, nil
}

func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo {
    return (SectorOnChainInfo)(v0)
}

func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo {
    return (SectorPreCommitOnChainInfo)(v0)
}
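As an illustration of how the OnTime/Early fields returned by GetSectorExpiration are meant to be combined, a minimal sketch (not part of this diff; `st` and `num` are assumed to come from elsewhere):

// Sketch: compute the effective expiration epoch of a sector.
exp, err := st.GetSectorExpiration(num)
if err != nil {
    return err
}
expiry := exp.OnTime
if exp.Early != 0 && exp.Early < expiry {
    // faulty sector: it will be removed at the earlier epoch unless it recovers
    expiry = exp.Early
}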
43  chain/actors/builtin/multisig/multisig.go  Normal file
@ -0,0 +1,43 @@
package multisig

import (
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/cbor"

    builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
    msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"

    "github.com/filecoin-project/lotus/chain/actors/adt"
    "github.com/filecoin-project/lotus/chain/types"
)

func Load(store adt.Store, act *types.Actor) (State, error) {
    switch act.Code {
    case builtin0.MultisigActorCodeID:
        out := state0{store: store}
        err := store.Get(store.Context(), act.Head, &out)
        if err != nil {
            return nil, err
        }
        return &out, nil
    }
    return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}

type State interface {
    cbor.Marshaler

    LockedBalance(epoch abi.ChainEpoch) (abi.TokenAmount, error)
    StartEpoch() (abi.ChainEpoch, error)
    UnlockDuration() (abi.ChainEpoch, error)
    InitialBalance() (abi.TokenAmount, error)
    Threshold() (uint64, error)
    Signers() ([]address.Address, error)

    ForEachPendingTxn(func(id int64, txn Transaction) error) error
}

type Transaction = msig0.Transaction
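For illustration only (not part of this diff): a caller might load a multisig actor through this abstraction and walk its pending transactions, assuming `store` (adt.Store) and `act` (*types.Actor) were fetched elsewhere and "fmt" is imported:

// Sketch: inspect pending multisig transactions.
st, err := multisig.Load(store, act)
if err != nil {
    return err
}
threshold, err := st.Threshold()
if err != nil {
    return err
}
err = st.ForEachPendingTxn(func(id int64, txn multisig.Transaction) error {
    fmt.Printf("txn %d: %d of %d approvals\n", id, len(txn.Approved), threshold)
    return nil
})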
59  chain/actors/builtin/multisig/v0.go  Normal file
@ -0,0 +1,59 @@
package multisig

import (
    "encoding/binary"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/chain/actors/adt"
    "golang.org/x/xerrors"

    msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
    adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
)

var _ State = (*state0)(nil)

type state0 struct {
    msig0.State
    store adt.Store
}

func (s *state0) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) {
    return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil
}

func (s *state0) StartEpoch() (abi.ChainEpoch, error) {
    return s.State.StartEpoch, nil
}

func (s *state0) UnlockDuration() (abi.ChainEpoch, error) {
    return s.State.UnlockDuration, nil
}

func (s *state0) InitialBalance() (abi.TokenAmount, error) {
    return s.State.InitialBalance, nil
}

func (s *state0) Threshold() (uint64, error) {
    return s.State.NumApprovalsThreshold, nil
}

func (s *state0) Signers() ([]address.Address, error) {
    return s.State.Signers, nil
}

func (s *state0) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error {
    arr, err := adt0.AsMap(s.store, s.State.PendingTxns)
    if err != nil {
        return err
    }
    var out msig0.Transaction
    return arr.ForEach(&out, func(key string) error {
        txid, n := binary.Varint([]byte(key))
        if n <= 0 {
            return xerrors.Errorf("invalid pending transaction key: %v", key)
        }
        return cb(txid, (Transaction)(out))
    })
}
89  chain/actors/builtin/paych/mock/mock.go  Normal file
@ -0,0 +1,89 @@
package mock

import (
    "io"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/lotus/chain/actors/builtin/paych"
)

type mockState struct {
    from       address.Address
    to         address.Address
    settlingAt abi.ChainEpoch
    toSend     abi.TokenAmount
    lanes      map[uint64]paych.LaneState
}

type mockLaneState struct {
    redeemed big.Int
    nonce    uint64
}

// NewMockPayChState constructs a state for a payment channel with the set fixed values
// that satisfies the paych.State interface.
func NewMockPayChState(from address.Address,
    to address.Address,
    settlingAt abi.ChainEpoch,
    toSend abi.TokenAmount,
    lanes map[uint64]paych.LaneState,
) paych.State {
    return &mockState{from, to, settlingAt, toSend, lanes}
}

// NewMockLaneState constructs a state for a payment channel lane with the set fixed values
// that satisfies the paych.LaneState interface. Useful for populating lanes when
// calling NewMockPayChState.
func NewMockLaneState(redeemed big.Int, nonce uint64) paych.LaneState {
    return &mockLaneState{redeemed, nonce}
}

func (ms *mockState) MarshalCBOR(io.Writer) error {
    panic("not implemented")
}

// Channel owner, who has funded the actor
func (ms *mockState) From() (address.Address, error) {
    return ms.from, nil
}

// Recipient of payouts from channel
func (ms *mockState) To() (address.Address, error) {
    return ms.to, nil
}

// Height at which the channel can be `Collected`
func (ms *mockState) SettlingAt() (abi.ChainEpoch, error) {
    return ms.settlingAt, nil
}

// Amount successfully redeemed through the payment channel, paid out on `Collect()`
func (ms *mockState) ToSend() (abi.TokenAmount, error) {
    return ms.toSend, nil
}

// Get total number of lanes
func (ms *mockState) LaneCount() (uint64, error) {
    return uint64(len(ms.lanes)), nil
}

// Iterate lane states
func (ms *mockState) ForEachLaneState(cb func(idx uint64, dl paych.LaneState) error) error {
    var lastErr error
    for lane, state := range ms.lanes {
        if err := cb(lane, state); err != nil {
            lastErr = err
        }
    }
    return lastErr
}

func (mls *mockLaneState) Redeemed() (big.Int, error) {
    return mls.redeemed, nil
}

func (mls *mockLaneState) Nonce() (uint64, error) {
    return mls.nonce, nil
}
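For illustration only (not part of this diff): a test might build a fixed payment channel state with this mock, assuming `fromAddr` and `toAddr` are test addresses constructed elsewhere:

// Sketch: a one-lane mock channel state for unit tests.
lanes := map[uint64]paych.LaneState{
    0: mock.NewMockLaneState(big.NewInt(100), 3),
}
st := mock.NewMockPayChState(fromAddr, toAddr, abi.ChainEpoch(10), abi.NewTokenAmount(100), lanes)
laneCount, _ := st.LaneCount() // 1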
60  chain/actors/builtin/paych/paych.go  Normal file
@ -0,0 +1,60 @@
package paych

import (
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    big "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/go-state-types/cbor"
    builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
    paych0 "github.com/filecoin-project/specs-actors/actors/builtin/paych"

    "github.com/filecoin-project/lotus/chain/actors/adt"
    "github.com/filecoin-project/lotus/chain/types"
)

// Load returns an abstract copy of payment channel state, regardless of actor version
func Load(store adt.Store, act *types.Actor) (State, error) {
    switch act.Code {
    case builtin0.PaymentChannelActorCodeID:
        out := state0{store: store}
        err := store.Get(store.Context(), act.Head, &out)
        if err != nil {
            return nil, err
        }
        return &out, nil
    }
    return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}

// State is an abstract version of payment channel state that works across
// versions
type State interface {
    cbor.Marshaler
    // Channel owner, who has funded the actor
    From() (address.Address, error)
    // Recipient of payouts from channel
    To() (address.Address, error)

    // Height at which the channel can be `Collected`
    SettlingAt() (abi.ChainEpoch, error)

    // Amount successfully redeemed through the payment channel, paid out on `Collect()`
    ToSend() (abi.TokenAmount, error)

    // Get total number of lanes
    LaneCount() (uint64, error)

    // Iterate lane states
    ForEachLaneState(cb func(idx uint64, dl LaneState) error) error
}

// LaneState is an abstract copy of the state of a single lane
type LaneState interface {
    Redeemed() (big.Int, error)
    Nonce() (uint64, error)
}

type SignedVoucher = paych0.SignedVoucher
type ModVerifyParams = paych0.ModVerifyParams
91  chain/actors/builtin/paych/v0.go  Normal file
@ -0,0 +1,91 @@
package paych

import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    big "github.com/filecoin-project/go-state-types/big"
    "github.com/filecoin-project/lotus/chain/actors/adt"
    "github.com/filecoin-project/specs-actors/actors/builtin/paych"
    adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
)

var _ State = (*state0)(nil)

type state0 struct {
    paych.State
    store adt.Store
    lsAmt *adt0.Array
}

// Channel owner, who has funded the actor
func (s *state0) From() (address.Address, error) {
    return s.State.From, nil
}

// Recipient of payouts from channel
func (s *state0) To() (address.Address, error) {
    return s.State.To, nil
}

// Height at which the channel can be `Collected`
func (s *state0) SettlingAt() (abi.ChainEpoch, error) {
    return s.State.SettlingAt, nil
}

// Amount successfully redeemed through the payment channel, paid out on `Collect()`
func (s *state0) ToSend() (abi.TokenAmount, error) {
    return s.State.ToSend, nil
}

func (s *state0) getOrLoadLsAmt() (*adt0.Array, error) {
    if s.lsAmt != nil {
        return s.lsAmt, nil
    }

    // Get the lane state from the chain
    lsamt, err := adt0.AsArray(s.store, s.State.LaneStates)
    if err != nil {
        return nil, err
    }

    s.lsAmt = lsamt
    return lsamt, nil
}

// Get total number of lanes
func (s *state0) LaneCount() (uint64, error) {
    lsamt, err := s.getOrLoadLsAmt()
    if err != nil {
        return 0, err
    }
    return lsamt.Length(), nil
}

// Iterate lane states
func (s *state0) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error {
    // Get the lane state from the chain
    lsamt, err := s.getOrLoadLsAmt()
    if err != nil {
        return err
    }

    // Note: we use a map instead of an array to store laneStates because the
    // client sets the lane ID (the index) and potentially they could use a
    // very large index.
    var ls paych.LaneState
    return lsamt.ForEach(&ls, func(i int64) error {
        return cb(uint64(i), &laneState0{ls})
    })
}

type laneState0 struct {
    paych.LaneState
}

func (ls *laneState0) Redeemed() (big.Int, error) {
    return ls.LaneState.Redeemed, nil
}

func (ls *laneState0) Nonce() (uint64, error) {
    return ls.LaneState.Nonce, nil
}
53  chain/actors/builtin/power/power.go  Normal file
@ -0,0 +1,53 @@
package power

import (
    "github.com/filecoin-project/go-address"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/cbor"
    builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"

    "github.com/filecoin-project/lotus/chain/actors/adt"
    "github.com/filecoin-project/lotus/chain/actors/builtin"
    "github.com/filecoin-project/lotus/chain/types"
)

var Address = builtin0.StoragePowerActorAddr

func Load(store adt.Store, act *types.Actor) (st State, err error) {
    switch act.Code {
    case builtin0.StoragePowerActorCodeID:
        out := state0{store: store}
        err := store.Get(store.Context(), act.Head, &out)
        if err != nil {
            return nil, err
        }
        return &out, nil
    }
    return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}

type State interface {
    cbor.Marshaler

    TotalLocked() (abi.TokenAmount, error)
    TotalPower() (Claim, error)
    TotalCommitted() (Claim, error)
    TotalPowerSmoothed() (builtin.FilterEstimate, error)

    // MinerCounts returns the number of miners. Participating is the number
    // with power above the minimum miner threshold.
    MinerCounts() (participating, total uint64, err error)
    MinerPower(address.Address) (Claim, bool, error)
    MinerNominalPowerMeetsConsensusMinimum(address.Address) (bool, error)
    ListAllMiners() ([]address.Address, error)
}

type Claim struct {
    // Sum of raw byte power for a miner's sectors.
    RawBytePower abi.StoragePower

    // Sum of quality adjusted power for a miner's sectors.
    QualityAdjPower abi.StoragePower
}
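For illustration only (not part of this diff): a caller might load the power actor through this abstraction and compare a miner's claim against the network total, assuming `store`, `act`, and the miner ID-address `maddr` come from elsewhere and "fmt" is imported:

// Sketch: report a miner's quality-adjusted power against the network total.
st, err := power.Load(store, act)
if err != nil {
    return err
}
total, err := st.TotalPower()
if err != nil {
    return err
}
claim, ok, err := st.MinerPower(maddr)
if err != nil {
    return err
}
if ok {
    fmt.Printf("miner has %s of %s QA power\n", claim.QualityAdjPower, total.QualityAdjPower)
}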
85  chain/actors/builtin/power/v0.go  Normal file
@ -0,0 +1,85 @@
package power

import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/chain/actors/builtin"
    power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
    "github.com/filecoin-project/specs-actors/actors/util/adt"
)

var _ State = (*state0)(nil)

type state0 struct {
    power0.State
    store adt.Store
}

func (s *state0) TotalLocked() (abi.TokenAmount, error) {
    return s.TotalPledgeCollateral, nil
}

func (s *state0) TotalPower() (Claim, error) {
    return Claim{
        RawBytePower:    s.TotalRawBytePower,
        QualityAdjPower: s.TotalQualityAdjPower,
    }, nil
}

// Committed power to the network. Includes miners below the minimum threshold.
func (s *state0) TotalCommitted() (Claim, error) {
    return Claim{
        RawBytePower:    s.TotalBytesCommitted,
        QualityAdjPower: s.TotalQABytesCommitted,
    }, nil
}

func (s *state0) MinerPower(addr address.Address) (Claim, bool, error) {
    claims, err := adt.AsMap(s.store, s.Claims)
    if err != nil {
        return Claim{}, false, err
    }
    var claim power0.Claim
    ok, err := claims.Get(abi.AddrKey(addr), &claim)
    if err != nil {
        return Claim{}, false, err
    }
    return Claim{
        RawBytePower:    claim.RawBytePower,
        QualityAdjPower: claim.QualityAdjPower,
    }, ok, nil
}

func (s *state0) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) {
    return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a)
}

func (s *state0) TotalPowerSmoothed() (builtin.FilterEstimate, error) {
    return builtin.FromV0FilterEstimate(*s.State.ThisEpochQAPowerSmoothed), nil
}

func (s *state0) MinerCounts() (uint64, uint64, error) {
    return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil
}

func (s *state0) ListAllMiners() ([]address.Address, error) {
    claims, err := adt.AsMap(s.store, s.Claims)
    if err != nil {
        return nil, err
    }

    var miners []address.Address
    err = claims.ForEach(nil, func(k string) error {
        a, err := address.NewFromBytes([]byte(k))
        if err != nil {
            return err
        }
        miners = append(miners, a)
        return nil
    })
    if err != nil {
        return nil, err
    }

    return miners, nil
}
50  chain/actors/builtin/reward/reward.go  Normal file
@ -0,0 +1,50 @@
package reward

import (
    "github.com/filecoin-project/go-state-types/abi"
    reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-state-types/cbor"
    builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"

    "github.com/filecoin-project/lotus/chain/actors/adt"
    "github.com/filecoin-project/lotus/chain/actors/builtin"
    "github.com/filecoin-project/lotus/chain/types"
)

var Address = builtin0.RewardActorAddr

func Load(store adt.Store, act *types.Actor) (st State, err error) {
    switch act.Code {
    case builtin0.RewardActorCodeID:
        out := state0{store: store}
        err := store.Get(store.Context(), act.Head, &out)
        if err != nil {
            return nil, err
        }
        return &out, nil
    }
    return nil, xerrors.Errorf("unknown actor code %s", act.Code)
}

type State interface {
    cbor.Marshaler

    ThisEpochBaselinePower() (abi.StoragePower, error)
    ThisEpochReward() (abi.StoragePower, error)
    ThisEpochRewardSmoothed() (builtin.FilterEstimate, error)

    EffectiveBaselinePower() (abi.StoragePower, error)
    EffectiveNetworkTime() (abi.ChainEpoch, error)

    TotalStoragePowerReward() (abi.TokenAmount, error)

    CumsumBaseline() (abi.StoragePower, error)
    CumsumRealized() (abi.StoragePower, error)

    InitialPledgeForPower(abi.StoragePower, abi.TokenAmount, *builtin.FilterEstimate, abi.TokenAmount) (abi.TokenAmount, error)
    PreCommitDepositForPower(builtin.FilterEstimate, abi.StoragePower) (abi.TokenAmount, error)
}

type AwardBlockRewardParams = reward0.AwardBlockRewardParams
71  chain/actors/builtin/reward/v0.go  Normal file
@ -0,0 +1,71 @@
package reward

import (
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/lotus/chain/actors/builtin"
    miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
    "github.com/filecoin-project/specs-actors/actors/builtin/reward"
    "github.com/filecoin-project/specs-actors/actors/util/adt"
    "github.com/filecoin-project/specs-actors/actors/util/smoothing"
)

var _ State = (*state0)(nil)

type state0 struct {
    reward.State
    store adt.Store
}

func (s *state0) ThisEpochReward() (abi.StoragePower, error) {
    return s.State.ThisEpochReward, nil
}

func (s *state0) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) {
    return builtin.FromV0FilterEstimate(*s.State.ThisEpochRewardSmoothed), nil
}

func (s *state0) ThisEpochBaselinePower() (abi.StoragePower, error) {
    return s.State.ThisEpochBaselinePower, nil
}

func (s *state0) TotalStoragePowerReward() (abi.TokenAmount, error) {
    return s.State.TotalMined, nil
}

func (s *state0) EffectiveBaselinePower() (abi.StoragePower, error) {
    return s.State.EffectiveBaselinePower, nil
}

func (s *state0) EffectiveNetworkTime() (abi.ChainEpoch, error) {
    return s.State.EffectiveNetworkTime, nil
}

func (s *state0) CumsumBaseline() (abi.StoragePower, error) {
    return s.State.CumsumBaseline, nil
}

func (s *state0) CumsumRealized() (abi.StoragePower, error) {
    // return the realized cumsum, not the baseline cumsum
    return s.State.CumsumRealized, nil
}

func (s *state0) InitialPledgeForPower(sectorWeight abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) {
    return miner0.InitialPledgeForPower(
        sectorWeight,
        s.State.ThisEpochBaselinePower,
        networkTotalPledge,
        s.State.ThisEpochRewardSmoothed,
        &smoothing.FilterEstimate{
            PositionEstimate: networkQAPower.PositionEstimate,
            VelocityEstimate: networkQAPower.VelocityEstimate,
        },
        circSupply), nil
}

func (s *state0) PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) {
    return miner0.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed,
        &smoothing.FilterEstimate{
            PositionEstimate: networkQAPower.PositionEstimate,
            VelocityEstimate: networkQAPower.VelocityEstimate,
        },
        sectorWeight), nil
}
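For illustration only (not part of this diff): a caller could derive the pre-commit deposit and initial pledge for a sector via this abstraction. The inputs are assumptions here: `st` is a reward State, `qaPowerSmoothed` a builtin.FilterEstimate taken from the power actor, and `sectorWeight`, `networkTotalPledge`, and `circSupply` are computed elsewhere.

// Sketch: compute collateral requirements from reward state.
deposit, err := st.PreCommitDepositForPower(qaPowerSmoothed, sectorWeight)
if err != nil {
    return err
}
pledge, err := st.InitialPledgeForPower(sectorWeight, networkTotalPledge, &qaPowerSmoothed, circSupply)
if err != nil {
    return err
}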
71  chain/actors/builtin/verifreg/v0.go  Normal file
@ -0,0 +1,71 @@
package verifreg

import (
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-state-types/abi"
    "github.com/filecoin-project/go-state-types/big"
    verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
    adt0 "github.com/filecoin-project/specs-actors/actors/util/adt"
    "github.com/ipfs/go-cid"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/lotus/chain/actors/adt"
)

var _ State = (*state0)(nil)

type state0 struct {
    verifreg0.State
    store adt.Store
}

func getDataCap(store adt.Store, root cid.Cid, addr address.Address) (bool, abi.StoragePower, error) {
    if addr.Protocol() != address.ID {
        return false, big.Zero(), xerrors.Errorf("can only look up ID addresses")
    }

    vh, err := adt0.AsMap(store, root)
    if err != nil {
        return false, big.Zero(), xerrors.Errorf("loading verifreg: %w", err)
    }

    var dcap abi.StoragePower
    if found, err := vh.Get(abi.AddrKey(addr), &dcap); err != nil {
        return false, big.Zero(), xerrors.Errorf("looking up addr: %w", err)
    } else if !found {
        return false, big.Zero(), nil
    }

    return true, dcap, nil
}

func (s *state0) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) {
    return getDataCap(s.store, s.State.VerifiedClients, addr)
}

func (s *state0) VerifierDataCap(addr address.Address) (bool, abi.StoragePower, error) {
    return getDataCap(s.store, s.State.Verifiers, addr)
}

func forEachCap(store adt.Store, root cid.Cid, cb func(addr address.Address, dcap abi.StoragePower) error) error {
    vh, err := adt0.AsMap(store, root)
    if err != nil {
        return xerrors.Errorf("loading verified clients: %w", err)
    }
    var dcap abi.StoragePower
    return vh.ForEach(&dcap, func(key string) error {
        a, err := address.NewFromBytes([]byte(key))
        if err != nil {
            return err
        }
        return cb(a, dcap)
    })
}

func (s *state0) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error {
    return forEachCap(s.store, s.State.Verifiers, cb)
}

func (s *state0) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error {
    return forEachCap(s.store, s.State.VerifiedClients, cb)
}
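For illustration only (not part of this diff): listing the remaining datacap of every verified client through this adapter, assuming `st` is a verifreg State loaded elsewhere and "fmt" is imported:

// Sketch: enumerate verified clients and their datacap.
err := st.ForEachClient(func(addr address.Address, dcap abi.StoragePower) error {
    fmt.Printf("client %s has %s bytes of datacap remaining\n", addr, dcap)
    return nil
})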
Some files were not shown because too many files have changed in this diff.